filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
auth/auth.go
|
package auth
import (
"errors"
"fmt"
"github.com/dgrijalva/jwt-go"
"golang.org/x/crypto/bcrypt"
"os"
"time"
)
// HashAndSalt takes a plain-text password and returns a hashed and salted version of it.
func HashAndSalt(pwd string) (string, error) {
var (
err error
hash []byte // Hashed and salted password
)
// Return error if passed a blank password
if pwd == "" {
return "", errors.New("can't compute blank password")
}
hash, err = bcrypt.GenerateFromPassword([]byte(pwd), bcrypt.DefaultCost)
if err != nil {
return "", err
}
return string(hash), nil
}
// ComparePassword takes a hashed password and a plain-text password and reports whether they match.
func ComparePassword(hashPwd string, plainPwd string) bool {
var (
err error
)
// Return failed match if passed a blank password
if plainPwd == "" {
return false
}
// Check if passed password and stored password match
err = bcrypt.CompareHashAndPassword([]byte(hashPwd), []byte(plainPwd))
if err != nil {
return false
}
return true
}
// CreateJWT signs a new JWT token with the passed claims.
//
//s string: secret key to sign the token
//
//c: claims to add to the token
func CreateJWT(s string, c map[string]interface{}) (string, error) {
var (
err error
token *jwt.Token //token object
claims jwt.MapClaims //token claims
expiration time.Duration //token expiration
t string //signed JWT token
)
// Parse expiration from the JWT_EXPIRE env variable; fall back to 24h on error
expiration, err = time.ParseDuration(os.Getenv("JWT_EXPIRE"))
if err != nil {
expiration, _ = time.ParseDuration("24h")
}
token = jwt.New(jwt.SigningMethodHS256)
// Create claims
claims = token.Claims.(jwt.MapClaims)
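// "iat" and "exp" are registered JWT claims; RFC 7519 defines them as NumericDate values (seconds since the Unix epoch)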
claims["iat"] = time.Now()
claims["exp"] = time.Now().Add(expiration)
for k, v := range c {
claims[k] = v
}
// Sign token
t, err = token.SignedString([]byte(s))
if err != nil {
return "", errors.New(fmt.Sprintf("Error signing token: %v\n", err))
}
return t, nil
}
|
[
"\"JWT_EXPIRE\""
] |
[] |
[
"JWT_EXPIRE"
] |
[]
|
["JWT_EXPIRE"]
|
go
| 1 | 0 | |
fitacf_fitlomb_compare.py
|
# scripts to compare fit files from fitacf and fitlomb
# generate rawacf file with known targets
# process file with fitacf and fitlomb
# compare output..
import os
import sys
import pdb
import datetime
import h5py
import numpy as np
import pydarn.sdio as sdio
from multiprocessing import Manager
from rawacf_generator import rawacf_record
from superdarn_tools import *
sys.path.append('../SuperDARN_FitLOMB')
import lagstate
from cuda_bayes import BayesGPU
from pydarncuda_fitlomb import CULombFit
SANDBOX = '/home/radar/repos/SuperDARN_pydmap_write/sandbox'
ACF_NAME = '20150101.0000.00.tst'
RAWACF_EXT = '.rawacf'
FITACF_EXT = '.fitacf'
HDF5_EXT = '.fitlomb.hdf5'
RADAR = 'tst'
RECORDTIME = datetime.datetime(2015, 1, 1, 0, 0)
DEF_NOISE = .1
C = 3e8
MAX_TFREQ = 16e6
LOMB_PASSES = 1
NFREQS = 256
NALFS = 256
FWHM_TO_SIGMA = 2.355 # conversion of fwhm to std deviation, assuming gaussian
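# for a gaussian, FWHM = 2*sqrt(2*ln(2)) * sigma ~= 2.355 * sigma, so sigma = FWHM / 2.355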
MAX_V = 2000 # m/s, max velocity (doppler shift) to include in lomb
MAX_W = 1200 # m/s, max spectral width to include in lomb
# synthesizes a rawacf file with velocity and spectral width at gates rgates
def generate_rawacf(rawacfname, targets = [], noise = DEF_NOISE):
rawacf = rawacf_record(filename = rawacfname)
for target in targets:
rawacf.addTarget(target)
rawacf.generateScatter()
rawacf.applyNoise(noise)
rawacf.calcPwr0()
rawacf.setTime(RECORDTIME)
rawacf.write()
rawacf.close()
# generates a fitacf file fitacfname from rawacf file rawacfname
def generate_fitacf(rawacfname, fitacfname):
cmd = 'make_fit -new {} > {}'.format(rawacfname, fitacfname)
os.system(cmd)
# generates a fitlomb file fitacfname from rawacf file rawacfname
def generate_fitlomb(rawacfname, fitlombname):
os.environ['DAVIT_LOCALDIR'] = SANDBOX
os.environ['DAVIT_DIRFORMAT'] = '%(dirtree)s/'
radar = RADAR
manager = Manager()
lock = manager.Lock()
stime = RECORDTIME
etime = None
hdf5file = h5py.File(fitlombname, 'w')
myPtr = sdio.radDataOpen(stime,radar,eTime=etime,channel=None,bmnum=None,cp=None,fileType='rawacf',filtered=False, src='local', noCache = True)
drec = sdio.radDataReadRec(myPtr)
amax = np.ceil((np.pi * 2 * MAX_TFREQ * MAX_W) / C)
fmax = np.ceil(MAX_V * 2 * MAX_TFREQ / C)
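# fmax is the largest Doppler shift to search (f = 2 * v * f_tx / c at the highest transmit frequency);
# amax appears to be the matching maximum ACF decay rate for spectral width MAX_W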
freqs = np.linspace(-fmax,fmax, NFREQS)
alfs = np.linspace(0, amax, NALFS)
fit = CULombFit(drec)
gpu_lambda = BayesGPU(fit.lags, freqs, alfs, fit.nrang, LAMBDA_FIT)
txlag_cache = lagstate.good_lags_txsamples(fit)
fit.SetBadlags(txlag_cache = txlag_cache)
fit.CudaProcessPulse(gpu_lambda)
fit.CudaCopyPeaks(gpu_lambda)
fit.WriteLSSFit(hdf5file)
hdf5file.close()
# parse fitacf, returns a list of targets at ranges rgates
def parse_fitacf(fitacfname, rgates):
dumpstr = get_dmapdumpstring()
scandata = parse_dmapdumpstring(dumpstr)
targets = []
for rgate in rgates:
if rgate in scandata['slist']:
sidx = np.nonzero(scandata['slist'] == rgate)[0][0]
v = scandata['v'][sidx]
w_l = scandata['w_l'][sidx]
p_l = scandata['p_l'][sidx]
v_e = scandata['v_e'][sidx]
w_l_e = scandata['w_l_e'][sidx]
nlag = scandata['nlag'][sidx]
targets.append(target(rangegate = rgate, velocity = v, width = w_l, power = p_l, v_e = v_e, w_e = w_l_e, nlag = nlag))
else:
print('scatter not found on range gate {}'.format(rgate))
targets.append(target(rangegate = rgate, nlag = 0))
return targets
# parse fitlomb, return a list of targets at ranges rgates
def parse_fitlomb(fitlombname, rgates):
h5f = h5py.File(fitlombname, 'r')
beamgrp = h5f[h5f.keys()[0]]
targets = []
v = beamgrp['v'][...]
v_e = beamgrp['v_e'][...]
p_l = beamgrp['p_l'][...]
w_l = beamgrp['w_l'][...]
w_l_e = beamgrp['w_l_e'][...]
nlag = beamgrp['nlag'][...]
for rgate in rgates:
targets.append(target(rangegate = rgate, velocity = v[rgate], width = w_l[rgate], power = p_l[rgate], v_e = v_e[rgate], w_e = w_l_e[rgate], nlag = nlag[rgate]))
return targets
def test_fitacf():
rawacf_name = SANDBOX + '/' + ACF_NAME + RAWACF_EXT
fitacf_name = SANDBOX + '/' + ACF_NAME + FITACF_EXT
synthetic_targets = []
t = target(rangegate = 5, velocity = 500, width = 200, power = 1)
synthetic_targets.append(t)
gates = [t.rangegate for t in synthetic_targets]
generate_rawacf(rawacf_name, targets = synthetic_targets, noise = .1)
generate_fitacf(rawacf_name, fitacf_name)
fitacf_targets = parse_fitacf(fitacf_name, gates)
print 'synthetic targets:'
for t in synthetic_targets:
print t
print 'fitacf fitted targets:'
for t in fitacf_targets:
print t
def main():
print 'finished importing...'
rawacf_name = SANDBOX + '/' + ACF_NAME + RAWACF_EXT
fitacf_name = SANDBOX + '/' + ACF_NAME + FITACF_EXT
fitlomb_name = SANDBOX + '/' + ACF_NAME + HDF5_EXT
print 'creating targets...'
synthetic_targets = []
synthetic_targets.append(target(rangegate = 5, velocity = 500, width = 200, power = 1))
synthetic_targets.append(target(rangegate = 6, velocity = 300, width = 100, power = 1))
synthetic_targets.append(target(rangegate = 7, velocity = 000, width = 000, power = 1))
gates = [t.rangegate for t in synthetic_targets]
print 'creating rawacf...'
generate_rawacf(rawacf_name, targets = synthetic_targets, noise = .1)
print 'creating fitacf...'
generate_fitacf(rawacf_name, fitacf_name)
print 'creating fitlomb...'
generate_fitlomb(rawacf_name, fitlomb_name)
fitacf_targets = parse_fitacf(fitacf_name, gates)
fitlomb_targets = parse_fitlomb(fitlomb_name, gates)
print 'synthetic targets:'
for t in synthetic_targets:
print t
print 'fitacf fitted targets:'
for t in fitacf_targets:
print t
print 'fitlomb fitted targets:'
for t in fitlomb_targets:
print t
if __name__ == '__main__':
main()
|
[] |
[] |
[
"DAVIT_DIRFORMAT",
"DAVIT_LOCALDIR"
] |
[]
|
["DAVIT_DIRFORMAT", "DAVIT_LOCALDIR"]
|
python
| 2 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["digitalio", "busio"]
autodoc_mock_imports = ["displayio"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit ST7735R Library"
copyright = "2019 Scott Shawcroft"
author = "Scott Shawcroft"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
except ImportError:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitSt7735RLibrarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitST7735RLibrary.tex",
"AdafruitST7735R Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"AdafruitST7735Rlibrary",
"Adafruit ST7735R Library Documentation",
[author],
1,
)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitST7735RLibrary",
"Adafruit ST7735R Library Documentation",
author,
"AdafruitST7735RLibrary",
"One line description of project.",
"Miscellaneous",
),
]
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
model_zoo/official/cv/vgg16/src/vgg.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Image classification.
"""
import math
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore.common import initializer as init
from mindspore.common.initializer import initializer
from .utils.var_init import default_recurisive_init, KaimingNormal
def _make_layer(base, args, batch_norm):
"""Make stage network of VGG."""
layers = []
in_channels = 3
for v in base:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
weight = 'ones'
if args.initialize_mode == "XavierUniform":
weight_shape = (v, in_channels, 3, 3)
weight = initializer('XavierUniform', shape=weight_shape, dtype=mstype.float32)
conv2d = nn.Conv2d(in_channels=in_channels,
out_channels=v,
kernel_size=3,
padding=args.padding,
pad_mode=args.pad_mode,
has_bias=args.has_bias,
weight_init=weight)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU()]
else:
layers += [conv2d, nn.ReLU()]
in_channels = v
return nn.SequentialCell(layers)
class Vgg(nn.Cell):
"""
VGG network definition.
Args:
base (list): Configuration for different layers, mainly the channel number of Conv layer.
num_classes (int): Class numbers. Default: 1000.
batch_norm (bool): Whether to do the batchnorm. Default: False.
batch_size (int): Batch size. Default: 1.
include_top(bool): Whether to include the 3 fully-connected layers at the top of the network. Default: True.
Returns:
Tensor, infer output tensor.
Examples:
>>> Vgg([64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
>>> num_classes=1000, batch_norm=False, batch_size=1)
"""
def __init__(self, base, num_classes=1000, batch_norm=False, batch_size=1, args=None, phase="train",
include_top=True):
super(Vgg, self).__init__()
_ = batch_size
self.layers = _make_layer(base, args, batch_norm=batch_norm)
self.include_top = include_top
self.flatten = nn.Flatten()
dropout_ratio = 0.5
if not args.has_dropout or phase == "test":
dropout_ratio = 1.0
self.classifier = nn.SequentialCell([
nn.Dense(512 * 7 * 7, 4096),
nn.ReLU(),
nn.Dropout(dropout_ratio),
nn.Dense(4096, 4096),
nn.ReLU(),
nn.Dropout(dropout_ratio),
nn.Dense(4096, num_classes)])
if args.initialize_mode == "KaimingNormal":
default_recurisive_init(self)
self.custom_init_weight()
def construct(self, x):
x = self.layers(x)
if self.include_top:
x = self.flatten(x)
x = self.classifier(x)
return x
def custom_init_weight(self):
"""
Init the weight of Conv2d and Dense in the net.
"""
for _, cell in self.cells_and_names():
if isinstance(cell, nn.Conv2d):
cell.weight.set_data(init.initializer(
KaimingNormal(a=math.sqrt(5), mode='fan_out', nonlinearity='relu'),
cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(
'zeros', cell.bias.shape, cell.bias.dtype))
elif isinstance(cell, nn.Dense):
cell.weight.set_data(init.initializer(
init.Normal(0.01), cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(
'zeros', cell.bias.shape, cell.bias.dtype))
cfg = {
'11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
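# cfg keys name the VGG variants (VGG-11/13/16/19); the integers are conv output channels and 'M' marks a max-pooling layer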
def vgg16(num_classes=1000, args=None, phase="train", **kwargs):
"""
Get Vgg16 neural network with Batch Normalization.
Args:
num_classes (int): Class numbers. Default: 1000.
args(namespace): param for net init.
phase(str): train or test mode.
Returns:
Cell, cell instance of Vgg16 neural network with Batch Normalization.
Examples:
>>> vgg16(num_classes=1000, args=args, **kwargs)
"""
if args is None:
from .config import cifar_cfg
args = cifar_cfg
net = Vgg(cfg['16'], num_classes=num_classes, args=args, batch_norm=args.batch_norm, phase=phase, **kwargs)
return net
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
al_figure_1.py
|
""" Figure 1
1. Load data and compute performance
2. Compute average learning rates
3. Run statistical tests
4. Prepare figure
5. Plot task trial schematic
6. Plot block example and model computations
7. Plot performance and average learning rates
8. Add subplot labels and save figure
"""
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
import statsmodels.api as sm
import os
from al_simulation import simulation
from al_utilities import get_mean_voi, get_stats
from al_plot_utils import latex_plt, plot_image, cm2inch, label_subplots, swarm_boxplot
# Update matplotlib to use Latex and to change some defaults
os.environ["PATH"] += os.pathsep + '/usr/local/texlive/2016/bin/x86_64-darwin'
matplotlib = latex_plt(matplotlib)
# Get home directory
paths = os.getcwd()
path = paths.split(os.path.sep)
home_dir = path[1]
# ------------------------------------
# 1. Load data and compute performance
# ------------------------------------
# Load data
df_exp1 = pd.read_pickle('al_data/data_prepr_1.pkl')
n_subj = len(np.unique(df_exp1['subj_num']))
# Compute estimation errors
voi = 1
e_t = get_mean_voi(df_exp1, voi)
# ---------------------------------
# 2. Compute average learning rates
# ---------------------------------
# Initialize learning rate and age_group variables
alpha = np.full(n_subj, np.nan)
age_group = np.full(n_subj, np.nan)
# Cycle over participants
for i in range(0, n_subj):
# Extract data of current participant
df_subj = df_exp1[(df_exp1['subj_num'] == i + 1)].copy()
x = np.linspace(0, len(df_subj) - 1, len(df_subj))
df_subj.loc[:, 'trial'] = x.tolist()
df_subj = df_subj.set_index('trial')
# Extract prediction error and prediction update and add intercept to data frame
X = df_subj['delta_t']
Y = df_subj['a_t']
X = X.dropna()
Y = Y.dropna()
X = sm.add_constant(X) # adding a constant as intercept
# Estimate model and extract learning rate parameter alpha (i.e., influence of delta_t on a_t)
model = sm.OLS(Y, X).fit()
alpha[i] = model.params['delta_t']
age_group[i] = np.unique(df_subj['age_group'])
# Uncomment for single-trial figure
# plt.figure()
# plt.plot(X, Y, '.')
# Add learning rate results to data frame
df_alpha = pd.DataFrame()
df_alpha['alpha'] = alpha
df_alpha['age_group'] = age_group
# ------------------------
# 3. Run statistical tests
# ------------------------
# Estimation errors
# -----------------
# Print out estimation error statistics for paper
print('\n\nEstimation error Experiment 1\n')
median_est_err, q1_est_err, q3_est_err, p_est_err, stat_est_err = get_stats(e_t, 1, 'e_t')
# Create data frames to save statistics for Latex manuscript
fig_1_c_desc = pd.DataFrame()
fig_1_c_stat = pd.DataFrame()
# Median estimation error
fig_1_c_desc['median'] = round(median_est_err, 3)
# First quartile
fig_1_c_desc['q1'] = round(q1_est_err, 3)
# Third quartile
fig_1_c_desc['q3'] = round(q3_est_err, 3)
# Make sure to have correct index and labels
fig_1_c_desc.index.name = 'age_group'
fig_1_c_desc = fig_1_c_desc.rename({1: 'ch', 2: 'ad', 3: 'ya', 4: 'oa'}, axis='index')
# P-values and test statistics
fig_1_c_stat['p'] = p_est_err
fig_1_c_stat['stat'] = stat_est_err
fig_1_c_stat.index.name = 'test'
fig_1_c_stat = fig_1_c_stat.rename({0: 'kw', 1: 'ch_ad', 2: 'ch_ya', 3: 'ch_oa', 4: 'ad_ya', 5: 'ad_oa', 6: 'ya_oa'},
axis='index')
# Save estimation-error statistics for Latex manuscript
fig_1_c_desc.to_csv('~/Dropbox/Apps/Overleaf/al_manuscript/al_dataframes/fig_1_c_desc.csv')
fig_1_c_stat.to_csv('~/Dropbox/Apps/Overleaf/al_manuscript/al_dataframes/fig_1_c_stat.csv')
# Average learning rates
# ----------------------
# Print out average learning rate statistics for paper
print('\n\nAlpha Experiment 1\n')
median_alpha, q1_alpha, q3_alpha, p_alpha, stat_alpha = get_stats(df_alpha, 1, 'alpha')
# Create data frames to save statistics for Latex manuscript
fig_1_d_desc = pd.DataFrame()
fig_1_d_stat = pd.DataFrame()
# Median alpha
fig_1_d_desc['median'] = round(median_alpha, 3)
# First quartile
fig_1_d_desc['q1'] = round(q1_alpha, 3)
# Third quartile
fig_1_d_desc['q3'] = round(q3_alpha, 3)
# Make sure to have correct index and labels
fig_1_d_desc.index.name = 'age_group'
fig_1_d_desc = fig_1_d_desc.rename({1: 'ch', 2: 'ad', 3: 'ya', 4: 'oa'}, axis='index')
# P-values and test statistics
fig_1_d_stat['p'] = p_alpha
fig_1_d_stat['stat'] = stat_alpha
fig_1_d_stat.index.name = 'test'
fig_1_d_stat = fig_1_d_stat.rename({0: 'kw', 1: 'ch_ad', 2: 'ch_ya', 3: 'ch_oa', 4: 'ad_ya', 5: 'ad_oa', 6: 'ya_oa'},
axis='index')
# Save learning-rate statistics for Latex manuscript
fig_1_d_desc.to_csv('~/Dropbox/Apps/Overleaf/al_manuscript/al_dataframes/fig_1_d_desc.csv')
fig_1_d_stat.to_csv('~/Dropbox/Apps/Overleaf/al_manuscript/al_dataframes/fig_1_d_stat.csv')
# -----------------
# 4. Prepare figure
# -----------------
# Size of figure
fig_height = 13.125
fig_width = 8
# Create figure
f = plt.figure(figsize=cm2inch(fig_width, fig_height))
f.canvas.draw()
# f.canvas.tostring_argb()
# Create plot grid
gs_0 = gridspec.GridSpec(4, 1, wspace=0.5, hspace=0.7, top=0.95, bottom=0.07, left=0.18, right=0.95)
# Plot colors
colors = ["#92e0a9", "#69b0c1", "#6d6192", "#352d4d"]
sns.set_palette(sns.color_palette(colors))
# ----------------------------
# 5. Plot task trial schematic
# ----------------------------
# Create subplot grid and axis
gs_00 = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_0[0])
ax_0 = plt.Subplot(f, gs_00[0, 0])
f.add_subplot(ax_0)
# Picture paths
path = ['al_figures/prediction.png', 'al_figures/outcome.png',
'al_figures/prediction_error.png', 'al_figures/update.png']
# Figure text and font size
text = ['Prediction', 'Outcome\n(1.4s)', 'Prediction\nerror', 'Update (max. 6s)']
fontsize = 6
# Initialize image coordinates
cell_x0 = 0.0
cell_x1 = 0.2
image_y = 0.8
# Initialize text coordinates
text_y_dist = [0.1, 0.22, 0.22, 0.1]
text_pos = 'left_below'
# Cycle over images
for i in range(0, 4):
# Plot images and text
plot_image(f, path[i], cell_x0, cell_x1, image_y, ax_0, text_y_dist[i], text[i], text_pos, fontsize, zoom=0.05)
# Update coordinates
cell_x0 += 0.25
cell_x1 += 0.25
image_y += -0.2
# Delete unnecessary axes
ax_0.axis('off')
# --------------------------------------------
# 6. Plot block example and model computations
# --------------------------------------------
# Create subplot grid
gs_01 = gridspec.GridSpecFromSubplotSpec(4, 1, subplot_spec=gs_0[1:3], hspace=0.5)
# Simulation parameters
n_sim = 1
model_params = pd.DataFrame(columns=['omikron_0', 'omikron_1', 'b_0', 'b_1', 'h', 's', 'u', 'q', 'sigma_H', 'd',
'subj_num', 'age_group'])
model_params.loc[0, 'omikron_0'] = 0.01
model_params.loc[0, 'omikron_1'] = 0
model_params.loc[0, 'b_0'] = -30
model_params.loc[0, 'b_1'] = -1.5
model_params.loc[0, 'h'] = 0.1
model_params.loc[0, 's'] = 1
model_params.loc[0, 'u'] = 0
model_params.loc[0, 'q'] = 0
model_params.loc[0, 'sigma_H'] = 0
model_params.loc[0, 'd'] = 0.0
model_params.loc[0, 'subj_num'] = 1.0
model_params.loc[0, 'age_group'] = 0
# Normative model simulation
sim_pers = False # no perseveration simulation
_, _, df_data, _, = simulation(df_exp1, model_params, n_sim, sim_pers)
# Indicate plot range and x-axis
plot_range = (200, 225)
x = np.linspace(0, plot_range[1]-plot_range[0]-1, plot_range[1]-plot_range[0])
# Mean, outcomes and predictions
ax_10 = plt.Subplot(f, gs_01[0:2, 0])
f.add_subplot(ax_10)
ax_10.plot(x, df_exp1['mu_t'][plot_range[0]:plot_range[1]], '--',
x, df_exp1['x_t'][plot_range[0]:plot_range[1]], '.', color="#090030")
ax_10.plot(x, df_data['sim_b_t'][plot_range[0]:plot_range[1]], linewidth=2, color="#f30a49", alpha=0.8)
ax_10.set_ylabel('Screen unit')
ax_10.legend(["Helicopter", "Outcome", "Model"], loc=1, framealpha=0.8)
ax_10.set_ylim(0, 309)
ax_10.set_xticklabels([''])
# Prediction errors
ax_11 = plt.Subplot(f, gs_01[2, 0])
f.add_subplot(ax_11)
ax_11.plot(x, df_data['delta_t'][plot_range[0]:plot_range[1]], linewidth=2, color="#090030", alpha=1)
ax_11.set_xticklabels([''])
ax_11.set_ylabel('Pred. error')
# Relative uncertainty, changepoint probability and learning rate
ax_12 = plt.Subplot(f, gs_01[3, 0])
f.add_subplot(ax_12)
ax_12.plot(x, df_data['tau_t'][plot_range[0]:plot_range[1]], linewidth=2, color="#04879c", alpha=1)
ax_12.plot(x, df_data['omega_t'][plot_range[0]:plot_range[1]], linewidth=2, color="#0c3c78", alpha=1)
ax_12.plot(x, df_data['alpha_t'][plot_range[0]:plot_range[1]], linewidth=2, color="#f30a49", alpha=0.8)
ax_12.legend(['RU', 'CPP', 'Learning\nrate'], loc=1)
ax_12.set_xlabel('Trial')
ax_12.set_ylabel('Variable')
# ----------------------------------------------
# 7. Plot performance and average learning rates
# ----------------------------------------------
# Create subplot grid
gs_02 = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_0[3], hspace=0.1, wspace=0.5)
# Plot estimation-error swarm-boxplot
exp = 1
ax_2 = plt.Subplot(f, gs_02[0, 0])
f.add_subplot(ax_2)
ax_2 = swarm_boxplot(ax_2, e_t, 'e_t', 'Estimation error', exp)
# Plot learning-rate swarm-boxplot
exp = 1
ax_3 = plt.Subplot(f, gs_02[0, 1])
f.add_subplot(ax_3)
ax_3 = swarm_boxplot(ax_3, df_alpha, 'alpha', 'Learning rate', exp)
# Delete unnecessary axes
sns.despine()
# -------------------------------------
# 8. Add subplot labels and save figure
# -------------------------------------
# Label letters
texts = ['a', 'b', ' ', ' ', 'c', 'd']
# Add labels
label_subplots(f, texts, x_offset=0.15, y_offset=0.0)
# Save figure
savename = "/" + home_dir + "/rasmus/Dropbox/Apps/Overleaf/al_manuscript/al_figures/al_figure_1.pdf"
plt.savefig(savename, transparent=True, dpi=400)
# Show plot
plt.show()
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
mosh.go
|
// Command mosh is an alternative wrapper to mosh-client command that plays well with socks proxies.
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"log"
"net"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"time"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/crypto/ssh/knownhosts"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/net/proxy"
"github.com/artyom/autoflags"
)
func main() {
defaultUser := os.Getenv("MOSH_USER")
if defaultUser == "" {
defaultUser = os.Getenv("USER")
}
defaultPorts := os.Getenv("MOSH_PORTS")
if defaultPorts == "" {
defaultPorts = "60000:60050"
}
params := struct {
SSHPort int `flag:"sshport,ssh port to use"`
Login string `flag:"l,login"`
MoshPorts string `flag:"p,server-side UDP port or colon-separated range"`
Timeout time.Duration `flag:"timeout,ssh connect timeout"`
}{
SSHPort: 22,
Login: defaultUser,
MoshPorts: defaultPorts,
Timeout: 5 * time.Second,
}
autoflags.Define(&params)
flag.Parse()
if len(flag.Args()) != 1 {
flag.Usage()
os.Exit(1)
}
addr := flag.Args()[0]
ips, err := net.LookupIP(addr)
if err != nil {
log.Fatal(err)
}
if len(ips) == 0 {
log.Fatalf("name %q resolved to %v", addr, ips)
}
clientPath, err := exec.LookPath("mosh-client")
if err != nil {
log.Fatal(err)
}
port, key, err := runServer(addr, params.Login, params.MoshPorts, params.SSHPort, params.Timeout)
if err != nil {
log.Fatal(err)
}
newEnv := append([]string{}, os.Environ()...)
newEnv = append(newEnv, "MOSH_KEY="+key)
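// mosh-client reads the session key from the MOSH_KEY environment variable; syscall.Exec replaces this process with it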
log.Fatal(syscall.Exec(clientPath, []string{"mosh-client", ips[0].String(), strconv.Itoa(port)}, newEnv))
}
func runServer(addr, login, moshPorts string, port int, tout time.Duration) (int, string, error) {
hostKeyCallback, err := knownhosts.New(os.ExpandEnv("$HOME/.ssh/known_hosts"))
if err != nil {
return 0, "", err
}
var sshAgent agent.Agent
agentConn, err := net.DialTimeout("unix", os.Getenv("SSH_AUTH_SOCK"), tout)
if err != nil {
return 0, "", err
}
sshAgent = agent.NewClient(agentConn)
defer agentConn.Close()
signers, err := sshAgent.Signers()
if err != nil {
return 0, "", err
}
sshConfig := &ssh.ClientConfig{
User: login,
Auth: []ssh.AuthMethod{ssh.PublicKeys(signers...),
ssh.KeyboardInteractive(keyboardChallenge)},
HostKeyCallback: hostKeyCallback,
}
client, err := sshDial("tcp", net.JoinHostPort(addr, strconv.Itoa(port)), sshConfig)
if err != nil {
return 0, "", err
}
defer client.Close()
session, err := client.NewSession()
if err != nil {
return 0, "", err
}
defer session.Close()
width, height := 80, 25
if w, h, err := terminal.GetSize(0); err == nil {
width, height = w, h
}
if err := session.RequestPty(os.Getenv("TERM"), height, width, make(ssh.TerminalModes)); err != nil {
return 0, "", err
}
rdata, err := session.CombinedOutput("mosh-server new -p " + moshPorts)
if err != nil {
os.Stderr.Write(rdata)
return 0, "", err
}
return parsePortKey(rdata)
}
func parsePortKey(b []byte) (port int, key string, err error) {
for s := bufio.NewScanner(bytes.NewReader(b)); s.Scan(); {
if !bytes.HasPrefix(s.Bytes(), []byte("MOSH CONNECT")) {
continue
}
fields := strings.Fields(s.Text())
if len(fields) != 4 {
return 0, "", fmt.Errorf("unexpected response line from mosh-server: %q", s.Text())
}
port, err = strconv.Atoi(fields[2])
if err != nil {
return 0, "", err
}
key = fields[3]
return port, key, nil
}
return 0, "", fmt.Errorf("no 'MOSH CONNECT' line from mosh-server")
}
func sshDial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
proxyDialer := proxy.FromEnvironment()
conn, err := proxyDialer.Dial(network, addr)
if err != nil {
return nil, err
}
c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
if err != nil {
return nil, err
}
return ssh.NewClient(c, chans, reqs), nil
}
func init() {
log.SetFlags(0)
log.SetPrefix("mosh: ")
flag.Usage = func() {
fmt.Fprintln(os.Stderr, "Usage: mosh [flags] hostname")
flag.PrintDefaults()
}
}
func keyboardChallenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
if len(questions) != 0 {
return nil, fmt.Errorf("keyboard interactive challenge is not supported")
}
// https://godoc.org/golang.org/x/crypto/ssh#KeyboardInteractiveChallenge
//
// After successful authentication, the server may send
// a challenge with no questions, for which the user and
// instruction messages should be printed.
if user != "" {
log.Println(user)
}
if instruction != "" {
log.Println(instruction)
}
return nil, nil
}
|
[
"\"MOSH_USER\"",
"\"USER\"",
"\"MOSH_PORTS\"",
"\"SSH_AUTH_SOCK\"",
"\"TERM\""
] |
[] |
[
"SSH_AUTH_SOCK",
"USER",
"MOSH_PORTS",
"TERM",
"MOSH_USER"
] |
[]
|
["SSH_AUTH_SOCK", "USER", "MOSH_PORTS", "TERM", "MOSH_USER"]
|
go
| 5 | 0 | |
Leg-UP/models/recommender/Recommender.py
|
# -*- coding: utf-8 -*-
# @Time : 2020/11/27 17:20
# @Author : chensi
# @File : Recommender.py
# @Software : PyCharm
# @Description : None
import os
# os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# def available_GPU():
# import subprocess
# import numpy as np
# nDevice = int(subprocess.getoutput("nvidia-smi -L | grep GPU |wc -l"))
# total_GPU_str = subprocess.getoutput("nvidia-smi -q -d Memory | grep -A4 GPU | grep Total | grep -o '[0-9]\+'")
# total_GPU = total_GPU_str.split('\n')
# total_GPU = np.array([int(device_i) for device_i in total_GPU])
# avail_GPU_str = subprocess.getoutput("nvidia-smi -q -d Memory | grep -A4 GPU | grep Free | grep -o '[0-9]\+'")
# avail_GPU = avail_GPU_str.split('\n')
# avail_GPU = np.array([int(device_i) for device_i in avail_GPU])
# avail_GPU = avail_GPU / total_GPU
# return np.argmax(avail_GPU)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1, 2, 3"
# try:
# os.environ["CUDA_VISIBLE_DEVICES"] = str(available_GPU())
# except:
# os.environ["CUDA_VISIBLE_DEVICES"] = '0'
import random
import numpy as np
import torch
tf = None
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except:
import tensorflow as tf
seed = 1234
random.seed(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
from utils.data_loader import DataLoader
import numpy as np
import pandas as pd
import argparse, scipy, math
import surprise
from surprise import Dataset, Reader, accuracy
from surprise.model_selection import PredefinedKFold
class Recommender(object):
def __init__(self):
self.args = self.parse_args()
# Paths
self.train_path = self.args.train_path
self.test_path = self.args.test_path
self.model_path = self.args.model_path
self.target_prediction_path_prefix = self.args.target_prediction_path_prefix
# Attack settings
self.target_id_list = list(map(int, self.args.target_ids.split(',')))
self.topk_list = list(map(int, self.args.topk.split(',')))
#
# os.environ["CUDA_VISIBLE_DEVICES"] = str(self.args.cuda_id)
pass
@staticmethod
def parse_args():
parser = argparse.ArgumentParser(description="Run Recommender.")
parser.add_argument('--data_set', type=str, default='ml100k') # , required=True)
# Paths
parser.add_argument('--train_path', type=str,
default='./data/ml100k/ml100k_train.dat') # , required=True)
parser.add_argument('--test_path', type=str,
default='./data/ml100k/ml100k_test.dat') # , required=True)
parser.add_argument('--model_path', type=str,
default='./results/model_saved/automotive/automotive_NeuMF_AUSHplus_round_119') # , required=True)
parser.add_argument('--target_prediction_path_prefix', type=str,
default='./results/performance/mid_results/ml100k_Recommender') # , required=True)
# Attack settings
parser.add_argument('--target_ids', type=str, default='0') # , required=True)
parser.add_argument('--topk', type=str, default='5,10,20,50')
#
parser.add_argument('--cuda_id', type=int, default=0)
return parser
def prepare_data(self):
self.dataset_class = DataLoader(self.train_path, self.test_path)
self.train_data_df, self.test_data_df, self.n_users, self.n_items = self.dataset_class.load_file_as_dataFrame()
self.train_matrix, _ = self.dataset_class.dataFrame_to_matrix(self.train_data_df, self.n_users, self.n_items)
self.test_matrix, _ = self.dataset_class.dataFrame_to_matrix(self.test_data_df, self.n_users, self.n_items)
pass
def build_network(self):
print('build Recommender model graph.')
raise NotImplementedError
def train(self):
print('train.')
raise NotImplementedError
def test(self):
print('test.')
raise NotImplementedError
def execute(self):
print('generate target item performance on a trained Recommender model.')
raise NotImplementedError
def save(self, path):
saver = tf.train.Saver()
saver.save(self.sess, path)
def restore(self, path):
saver = tf.train.Saver()
saver.restore(self.sess, path)
def predict(self, user_id, item_id):
raise NotImplementedError
def generate_target_result(self):
train_data_array = self.train_matrix.toarray()
for target_id in self.target_id_list:
# Mask out users who already rated the target item, as well as the already-rated items of the remaining users
mask = np.zeros_like(train_data_array)
mask[np.where(train_data_array[:, target_id])[0]] = float('inf')
# Select the test (user, item) pairs
test_uids, test_iids = np.where((train_data_array + mask) == 0)
# Predict ratings
test_predRatings = self.predict(test_uids, test_iids)
# Build a dataframe of the predictions
predResults = pd.DataFrame({'user_id': test_uids,
'item_id': test_iids,
'rating': test_predRatings
})
# For each user, compute the predicted score of the target item and HR@k
predResults_target = np.zeros([len(predResults.user_id.unique()), len(self.topk_list) + 2])
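# each row: [user_id, predicted rating of the target item, HR@k indicator for every k in topk_list]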
for idx, (user_id, pred_result) in enumerate(predResults.groupby('user_id')):
pred_value = pred_result[pred_result.item_id == target_id].rating.values[0]
sorted_recommend_list = pred_result.sort_values('rating', ascending=False).item_id.values
new_line = [user_id, pred_value] + [1 if target_id in sorted_recommend_list[:k] else 0 for k in
self.topk_list]
predResults_target[idx] = new_line
np.save('%s_%d' % (self.target_prediction_path_prefix, target_id), predResults_target)
class AutoRec(Recommender):
def __init__(self):
super(AutoRec, self).__init__()
self.restore_model = self.args.restore_model
self.learning_rate = self.args.learning_rate
self.epochs = self.args.epoch
self.batch_size = self.args.batch_size
self.reg_rate = self.args.reg_rate
self.verbose = self.args.verbose
self.T = self.args.T
#
self.hidden_neuron = self.args.hidden_neuron
#
print("AutoRec.", end=' ')
@staticmethod
def parse_args():
parser = Recommender.parse_args()
#
parser.add_argument('--restore_model', type=int, default=0)
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--reg_rate', type=float, default=0.1)
parser.add_argument('--epoch', type=int, default=500)
parser.add_argument('--batch_size', type=int, default=500)
parser.add_argument('--verbose', type=int, default=1)
parser.add_argument('--T', type=int, default=5)
parser.add_argument('--display_step', type=int, default=1000)
#
parser.add_argument('--hidden_neuron', type=int, default=500)
#
return parser
def prepare_data(self):
super(AutoRec, self).prepare_data()
self.train_data_array = self.train_matrix.toarray()
self.train_data_mask_array = np.sign(self.train_data_array)
def build_network(self):
raise NotImplementedError
def predict(self, user_id, item_id):
raise NotImplementedError
def train(self):
raise NotImplementedError
def test(self):
raise NotImplementedError
def execute(self):
# Prepare data
self.prepare_data()
# tensorflow session
config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
self.sess = sess
self.build_network()
init = tf.global_variables_initializer()
sess.run(init)
if self.restore_model:
self.restore(self.model_path)
print("loading done.")
else:
loss_prev = float('inf')
for epoch in range(self.epochs):
loss_cur = self.train()
if self.verbose and epoch % self.T == 0:
print("epoch:\t", epoch, "\tloss:\t", loss_cur)
if abs(loss_cur - loss_prev) < math.exp(-5):
break
loss_prev = loss_cur
self.save(self.model_path)
print("training done.")
rmse, mae = self.test()
print("RMSE : %.4f,\tMAE : %.4f" % (rmse, mae))
self.generate_target_result()
return
class IAutoRec(AutoRec):
def __init__(self):
super(IAutoRec, self).__init__()
print("IAutoRec.", end=' ')
@staticmethod
def parse_args():
parser = AutoRec.parse_args()
# return parser.parse_args()
args, _ = parser.parse_known_args()
return args
def prepare_data(self):
super(IAutoRec, self).prepare_data()
def build_network(self):
# placeholder
self.rating_matrix = tf.placeholder(dtype=tf.float32, shape=[self.n_users, None])
self.rating_matrix_mask = tf.placeholder(dtype=tf.float32, shape=[self.n_users, None])
self.keep_rate_net = tf.placeholder(tf.float32)
self.keep_rate_input = tf.placeholder(tf.float32)
# Variable
V = tf.Variable(tf.random_normal([self.hidden_neuron, self.n_users], stddev=0.01))
W = tf.Variable(tf.random_normal([self.n_users, self.hidden_neuron], stddev=0.01))
mu = tf.Variable(tf.random_normal([self.hidden_neuron], stddev=0.01))
b = tf.Variable(tf.random_normal([self.n_users], stddev=0.01))
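# I-AutoRec: each column of rating_matrix is one item's rating vector over all users; V/mu encode it, W/b reconstruct it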
# forward
layer_1 = tf.nn.dropout(tf.sigmoid(tf.expand_dims(mu, 1) + tf.matmul(V, self.rating_matrix)),
self.keep_rate_net)
self.layer_2 = tf.matmul(W, layer_1) + tf.expand_dims(b, 1)
# backward
self.loss = tf.reduce_mean(tf.square(
tf.norm(tf.multiply((self.rating_matrix - self.layer_2), self.rating_matrix_mask)))) + self.reg_rate * (
tf.square(tf.norm(W)) + tf.square(tf.norm(V)))
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
def predict(self, user_id, item_id):
self.reconstruction = self.sess.run(self.layer_2, feed_dict={self.rating_matrix: self.train_data_array,
self.rating_matrix_mask: self.train_data_mask_array,
self.keep_rate_net: 1})
return self.reconstruction[user_id, item_id]
def train(self):
total_batch = int(self.n_items / self.batch_size)
idxs = np.random.permutation(self.n_items) # shuffled ordering
loss = []
for i in range(total_batch):
batch_set_idx = idxs[i * self.batch_size: (i + 1) * self.batch_size]
_, loss_ = self.sess.run(
[self.optimizer, self.loss],
feed_dict={
self.rating_matrix: self.train_matrix[:, batch_set_idx].toarray(),
self.rating_matrix_mask: np.sign(self.train_matrix[:, batch_set_idx].toarray()),
self.keep_rate_net: 1 # 0.95
})
loss.append(loss_)
return np.mean(loss)
def test(self):
self.reconstruction = self.sess.run(self.layer_2,
feed_dict={self.rating_matrix: self.train_data_array,
self.rating_matrix_mask: self.train_data_mask_array,
self.keep_rate_net: 1})
test_data = self.test_matrix.toarray()
test_data_mask = test_data > 0
test_data_num = np.sum(test_data_mask)
#
mae_matrix = np.abs(test_data - self.reconstruction) * test_data_mask
rmse_matrix = mae_matrix ** 2
rmse, mae = np.sum(rmse_matrix) / test_data_num, np.sum(mae_matrix) / test_data_num
return rmse, mae
def execute(self):
super(IAutoRec, self).execute()
class UAutoRec(AutoRec):
def __init__(self):
super(UAutoRec, self).__init__()
#
self.layer = self.args.layer
#
print("UAutoRec.", end=' ')
@staticmethod
def parse_args():
parser = AutoRec.parse_args()
#
parser.add_argument('--layer', type=int, default=1)
#
# return parser.parse_args()
args, _ = parser.parse_known_args()
return args
def prepare_data(self):
super(UAutoRec, self).prepare_data()
def build_network(self):
# placeholder
self.rating_matrix = tf.placeholder(dtype=tf.float32, shape=[self.n_items, None])
self.rating_matrix_mask = tf.placeholder(dtype=tf.float32, shape=[self.n_items, None])
if self.layer == 1:
# Variable
V = tf.Variable(tf.random_normal([self.hidden_neuron, self.n_items], stddev=0.01))
W = tf.Variable(tf.random_normal([self.n_items, self.hidden_neuron], stddev=0.01))
mu = tf.Variable(tf.random_normal([self.hidden_neuron], stddev=0.01))
b = tf.Variable(tf.random_normal([self.n_items], stddev=0.01))
layer_1 = tf.sigmoid(tf.expand_dims(mu, 1) + tf.matmul(V, self.rating_matrix))
self.layer_2 = tf.matmul(W, layer_1) + tf.expand_dims(b, 1)
Loss_norm = tf.square(tf.norm(W)) + tf.square(tf.norm(V))
elif self.layer == 3:
V_1 = tf.Variable(tf.random_normal([self.hidden_neuron, self.n_items], stddev=0.01))
V_2 = tf.Variable(tf.random_normal([self.hidden_neuron // 2, self.hidden_neuron], stddev=0.01))
V_3 = tf.Variable(tf.random_normal([self.hidden_neuron, self.hidden_neuron // 2], stddev=0.01))
W = tf.Variable(tf.random_normal([self.n_items, self.hidden_neuron], stddev=0.01))
mu_1 = tf.Variable(tf.random_normal([self.hidden_neuron], stddev=0.01))
mu_2 = tf.Variable(tf.random_normal([self.hidden_neuron // 2], stddev=0.01))
mu_3 = tf.Variable(tf.random_normal([self.hidden_neuron], stddev=0.01))
b = tf.Variable(tf.random_normal([self.n_items], stddev=0.01))
#
layer_1 = tf.sigmoid(tf.matmul(V_1, self.rating_matrix) + tf.expand_dims(mu_1, 1))
layer_2 = tf.sigmoid(tf.matmul(V_2, layer_1) + tf.expand_dims(mu_2, 1))
layer_3 = tf.sigmoid(tf.matmul(V_3, layer_2) + tf.expand_dims(mu_3, 1))
self.layer_2 = tf.matmul(W, layer_3) + tf.expand_dims(b, 1)
Loss_norm = tf.square(tf.norm(W)) + tf.square(tf.norm(V_1)) + tf.square(tf.norm(V_2)) + tf.square(tf.norm(V_3))
self.loss = tf.reduce_mean(tf.square(
tf.norm(tf.multiply((self.rating_matrix - self.layer_2),
self.rating_matrix_mask)))) + self.reg_rate * Loss_norm
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
def predict(self, user_id, item_id):
self.reconstruction = self.sess.run(self.layer_2,
feed_dict={self.rating_matrix: self.train_data_array.transpose(),
self.rating_matrix_mask: self.train_data_mask_array.transpose()})
return self.reconstruction.transpose()[user_id, item_id]
def train(self):
total_batch = int(self.n_users / self.batch_size)
idxs = np.random.permutation(self.n_users) # shuffled ordering
loss = []
for i in range(total_batch):
batch_set_idx = idxs[i * self.batch_size: (i + 1) * self.batch_size]
_, loss_ = self.sess.run(
[self.optimizer, self.loss],
feed_dict={self.rating_matrix: self.train_data_array.transpose()[:, batch_set_idx],
self.rating_matrix_mask: self.train_data_mask_array.transpose()[:, batch_set_idx]
})
loss.append(loss_)
return np.mean(loss)
def test(self):
self.reconstruction = self.sess.run(self.layer_2,
feed_dict={self.rating_matrix: self.train_data_array.transpose(),
self.rating_matrix_mask:
self.train_data_mask_array.transpose()})
test_data = self.test_matrix.toarray().transpose()
test_data_mask = test_data > 0
test_data_num = np.sum(test_data_mask)
#
mae_matrix = np.abs(test_data - self.reconstruction) * test_data_mask
rmse_matrix = mae_matrix ** 2
rmse, mae = np.sum(rmse_matrix) / test_data_num, np.sum(mae_matrix) / test_data_num
return rmse, mae
def execute(self):
super(UAutoRec, self).execute()
class NeuMF(Recommender):
def __init__(self):
super(NeuMF, self).__init__()
self.restore_model = self.args.restore_model
self.learning_rate = self.args.learning_rate
self.epochs = self.args.epoch
self.batch_size = self.args.batch_size
self.reg_rate = self.args.reg_rate
self.verbose = self.args.verbose
self.T = self.args.T
#
self.num_factor = self.args.num_factor
self.num_factor_mlp = self.args.num_factor_mlp
self.hidden_dimension = self.args.hidden_dimension
#
print("NeuMF.")
@staticmethod
def parse_args():
parser = Recommender.parse_args()
#
parser.add_argument('--restore_model', type=int, default=0)
parser.add_argument('--learning_rate', type=float, default=0.5)
parser.add_argument('--reg_rate', type=float, default=0.01)
parser.add_argument('--epoch', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=256)
#
parser.add_argument('--num_factor', type=int, default=10)
parser.add_argument('--num_factor_mlp', type=int, default=64)
parser.add_argument('--hidden_dimension', type=int, default=10)
#
parser.add_argument('--verbose', type=int, default=1)
parser.add_argument('--T', type=int, default=5)
parser.add_argument('--display_step', type=int, default=1000)
#
# return parser.parse_args()
args, _ = parser.parse_known_args()
return args
def build_network(self):
# self.num_neg_sample = num_neg_sample
self.user_id = tf.placeholder(dtype=tf.int32, shape=[None], name='user_id')
self.item_id = tf.placeholder(dtype=tf.int32, shape=[None], name='item_id')
self.y = tf.placeholder(dtype=tf.float32, shape=[None], name='y')
self.P = tf.Variable(tf.random_normal([self.n_users, self.num_factor], stddev=0.01), dtype=tf.float32)
self.Q = tf.Variable(tf.random_normal([self.n_items, self.num_factor], stddev=0.01), dtype=tf.float32)
self.mlp_P = tf.Variable(tf.random_normal([self.n_users, self.num_factor_mlp], stddev=0.01), dtype=tf.float32)
self.mlp_Q = tf.Variable(tf.random_normal([self.n_items, self.num_factor_mlp], stddev=0.01), dtype=tf.float32)
user_latent_factor = tf.nn.embedding_lookup(self.P, self.user_id)
item_latent_factor = tf.nn.embedding_lookup(self.Q, self.item_id)
mlp_user_latent_factor = tf.nn.embedding_lookup(self.mlp_P, self.user_id)
mlp_item_latent_factor = tf.nn.embedding_lookup(self.mlp_Q, self.item_id)
_GMF = tf.multiply(user_latent_factor, item_latent_factor)
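# GMF branch: element-wise product of user and item embeddings; the MLP branch below uses separate embeddings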
regularizer = tf.keras.regularizers.l2(self.reg_rate)
layer_1 = tf.layers.dense(
inputs=tf.concat([mlp_item_latent_factor, mlp_user_latent_factor], axis=1),
units=self.num_factor_mlp * 2,
kernel_initializer=tf.random_normal_initializer,
activation=tf.nn.relu,
kernel_regularizer=regularizer)
layer_2 = tf.layers.dense(
inputs=layer_1,
units=self.hidden_dimension * 8,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
layer_3 = tf.layers.dense(
inputs=layer_2,
units=self.hidden_dimension * 4,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
layer_4 = tf.layers.dense(
inputs=layer_3,
units=self.hidden_dimension * 2,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
_MLP = tf.layers.dense(
inputs=layer_4,
units=self.hidden_dimension,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
# self.pred_y = tf.nn.sigmoid(tf.reduce_sum(tf.concat([_GMF, _MLP], axis=1), 1))
self.pred_rating = tf.reduce_sum(tf.concat([_GMF, _MLP], axis=1), 1)
self.loss = tf.reduce_sum(tf.square(self.y - self.pred_rating)) \
+ tf.losses.get_regularization_loss() + \
self.reg_rate * (tf.nn.l2_loss(self.P) + tf.nn.l2_loss(self.Q) +
tf.nn.l2_loss(self.mlp_P) + tf.nn.l2_loss(self.mlp_Q))
#
self.optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss)
return self
def prepare_data(self):
super(NeuMF, self).prepare_data()
#
self.train_matrix_coo = self.train_matrix.tocoo()
#
self.user = self.train_matrix_coo.row.reshape(-1)
self.item = self.train_matrix_coo.col.reshape(-1)
self.rating = self.train_matrix_coo.data
def train(self):
self.num_training = len(self.rating)
total_batch = int(self.num_training / self.batch_size)
idxs = np.random.permutation(self.num_training) # shuffled ordering
user_random = list(self.user[idxs])
item_random = list(self.item[idxs])
rating_random = list(self.rating[idxs])
# train
loss = []
for i in range(total_batch):
batch_user = user_random[i * self.batch_size:(i + 1) * self.batch_size]
batch_item = item_random[i * self.batch_size:(i + 1) * self.batch_size]
batch_rating = rating_random[i * self.batch_size:(i + 1) * self.batch_size]
_, loss_ = self.sess.run(
[self.optimizer, self.loss],
feed_dict={self.user_id: batch_user,
self.item_id: batch_item,
self.y: batch_rating})
loss.append(loss_)
return np.mean(loss)
def test(self):
test_data = self.test_matrix.todok()
#
uids = np.array(list(test_data.keys()))[:, 0]
iids = np.array(list(test_data.keys()))[:, 1]
ground_truth = np.array(list(test_data.values()))
#
pred_rating = self.predict(uids, iids)
#
rmse = np.sqrt(np.mean((pred_rating - ground_truth) ** 2))
mae = np.mean(np.abs(pred_rating - ground_truth))
return rmse, mae
def predict(self, user_ids, item_ids):
if len(user_ids) < self.batch_size:
return self.sess.run(self.pred_rating,
feed_dict={
self.user_id: user_ids,
self.item_id: item_ids}
)
# predict by batch
total_batch = math.ceil(len(user_ids) / self.batch_size)
user_ids, item_ids = list(user_ids), list(item_ids)
pred_rating = []
for i in range(total_batch):
batch_user = user_ids[i * self.batch_size:(i + 1) * self.batch_size]
batch_item = item_ids[i * self.batch_size:(i + 1) * self.batch_size]
# predict
batch_pred_rating = self.sess.run(self.pred_rating,
feed_dict={
self.user_id: batch_user,
self.item_id: batch_item}
)
pred_rating += list(batch_pred_rating)
return pred_rating
def restore_user_embedding(self):
# Prepare data
self.prepare_data()
self.n_users += 50
# ================
attackers = ['AUSHplus_Dis_xiaorong', 'AUSHplus', 'SegmentAttacker', 'BandwagonAttacker',
'AverageAttacker', 'RandomAttacker',
'AUSH', 'RecsysAttacker',
'DCGAN', 'WGAN']
#
targets = [62] # [119, 422, 594, 884, 1593]
with tf.Session() as sess:
self.sess = sess
self.build_network()
sess.run(tf.global_variables_initializer())
for target in targets:
for attacker in attackers:
self.model_path = './results/model_saved/ml100k/ml100k_NeuMF_%s_%d' % (attacker, target)
if not os.path.exists(self.model_path + '.meta'):
continue
self.restore(self.model_path)
print("loading done.")
user_embedding, user_embedding_mlp = self.sess.run([self.P, self.mlp_P])
save_path = self.model_path + '_user_embed'
save_path = save_path.replace('model_saved', 'performance/mid_results')
np.save(save_path, user_embedding)
np.save(save_path + '_mlp', user_embedding_mlp)
return
def execute(self):
self.prepare_data()
# ================
# tensorflow session
config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
self.sess = sess
self.build_network()
init = tf.global_variables_initializer()
sess.run(init)
if self.restore_model:
self.restore(self.model_path)
print("loading done.")
else:
loss_prev = float('inf')
for epoch in range(self.epochs):
loss_cur = self.train()
if True: # self.verbose and epoch % self.T == 0:
print("epoch:\t", epoch, "\tloss:\t", loss_cur, flush=True)
if abs(loss_cur - loss_prev) < math.exp(-5):
break
loss_prev = loss_cur
self.save(self.model_path)
print("training done.")
rmse, mae = self.test()
print("RMSE : %.4f,\tMAE : %.4f" % (rmse, mae))
self.generate_target_result()
return
class NNMF(Recommender):
def __init__(self):
super(NNMF, self).__init__()
self.restore_model = self.args.restore_model
self.learning_rate = self.args.learning_rate
self.epochs = self.args.epoch
self.batch_size = self.args.batch_size
self.reg_rate = self.args.reg_rate
self.verbose = self.args.verbose
self.T = self.args.T
#
self.num_factor_1 = self.args.num_factor_1
self.num_factor_2 = self.args.num_factor_2
self.hidden_dimension = self.args.hidden_dimension
#
print("NNMF.")
@staticmethod
def parse_args():
parser = Recommender.parse_args()
#
parser.add_argument('--restore_model', type=int, default=0)
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--reg_rate', type=float, default=0.1)
parser.add_argument('--epoch', type=int, default=500)
parser.add_argument('--batch_size', type=int, default=500)
#
parser.add_argument('--num_factor_1', type=int, default=100)
parser.add_argument('--num_factor_2', type=int, default=10)
parser.add_argument('--hidden_dimension', type=int, default=50)
#
parser.add_argument('--verbose', type=int, default=1)
parser.add_argument('--T', type=int, default=5)
parser.add_argument('--display_step', type=int, default=1000)
#
# return parser.parse_args()
args, _ = parser.parse_known_args()
return args
def prepare_data(self):
super(NNMF, self).prepare_data()
#
self.train_matrix_coo = self.train_matrix.tocoo()
#
self.user = self.train_matrix_coo.row.reshape(-1)
self.item = self.train_matrix_coo.col.reshape(-1)
self.rating = self.train_matrix_coo.data
def build_network(self):
print("num_factor_1=%d, num_factor_2=%d, hidden_dimension=%d" % (
self.num_factor_1, self.num_factor_2, self.hidden_dimension))
# placeholder
self.user_id = tf.placeholder(dtype=tf.int32, shape=[None], name='user_id')
self.item_id = tf.placeholder(dtype=tf.int32, shape=[None], name='item_id')
self.y = tf.placeholder("float", [None], 'rating')
# Variable
P = tf.Variable(tf.random_normal([self.n_users, self.num_factor_1], stddev=0.01))
Q = tf.Variable(tf.random_normal([self.n_items, self.num_factor_1], stddev=0.01))
U = tf.Variable(tf.random_normal([self.n_users, self.num_factor_2], stddev=0.01))
V = tf.Variable(tf.random_normal([self.n_items, self.num_factor_2], stddev=0.01))
# forward
input = tf.concat(values=[tf.nn.embedding_lookup(P, self.user_id),
tf.nn.embedding_lookup(Q, self.item_id),
tf.multiply(tf.nn.embedding_lookup(U, self.user_id),
tf.nn.embedding_lookup(V, self.item_id))
], axis=1)
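# NNMF input: user factors, item factors, and their element-wise interaction, concatenated and fed to an MLP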
# tf1->tf2
# regularizer = tf.contrib.layers.l2_regularizer(scale=self.reg_rate)
regularizer = tf.keras.regularizers.l2(self.reg_rate)
layer_1 = tf.layers.dense(inputs=input, units=2 * self.num_factor_1 + self.num_factor_2,
bias_initializer=tf.random_normal_initializer,
kernel_initializer=tf.random_normal_initializer, activation=tf.sigmoid,
kernel_regularizer=regularizer)
layer_2 = tf.layers.dense(inputs=layer_1, units=self.hidden_dimension, activation=tf.sigmoid,
bias_initializer=tf.random_normal_initializer,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
layer_3 = tf.layers.dense(inputs=layer_2, units=self.hidden_dimension, activation=tf.sigmoid,
bias_initializer=tf.random_normal_initializer,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
layer_4 = tf.layers.dense(inputs=layer_3, units=self.hidden_dimension, activation=tf.sigmoid,
bias_initializer=tf.random_normal_initializer,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
output = tf.layers.dense(inputs=layer_4, units=1, activation=None,
bias_initializer=tf.random_normal_initializer,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
self.pred_rating = tf.reshape(output, [-1])
# backward
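        # loss = squared error + layer L2 penalties (kernel_regularizer) + an explicit norm penalty on the embedding matrices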
self.loss = tf.reduce_sum(tf.square(self.y - self.pred_rating)) \
+ tf.losses.get_regularization_loss() + self.reg_rate * (
tf.norm(U) + tf.norm(V) + tf.norm(P) + tf.norm(Q))
self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
def train(self):
self.num_training = len(self.rating)
total_batch = int(self.num_training / self.batch_size)
idxs = np.random.permutation(self.num_training) # shuffled ordering
user_random = list(self.user[idxs])
item_random = list(self.item[idxs])
rating_random = list(self.rating[idxs])
# train
loss = []
for i in range(total_batch):
batch_user = user_random[i * self.batch_size:(i + 1) * self.batch_size]
batch_item = item_random[i * self.batch_size:(i + 1) * self.batch_size]
batch_rating = rating_random[i * self.batch_size:(i + 1) * self.batch_size]
_, loss_ = self.sess.run(
[self.optimizer, self.loss],
feed_dict={self.user_id: batch_user,
self.item_id: batch_item,
self.y: batch_rating})
loss.append(loss_)
return np.mean(loss)
def test(self):
test_data = self.test_matrix.todok()
#
uids = np.array(list(test_data.keys()))[:, 0]
iids = np.array(list(test_data.keys()))[:, 1]
ground_truth = np.array(list(test_data.values()))
#
pred_rating = self.predict(uids, iids)
#
rmse = np.sqrt(np.mean((pred_rating - ground_truth) ** 2))
mae = np.mean(np.abs(pred_rating - ground_truth))
return rmse, mae
def predict(self, user_ids, item_ids):
if len(user_ids) < self.batch_size:
return self.sess.run(self.pred_rating,
feed_dict={
self.user_id: user_ids,
self.item_id: item_ids}
)
# predict by batch
total_batch = math.ceil(len(user_ids) / self.batch_size)
user_ids, item_ids = list(user_ids), list(item_ids)
pred_rating = []
for i in range(total_batch):
batch_user = user_ids[i * self.batch_size:(i + 1) * self.batch_size]
batch_item = item_ids[i * self.batch_size:(i + 1) * self.batch_size]
# predict
batch_pred_rating = self.sess.run(self.pred_rating,
feed_dict={
self.user_id: batch_user,
self.item_id: batch_item}
)
pred_rating += list(batch_pred_rating)
return pred_rating
def execute(self):
self.prepare_data()
# tensorflow session
config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
self.sess = sess
self.build_network()
init = tf.global_variables_initializer()
sess.run(init)
if self.restore_model:
self.restore(self.model_path)
print("loading done.")
else:
loss_prev = float('inf')
for epoch in range(self.epochs):
loss_cur = self.train()
if self.verbose and epoch % self.T == 0:
print("epoch:\t", epoch, "\tloss:\t", loss_cur)
if abs(loss_cur - loss_prev) < math.exp(-5):
break
loss_prev = loss_cur
self.save(self.model_path)
print("training done.")
rmse, mae = self.test()
print("RMSE : %.4f,\tMAE : %.4f" % (rmse, mae))
self.generate_target_result()
return
class NRR(Recommender):
def __init__(self):
super(NRR, self).__init__()
self.restore_model = self.args.restore_model
self.learning_rate = self.args.learning_rate
self.epochs = self.args.epoch
self.batch_size = self.args.batch_size
self.reg_rate = self.args.reg_rate
self.verbose = self.args.verbose
self.T = self.args.T
#
self.num_factor_user = self.args.num_factor_user
self.num_factor_item = self.args.num_factor_item
self.d = self.args.d
self.hidden_dimension = self.args.hidden_dimension
#
print("NRR.")
@staticmethod
def parse_args():
parser = Recommender.parse_args()
#
parser.add_argument('--restore_model', type=int, default=0)
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--reg_rate', type=float, default=0.1)
parser.add_argument('--epoch', type=int, default=500)
parser.add_argument('--batch_size', type=int, default=256)
#
parser.add_argument('--num_factor_user', type=int, default=40)
parser.add_argument('--num_factor_item', type=int, default=40)
parser.add_argument('--d', type=int, default=50)
parser.add_argument('--hidden_dimension', type=int, default=40)
#
parser.add_argument('--verbose', type=int, default=1)
parser.add_argument('--T', type=int, default=5)
parser.add_argument('--display_step', type=int, default=1000)
#
# return parser.parse_args()
args, _ = parser.parse_known_args()
return args
def build_network(self):
# model dependent arguments
self.user_id = tf.placeholder(dtype=tf.int32, shape=[None], name='user_id')
self.item_id = tf.placeholder(dtype=tf.int32, shape=[None], name='item_id')
self.y = tf.placeholder("float", [None], 'rating')
U = tf.Variable(tf.random_normal([self.n_users, self.num_factor_user], stddev=0.01))
V = tf.Variable(tf.random_normal([self.n_items, self.num_factor_item], stddev=0.01))
b = tf.Variable(tf.random_normal([self.d]))
user_latent_factor = tf.nn.embedding_lookup(U, self.user_id)
item_latent_factor = tf.nn.embedding_lookup(V, self.item_id)
W_User = tf.Variable(tf.random_normal([self.num_factor_user, self.d], stddev=0.01))
W_Item = tf.Variable(tf.random_normal([self.num_factor_item, self.d], stddev=0.01))
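        # project user and item factors into a shared d-dimensional space and sum them with the bias b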
input = tf.matmul(user_latent_factor, W_User) + tf.matmul(item_latent_factor, W_Item) + b
regularizer = tf.keras.regularizers.l2(self.reg_rate)
layer_1 = tf.layers.dense(inputs=input, units=self.d, bias_initializer=tf.random_normal_initializer,
kernel_initializer=tf.random_normal_initializer, activation=tf.sigmoid,
kernel_regularizer=regularizer)
layer_2 = tf.layers.dense(inputs=layer_1, units=self.hidden_dimension, activation=tf.sigmoid,
bias_initializer=tf.random_normal_initializer,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
layer_3 = tf.layers.dense(inputs=layer_2, units=self.hidden_dimension, activation=tf.sigmoid,
bias_initializer=tf.random_normal_initializer,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
layer_4 = tf.layers.dense(inputs=layer_3, units=self.hidden_dimension, activation=tf.sigmoid,
bias_initializer=tf.random_normal_initializer,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
output = tf.layers.dense(inputs=layer_4, units=1, activation=None,
bias_initializer=tf.random_normal_initializer,
kernel_initializer=tf.random_normal_initializer,
kernel_regularizer=regularizer)
self.pred_rating = tf.reshape(output, [-1])
# print(np.shape(output))
reg_losses = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
self.loss = tf.reduce_sum(tf.square(self.y - self.pred_rating)) \
+ tf.losses.get_regularization_loss() + self.reg_rate * (
tf.norm(U) + tf.norm(V) + tf.norm(b) + tf.norm(W_Item) + tf.norm(W_User))
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
def prepare_data(self):
super(NRR, self).prepare_data()
#
self.train_matrix_coo = self.train_matrix.tocoo()
#
self.user = self.train_matrix_coo.row.reshape(-1)
self.item = self.train_matrix_coo.col.reshape(-1)
self.rating = self.train_matrix_coo.data
def train(self):
self.num_training = len(self.rating)
total_batch = int(self.num_training / self.batch_size)
idxs = np.random.permutation(self.num_training) # shuffled ordering
user_random = list(self.user[idxs])
item_random = list(self.item[idxs])
rating_random = list(self.rating[idxs])
# train
loss = []
for i in range(total_batch):
batch_user = user_random[i * self.batch_size:(i + 1) * self.batch_size]
batch_item = item_random[i * self.batch_size:(i + 1) * self.batch_size]
batch_rating = rating_random[i * self.batch_size:(i + 1) * self.batch_size]
_, loss_ = self.sess.run([self.optimizer, self.loss],
feed_dict={self.user_id: batch_user,
self.item_id: batch_item,
self.y: batch_rating})
loss.append(loss_)
return np.mean(loss)
def test(self):
test_data = self.test_matrix.todok()
#
uids = np.array(list(test_data.keys()))[:, 0]
iids = np.array(list(test_data.keys()))[:, 1]
ground_truth = np.array(list(test_data.values()))
#
pred_rating = self.predict(uids, iids)
#
rmse = np.sqrt(np.mean((pred_rating - ground_truth) ** 2))
mae = np.mean(np.abs(pred_rating - ground_truth))
return rmse, mae
def predict(self, user_ids, item_ids):
if len(user_ids) < self.batch_size:
return self.sess.run(self.pred_rating,
feed_dict={
self.user_id: user_ids,
self.item_id: item_ids}
)
# predict by batch
total_batch = math.ceil(len(user_ids) / self.batch_size)
user_ids, item_ids = list(user_ids), list(item_ids)
pred_rating = []
for i in range(total_batch):
batch_user = user_ids[i * self.batch_size:(i + 1) * self.batch_size]
batch_item = item_ids[i * self.batch_size:(i + 1) * self.batch_size]
# predict
batch_pred_rating = self.sess.run(self.pred_rating,
feed_dict={
self.user_id: batch_user,
self.item_id: batch_item}
)
pred_rating += list(batch_pred_rating)
return pred_rating
def execute(self):
self.prepare_data()
# tensorflow session
config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
self.sess = sess
self.build_network()
init = tf.global_variables_initializer()
sess.run(init)
if self.restore_model:
self.restore(self.model_path)
print("loading done.")
else:
loss_prev = float('inf')
for epoch in range(self.epochs):
loss_cur = self.train()
if True: # self.verbose and epoch % self.T == 0:
print("epoch:\t", epoch, "\tloss:\t", loss_cur)
if abs(loss_cur - loss_prev) < math.exp(-5):
break
loss_prev = loss_cur
self.save(self.model_path)
print("training done.")
rmse, mae = self.test()
print("RMSE : %.4f,\tMAE : %.4f" % (rmse, mae))
self.generate_target_result()
return
class RecommenderOnSurprice(Recommender):
def __init__(self):
super(RecommenderOnSurprice, self).__init__()
print("CF build by surprise.")
def prepare_data(self):
super(RecommenderOnSurprice, self).prepare_data()
reader = Reader(line_format='user item rating', sep='\t', rating_scale=(1, 5))
data = Dataset.load_from_folds([(self.train_path, self.test_path)], reader=reader)
trainset, testset = None, None
pkf = PredefinedKFold()
for trainset_, testset_ in pkf.split(data):
trainset, testset = trainset_, testset_
self.trainset, self.testset = trainset, testset
def build_network(self):
print('build_network')
self.model = None
        raise NotImplementedError
def predict(self, user_ids, item_ids):
fn_pred = lambda x: self.model.predict(str(x[0]), str(x[1]), r_ui=0).est
pred_ratings = list(map(fn_pred, zip(user_ids, item_ids)))
return pred_ratings
def train(self):
self.model.fit(self.trainset)
return
def test(self):
preds = self.model.test(self.testset)
rmse = accuracy.rmse(preds, verbose=True)
print("RMSE : %.4f" % (rmse))
return
def execute(self):
self.prepare_data()
self.build_network()
self.train()
self.test()
self.generate_target_result()
return
class KNN(RecommenderOnSurprice):
def __init__(self):
super(KNN, self).__init__()
self.user_based = self.args.user_based
self.dis_method = self.args.dis_method
self.k = self.args.k
print("KNN.")
@staticmethod
def parse_args():
parser = Recommender.parse_args()
#
parser.add_argument('--user_based', type=int, default=0) # 1
parser.add_argument('--dis_method', type=str, default='msd')
parser.add_argument('--k', type=int, default=50) # 20
#
args, _ = parser.parse_known_args()
return args
def build_network(self):
sim_options = {'user_based': self.user_based, 'name': self.dis_method}
self.model = surprise.KNNBasic(sim_options=sim_options, k=self.k)
class NMF(RecommenderOnSurprice):
def __init__(self):
super(NMF, self).__init__()
self.n_factors = self.args.n_factors
print("NMF.")
@staticmethod
def parse_args():
parser = Recommender.parse_args()
#
parser.add_argument('--n_factors', type=int, default=25)
# return parser.parse_args()
args, _ = parser.parse_known_args()
return args
def build_network(self):
self.model = surprise.NMF(n_factors=self.n_factors)
class SVD(RecommenderOnSurprice):
def __init__(self):
super(SVD, self).__init__()
self.n_factors = self.args.n_factors
print('SVD.')
@staticmethod
def parse_args():
parser = Recommender.parse_args()
#
parser.add_argument('--n_factors', type=int, default=25)
#
# return parser.parse_args()
args, _ = parser.parse_known_args()
return args
def build_network(self):
self.model = surprise.SVD(n_factors=self.n_factors)
class SlopeOne(RecommenderOnSurprice):
def __init__(self):
super(SlopeOne, self).__init__()
# self.n_factors = self.args.n_factors
print('SlopeOne.')
@staticmethod
def parse_args():
parser = Recommender.parse_args()
#
# parser.add_argument('--n_factors', type=int, default=25)
#
# return parser.parse_args()
args, _ = parser.parse_known_args()
return args
def build_network(self):
self.model = surprise.SlopeOne()
class CoClustering(RecommenderOnSurprice):
def __init__(self):
super(CoClustering, self).__init__()
print('CoClustering.')
@staticmethod
def parse_args():
parser = Recommender.parse_args()
args, _ = parser.parse_known_args()
return args
def build_network(self):
self.model = surprise.CoClustering()
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES",
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES", "KMP_DUPLICATE_LIB_OK"]
|
python
| 3 | 0 | |
util.go
|
package main
import (
"os"
"strings"
)
func stripPrefix(name string) string {
	return strings.TrimPrefix(name, "goske-")
}
func goskeRepo() string {
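	// The GITHUB_GOSKE environment variable overrides the default "goske" value.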
s := os.Getenv("GITHUB_GOSKE")
if s != "" {
return s
}
return "goske"
}
|
[
"\"GITHUB_GOSKE\""
] |
[] |
[
"GITHUB_GOSKE"
] |
[]
|
["GITHUB_GOSKE"]
|
go
| 1 | 0 | |
meshLog.go
|
package meshLog
import (
"fmt"
"os"
"runtime"
"strings"
"github.com/fatih/color"
)
// LogLevel indicates the level of logging
type LogLevel int
const (
debug LogLevel = iota
info
warn
fatal
)
const (
nocolor = 0
red = 31
green = 32
yellow = 33
blue = 34
gray = 37
)
// Date / Time format
const dtFormat = "Jan 2 15:04:05"
// Logger is the wrapper around a given logging message
type Logger struct {
envVar string
messages []loggedMessage
}
type loggedMessage struct {
level LogLevel
message string
}
/**
* String Logging
*/
// Write mirrors the io.Writer signature so output can be routed to the
// logger. Messages are logged at info level.
func Write(data []byte) (int, error) {
Info(string(data))
return len(data), nil
}
// Debug is a convenience method appending a debug message to the logger
func Debug(obj interface{}) {
// Get the line number and calling func sig
_, fn, line, _ := runtime.Caller(1)
msg := fmt.Sprintf("%+v\n%s:%d\n\n", obj, fn, line)
formattedMessage := formattedLogMessage("DEBUG", msg)
color.Green(formattedMessage)
}
// Info is a convenience method appending a info style message to the logger
func Info(obj interface{}) {
// Get the line number and calling func sig
_, fn, line, _ := runtime.Caller(1)
msg := fmt.Sprintf("%+v\n%s:%d\n\n", obj, fn, line)
formattedMessage := formattedLogMessage("INFO", msg)
color.White(formattedMessage)
}
// Warn is a convenience method appending a warning message to the logger
func Warn(obj interface{}) {
// Get the line number and calling func sig
_, fn, line, _ := runtime.Caller(1)
msg := fmt.Sprintf("%+v\n%s:%d\n\n", obj, fn, line)
formattedMessage := formattedLogMessage("WARN", msg)
color.Yellow(formattedMessage)
}
// Fatal is a convenience method appending a fatal message to the logger
func Fatal(obj interface{}) {
// Get the line number and calling func sig
_, fn, line, _ := runtime.Caller(1)
msg := fmt.Sprintf("%+v\n%s:%d\n\n", obj, fn, line)
formattedMessage := formattedLogMessage("ERROR", msg)
color.Red(formattedMessage)
}
/**
* Formatted Strings
*/
// Debugf is a convenience method appending a debug message to the logger
func Debugf(msg string, a ...interface{}) {
_, fn, line, _ := runtime.Caller(1)
msg = fmt.Sprintf(msg, a...)
msg = fmt.Sprintf("%+v%s:%d\n\n", msg, fn, line)
formattedMessage := formattedLogMessage("DEBUG", msg)
color.Green(formattedMessage)
}
// Infof is a convenience method appending a info style message to the logger
func Infof(msg string, a ...interface{}) {
_, fn, line, _ := runtime.Caller(1)
msg = fmt.Sprintf(msg, a...)
msg = fmt.Sprintf("%+v%s:%d\n\n", msg, fn, line)
formattedMessage := formattedLogMessage("INFO", msg)
color.White(formattedMessage)
}
// Warnf is a convenience method appending a warning message to the logger
func Warnf(msg string, a ...interface{}) {
_, fn, line, _ := runtime.Caller(1)
msg = fmt.Sprintf(msg, a...)
msg = fmt.Sprintf("%+v%s:%d\n\n", msg, fn, line)
formattedMessage := formattedLogMessage("WARN", msg)
color.Yellow(formattedMessage)
}
// Fatalf is a convenience method appending a fatal message to the logger
func Fatalf(msg string, a ...interface{}) {
_, fn, line, _ := runtime.Caller(1)
msg = fmt.Sprintf(msg, a...)
msg = fmt.Sprintf("%+v%s:%d\n\n", msg, fn, line)
formattedMessage := formattedLogMessage("ERROR", msg)
color.Red(formattedMessage)
}
/**
* Internal Formatting
*/
func formattedLogMessage(level string, logMessage string) string {
	// Default to LOCAL unless the ENV environment variable is set.
env := "LOCAL"
if len(os.Getenv("ENV")) > 0 {
env = strings.ToUpper(os.Getenv("ENV"))
}
return fmt.Sprintf("[%s] - %s: %s", env, level, logMessage)
}
func formatColoredMessage(message string, level LogLevel) string {
var levelColor int
switch level {
case debug:
levelColor = yellow
case info:
levelColor = gray
case warn:
levelColor = green
case fatal:
levelColor = red
}
// levelText := strings.ToUpper(message)[0:4]
return fmt.Sprintf("\x1b[%dm%s\x1b", levelColor, message)
}
func stringValueForLogLevel(level LogLevel) string {
switch level {
case debug:
return "DEBUG"
case info:
return "INFO"
case warn:
return "WARN"
case fatal:
return "FATAL"
}
return "INFO"
}
/**
* Convenience for panic / err
*/
// Perror is syntactic sugar for logging and panicking on a non-nil error.
func Perror(err error) {
if err != nil {
Fatal(err)
panic(err)
}
}
|
[
"\"ENV\"",
"\"ENV\""
] |
[] |
[
"ENV"
] |
[]
|
["ENV"]
|
go
| 1 | 0 | |
tsdb/index/tsi1/index.go
|
package tsi1
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"runtime"
"strconv"
"sync"
"sync/atomic"
"unsafe"
"github.com/cespare/xxhash"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/estimator/hll"
"github.com/influxdata/influxdb/pkg/slices"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxql"
"go.uber.org/zap"
)
// IndexName is the name of the index.
const IndexName = tsdb.TSI1IndexName
// DefaultSeriesIDSetCacheSize is the default number of series ID sets to cache.
const DefaultSeriesIDSetCacheSize = 100
// ErrCompactionInterrupted is returned if compactions are disabled or
// an index is closed while a compaction is occurring.
var ErrCompactionInterrupted = errors.New("tsi1: compaction interrupted")
func init() {
if os.Getenv("INFLUXDB_EXP_TSI_PARTITIONS") != "" {
i, err := strconv.Atoi(os.Getenv("INFLUXDB_EXP_TSI_PARTITIONS"))
if err != nil {
panic(err)
}
DefaultPartitionN = uint64(i)
}
// TODO(edd): To remove when feature finalised.
var err error
if os.Getenv("INFLUXDB_EXP_TSI_CACHING") != "" {
EnableBitsetCache, err = strconv.ParseBool(os.Getenv("INFLUXDB_EXP_TSI_CACHING"))
if err != nil {
panic(err)
}
}
tsdb.RegisterIndex(IndexName, func(_ uint64, db, path string, _ *tsdb.SeriesIDSet, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Index {
idx := NewIndex(sfile, db, WithPath(path), WithMaximumLogFileSize(int64(opt.Config.MaxIndexLogFileSize)))
return idx
})
}
// DefaultPartitionN determines how many shards the index will be partitioned into.
//
// NOTE: Currently, this must not be changed once a database is created. Further,
// it must also be a power of 2.
//
var DefaultPartitionN uint64 = 8
// EnableBitsetCache determines if bitsets are cached.
var EnableBitsetCache = true
// An IndexOption is a functional option for changing the configuration of
// an Index.
type IndexOption func(i *Index)
// WithPath sets the root path of the Index
var WithPath = func(path string) IndexOption {
return func(i *Index) {
i.path = path
}
}
// DisableCompactions disables compactions on the Index.
var DisableCompactions = func() IndexOption {
return func(i *Index) {
i.disableCompactions = true
}
}
// WithLogger sets the logger for the Index.
var WithLogger = func(l zap.Logger) IndexOption {
return func(i *Index) {
i.logger = l.With(zap.String("index", "tsi"))
}
}
// WithMaximumLogFileSize sets the maximum size of LogFiles before they're
// compacted into IndexFiles.
var WithMaximumLogFileSize = func(size int64) IndexOption {
return func(i *Index) {
i.maxLogFileSize = size
}
}
// DisableFsync disables flushing and syncing of underlying files. Primarily this
// impacts the LogFiles. This option can be set when working with the index in
// an offline manner, for cases where a hard failure can be overcome by re-running the tooling.
var DisableFsync = func() IndexOption {
return func(i *Index) {
i.disableFsync = true
}
}
// WithLogFileBufferSize sets the size of the buffer used within LogFiles.
// Typically appending an entry to a LogFile involves writing 11 or 12 bytes, so
// depending on how many new series are being created within a batch, it may
// be appropriate to set this.
var WithLogFileBufferSize = func(sz int) IndexOption {
return func(i *Index) {
if sz > 1<<17 { // 128K
sz = 1 << 17
} else if sz < 1<<12 {
sz = 1 << 12 // 4K (runtime default)
}
i.logfileBufferSize = sz
}
}
// Index represents a collection of layered index files and WAL.
type Index struct {
mu sync.RWMutex
partitions []*Partition
opened bool
tagValueCache *TagValueSeriesIDCache
// The following may be set when initializing an Index.
path string // Root directory of the index partitions.
disableCompactions bool // Initially disables compactions on the index.
maxLogFileSize int64 // Maximum size of a LogFile before it's compacted.
logfileBufferSize int // The size of the buffer used by the LogFile.
	disableFsync       bool        // Disables flushing buffers and fsyncing files. Used when working with indexes offline.
logger *zap.Logger // Index's logger.
// The following must be set when initializing an Index.
sfile *tsdb.SeriesFile // series lookup file
database string // Name of database.
// Cached sketches.
mSketch, mTSketch estimator.Sketch // Measurement sketches
sSketch, sTSketch estimator.Sketch // Series sketches
// Index's version.
version int
// Number of partitions used by the index.
PartitionN uint64
}
func (i *Index) UniqueReferenceID() uintptr {
return uintptr(unsafe.Pointer(i))
}
// NewIndex returns a new instance of Index.
func NewIndex(sfile *tsdb.SeriesFile, database string, options ...IndexOption) *Index {
idx := &Index{
tagValueCache: NewTagValueSeriesIDCache(DefaultSeriesIDSetCacheSize),
maxLogFileSize: tsdb.DefaultMaxIndexLogFileSize,
logger: zap.NewNop(),
version: Version,
sfile: sfile,
database: database,
mSketch: hll.NewDefaultPlus(),
mTSketch: hll.NewDefaultPlus(),
sSketch: hll.NewDefaultPlus(),
sTSketch: hll.NewDefaultPlus(),
PartitionN: DefaultPartitionN,
}
for _, option := range options {
option(idx)
}
return idx
}
// Bytes estimates the memory footprint of this Index, in bytes.
func (i *Index) Bytes() int {
var b int
i.mu.RLock()
b += 24 // mu RWMutex is 24 bytes
b += int(unsafe.Sizeof(i.partitions))
for _, p := range i.partitions {
b += int(unsafe.Sizeof(p)) + p.bytes()
}
b += int(unsafe.Sizeof(i.opened))
b += int(unsafe.Sizeof(i.path)) + len(i.path)
b += int(unsafe.Sizeof(i.disableCompactions))
b += int(unsafe.Sizeof(i.maxLogFileSize))
b += int(unsafe.Sizeof(i.logger))
b += int(unsafe.Sizeof(i.sfile))
// Do not count SeriesFile because it belongs to the code that constructed this Index.
b += int(unsafe.Sizeof(i.mSketch)) + i.mSketch.Bytes()
b += int(unsafe.Sizeof(i.mTSketch)) + i.mTSketch.Bytes()
b += int(unsafe.Sizeof(i.sSketch)) + i.sSketch.Bytes()
b += int(unsafe.Sizeof(i.sTSketch)) + i.sTSketch.Bytes()
b += int(unsafe.Sizeof(i.database)) + len(i.database)
b += int(unsafe.Sizeof(i.version))
b += int(unsafe.Sizeof(i.PartitionN))
i.mu.RUnlock()
return b
}
// Database returns the name of the database the index was initialized with.
func (i *Index) Database() string {
return i.database
}
// WithLogger sets the logger on the index after it's been created.
//
// It's not safe to call WithLogger after the index has been opened, or before
// it has been closed.
func (i *Index) WithLogger(l *zap.Logger) {
i.mu.Lock()
defer i.mu.Unlock()
i.logger = l.With(zap.String("index", "tsi"))
}
// Type returns the type of Index this is.
func (i *Index) Type() string { return IndexName }
// SeriesFile returns the series file attached to the index.
func (i *Index) SeriesFile() *tsdb.SeriesFile { return i.sfile }
// SeriesIDSet returns the set of series ids associated with series in this
// index. Any series IDs for series no longer present in the index are filtered out.
func (i *Index) SeriesIDSet() *tsdb.SeriesIDSet {
seriesIDSet := tsdb.NewSeriesIDSet()
others := make([]*tsdb.SeriesIDSet, 0, i.PartitionN)
for _, p := range i.partitions {
others = append(others, p.seriesIDSet)
}
seriesIDSet.Merge(others...)
return seriesIDSet
}
// Open opens the index.
func (i *Index) Open() error {
i.mu.Lock()
defer i.mu.Unlock()
if i.opened {
return errors.New("index already open")
}
// Ensure root exists.
if err := os.MkdirAll(i.path, 0777); err != nil {
return err
}
// Initialize index partitions.
i.partitions = make([]*Partition, i.PartitionN)
for j := 0; j < len(i.partitions); j++ {
p := NewPartition(i.sfile, filepath.Join(i.path, fmt.Sprint(j)))
p.MaxLogFileSize = i.maxLogFileSize
p.nosync = i.disableFsync
p.logbufferSize = i.logfileBufferSize
p.logger = i.logger.With(zap.String("tsi1_partition", fmt.Sprint(j+1)))
i.partitions[j] = p
}
// Open all the Partitions in parallel.
partitionN := len(i.partitions)
n := i.availableThreads()
// Store results.
errC := make(chan error, partitionN)
// Run fn on each partition using a fixed number of goroutines.
var pidx uint32 // Index of maximum Partition being worked on.
for k := 0; k < n; k++ {
go func(k int) {
for {
idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on.
if idx >= partitionN {
return // No more work.
}
err := i.partitions[idx].Open()
errC <- err
}
}(k)
}
// Check for error
for i := 0; i < partitionN; i++ {
if err := <-errC; err != nil {
return err
}
}
// Refresh cached sketches.
if err := i.updateSeriesSketches(); err != nil {
return err
} else if err := i.updateMeasurementSketches(); err != nil {
return err
}
// Mark opened.
i.opened = true
i.logger.Info(fmt.Sprintf("index opened with %d partitions", partitionN))
return nil
}
// Compact requests a compaction of partitions.
func (i *Index) Compact() {
i.mu.Lock()
defer i.mu.Unlock()
for _, p := range i.partitions {
p.Compact()
}
}
func (i *Index) EnableCompactions() {
for _, p := range i.partitions {
p.EnableCompactions()
}
}
func (i *Index) DisableCompactions() {
for _, p := range i.partitions {
p.DisableCompactions()
}
}
// Wait blocks until all outstanding compactions have completed.
func (i *Index) Wait() {
for _, p := range i.partitions {
p.Wait()
}
}
// Close closes the index.
func (i *Index) Close() error {
// Lock index and close partitions.
i.mu.Lock()
defer i.mu.Unlock()
for _, p := range i.partitions {
if err := p.Close(); err != nil {
return err
}
}
// Mark index as closed.
i.opened = false
return nil
}
// Path returns the path the index was opened with.
func (i *Index) Path() string { return i.path }
// PartitionAt returns the partition by index.
func (i *Index) PartitionAt(index int) *Partition {
return i.partitions[index]
}
// partition returns the appropriate Partition for a provided series key.
func (i *Index) partition(key []byte) *Partition {
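	// Masking with PartitionN-1 works because PartitionN must be a power of two.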
return i.partitions[int(xxhash.Sum64(key)&(i.PartitionN-1))]
}
// partitionIdx returns the index of the partition that key belongs in.
func (i *Index) partitionIdx(key []byte) int {
return int(xxhash.Sum64(key) & (i.PartitionN - 1))
}
// availableThreads returns the minimum of GOMAXPROCS and the number of
// partitions in the Index.
func (i *Index) availableThreads() int {
n := runtime.GOMAXPROCS(0)
if len(i.partitions) < n {
return len(i.partitions)
}
return n
}
// updateMeasurementSketches rebuilds the cached measurement sketches.
func (i *Index) updateMeasurementSketches() error {
i.mSketch, i.mTSketch = hll.NewDefaultPlus(), hll.NewDefaultPlus()
for j := 0; j < int(i.PartitionN); j++ {
if s, t, err := i.partitions[j].MeasurementsSketches(); err != nil {
return err
		} else if err := i.mSketch.Merge(s); err != nil {
			return err
		} else if err := i.mTSketch.Merge(t); err != nil {
			return err
}
}
return nil
}
// updateSeriesSketches rebuilds the cached series sketches.
func (i *Index) updateSeriesSketches() error {
i.sSketch, i.sTSketch = hll.NewDefaultPlus(), hll.NewDefaultPlus()
for j := 0; j < int(i.PartitionN); j++ {
if s, t, err := i.partitions[j].SeriesSketches(); err != nil {
return err
		} else if err := i.sSketch.Merge(s); err != nil {
			return err
		} else if err := i.sTSketch.Merge(t); err != nil {
			return err
}
}
return nil
}
// SetFieldSet sets a shared field set from the engine.
func (i *Index) SetFieldSet(fs *tsdb.MeasurementFieldSet) {
for _, p := range i.partitions {
p.SetFieldSet(fs)
}
}
// FieldSet returns the assigned fieldset.
func (i *Index) FieldSet() *tsdb.MeasurementFieldSet {
if len(i.partitions) == 0 {
return nil
}
return i.partitions[0].FieldSet()
}
// ForEachMeasurementName iterates over all measurement names in the index,
// applying fn. It returns the first error encountered, if any.
//
// ForEachMeasurementName does not call fn on each partition concurrently so the
// call may provide a non-goroutine safe fn.
func (i *Index) ForEachMeasurementName(fn func(name []byte) error) error {
itr, err := i.MeasurementIterator()
if err != nil {
return err
} else if itr == nil {
return nil
}
defer itr.Close()
// Iterate over all measurements.
for {
e, err := itr.Next()
if err != nil {
return err
} else if e == nil {
break
}
if err := fn(e); err != nil {
return err
}
}
return nil
}
// MeasurementExists returns true if a measurement exists.
func (i *Index) MeasurementExists(name []byte) (bool, error) {
n := i.availableThreads()
// Store errors
var found uint32 // Use this to signal we found the measurement.
errC := make(chan error, i.PartitionN)
// Check each partition for the measurement concurrently.
var pidx uint32 // Index of maximum Partition being worked on.
for k := 0; k < n; k++ {
go func() {
for {
idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check
if idx >= len(i.partitions) {
return // No more work.
}
				// Check if the measurement has been found. If it has, we
				// don't need to check this partition and can just move on.
if atomic.LoadUint32(&found) == 1 {
errC <- nil
continue
}
b, err := i.partitions[idx].MeasurementExists(name)
if b {
atomic.StoreUint32(&found, 1)
}
errC <- err
}
}()
}
// Check for error
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return false, err
}
}
// Check if we found the measurement.
return atomic.LoadUint32(&found) == 1, nil
}
// MeasurementHasSeries returns true if a measurement has non-tombstoned series.
func (i *Index) MeasurementHasSeries(name []byte) (bool, error) {
for _, p := range i.partitions {
if v, err := p.MeasurementHasSeries(name); err != nil {
return false, err
} else if v {
return true, nil
}
}
return false, nil
}
// fetchByteValues is a helper for gathering values from each partition in the index,
// based on some criteria.
//
// fn is a function that works on partition idx and calls into some method on
// the partition that returns some ordered values.
func (i *Index) fetchByteValues(fn func(idx int) ([][]byte, error)) ([][]byte, error) {
n := i.availableThreads()
// Store results.
names := make([][][]byte, i.PartitionN)
errC := make(chan error, i.PartitionN)
var pidx uint32 // Index of maximum Partition being worked on.
for k := 0; k < n; k++ {
go func() {
for {
idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on.
if idx >= len(i.partitions) {
return // No more work.
}
pnames, err := fn(idx)
// This is safe since there are no readers on names until all
// the writers are done.
names[idx] = pnames
errC <- err
}
}()
}
// Check for error
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return nil, err
}
}
// It's now safe to read from names.
return slices.MergeSortedBytes(names[:]...), nil
}
// MeasurementIterator returns an iterator over all measurements.
func (i *Index) MeasurementIterator() (tsdb.MeasurementIterator, error) {
itrs := make([]tsdb.MeasurementIterator, 0, len(i.partitions))
for _, p := range i.partitions {
itr, err := p.MeasurementIterator()
if err != nil {
tsdb.MeasurementIterators(itrs).Close()
return nil, err
} else if itr != nil {
itrs = append(itrs, itr)
}
}
return tsdb.MergeMeasurementIterators(itrs...), nil
}
// MeasurementSeriesIDIterator returns an iterator over all series in a measurement.
func (i *Index) MeasurementSeriesIDIterator(name []byte) (tsdb.SeriesIDIterator, error) {
itrs := make([]tsdb.SeriesIDIterator, 0, len(i.partitions))
for _, p := range i.partitions {
itr, err := p.MeasurementSeriesIDIterator(name)
if err != nil {
tsdb.SeriesIDIterators(itrs).Close()
return nil, err
} else if itr != nil {
itrs = append(itrs, itr)
}
}
return tsdb.MergeSeriesIDIterators(itrs...), nil
}
// MeasurementNamesByRegex returns measurement names for the provided regex.
func (i *Index) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
return i.fetchByteValues(func(idx int) ([][]byte, error) {
return i.partitions[idx].MeasurementNamesByRegex(re)
})
}
// DropMeasurement deletes a measurement from the index. It returns the first
// error encountered, if any.
func (i *Index) DropMeasurement(name []byte) error {
n := i.availableThreads()
// Store results.
errC := make(chan error, i.PartitionN)
var pidx uint32 // Index of maximum Partition being worked on.
for k := 0; k < n; k++ {
go func() {
for {
idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on.
if idx >= len(i.partitions) {
return // No more work.
}
errC <- i.partitions[idx].DropMeasurement(name)
}
}()
}
// Check for error
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return err
}
}
// Update sketches under lock.
i.mu.Lock()
defer i.mu.Unlock()
i.mTSketch.Add(name)
if err := i.updateSeriesSketches(); err != nil {
return err
}
return nil
}
// CreateSeriesListIfNotExists creates a list of series in bulk if they don't already exist.
func (i *Index) CreateSeriesListIfNotExists(keys [][]byte, names [][]byte, tagsSlice []models.Tags) error {
// All slices must be of equal length.
if len(names) != len(tagsSlice) {
return errors.New("names/tags length mismatch in index")
}
// We need to move different series into collections for each partition
// to process.
pNames := make([][][]byte, i.PartitionN)
pTags := make([][]models.Tags, i.PartitionN)
// Determine partition for series using each series key.
for ki, key := range keys {
pidx := i.partitionIdx(key)
pNames[pidx] = append(pNames[pidx], names[ki])
pTags[pidx] = append(pTags[pidx], tagsSlice[ki])
}
// Process each subset of series on each partition.
n := i.availableThreads()
// Store errors.
errC := make(chan error, i.PartitionN)
var pidx uint32 // Index of maximum Partition being worked on.
for k := 0; k < n; k++ {
go func() {
for {
idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on.
if idx >= len(i.partitions) {
return // No more work.
}
ids, err := i.partitions[idx].createSeriesListIfNotExists(pNames[idx], pTags[idx])
var updateCache bool
for _, id := range ids {
if id != 0 {
updateCache = true
break
}
}
if !updateCache {
errC <- err
continue
}
// Some cached bitset results may need to be updated.
i.tagValueCache.RLock()
for j, id := range ids {
if id == 0 {
continue
}
name := pNames[idx][j]
tags := pTags[idx][j]
if i.tagValueCache.measurementContainsSets(name) {
for _, pair := range tags {
// TODO(edd): It's not clear to me yet whether it will be better to take a lock
// on every series id set, or whether to gather them all up under the cache rlock
// and then take the cache lock and update them all at once (without invoking a lock
// on each series id set).
//
// Taking the cache lock will block all queries, but is one lock. Taking each series set
// lock might be many lock/unlocks but will only block a query that needs that particular set.
//
// Need to think on it, but I think taking a lock on each series id set is the way to go.
//
// One other option here is to take a lock on the series id set when we first encounter it
// and then keep it locked until we're done with all the ids.
//
// Note: this will only add `id` to the set if it exists.
i.tagValueCache.addToSet(name, pair.Key, pair.Value, id) // Takes a lock on the series id set
}
}
}
i.tagValueCache.RUnlock()
errC <- err
}
}()
}
// Check for error
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return err
}
}
// Update sketches under lock.
i.mu.Lock()
defer i.mu.Unlock()
for _, key := range keys {
i.sSketch.Add(key)
}
for _, name := range names {
i.mSketch.Add(name)
}
return nil
}
// CreateSeriesIfNotExists creates a series if it doesn't exist or is deleted.
func (i *Index) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {
ids, err := i.partition(key).createSeriesListIfNotExists([][]byte{name}, []models.Tags{tags})
if err != nil {
return err
}
i.mu.Lock()
i.sSketch.Add(key)
i.mSketch.Add(name)
i.mu.Unlock()
if ids[0] == 0 {
return nil // No new series, nothing further to update.
}
// If there are cached sets for any of the tag pairs, they will need to be
// updated with the series id.
i.tagValueCache.RLock()
if i.tagValueCache.measurementContainsSets(name) {
for _, pair := range tags {
// TODO(edd): It's not clear to me yet whether it will be better to take a lock
// on every series id set, or whether to gather them all up under the cache rlock
// and then take the cache lock and update them all at once (without invoking a lock
// on each series id set).
//
// Taking the cache lock will block all queries, but is one lock. Taking each series set
// lock might be many lock/unlocks but will only block a query that needs that particular set.
//
// Need to think on it, but I think taking a lock on each series id set is the way to go.
//
// Note this will only add `id` to the set if it exists.
i.tagValueCache.addToSet(name, pair.Key, pair.Value, ids[0]) // Takes a lock on the series id set
}
}
i.tagValueCache.RUnlock()
return nil
}
// InitializeSeries is a no-op. This only applies to the in-memory index.
func (i *Index) InitializeSeries(keys, names [][]byte, tags []models.Tags) error {
return nil
}
// DropSeries drops the provided series from the index. If cascade is true
// and this is the last series in the measurement, the measurement will also be dropped.
func (i *Index) DropSeries(seriesID uint64, key []byte, cascade bool) error {
// Remove from partition.
if err := i.partition(key).DropSeries(seriesID); err != nil {
return err
}
// Add sketch tombstone.
i.mu.Lock()
i.sTSketch.Add(key)
i.mu.Unlock()
if !cascade {
return nil
}
// Extract measurement name & tags.
name, tags := models.ParseKeyBytes(key)
// If there are cached sets for any of the tag pairs, they will need to be
// updated with the series id.
i.tagValueCache.RLock()
if i.tagValueCache.measurementContainsSets(name) {
for _, pair := range tags {
i.tagValueCache.delete(name, pair.Key, pair.Value, seriesID) // Takes a lock on the series id set
}
}
i.tagValueCache.RUnlock()
// Check if that was the last series for the measurement in the entire index.
if ok, err := i.MeasurementHasSeries(name); err != nil {
return err
} else if ok {
return nil
}
// If no more series exist in the measurement then delete the measurement.
if err := i.DropMeasurement(name); err != nil {
return err
}
return nil
}
// DropSeriesGlobal is a no-op on the tsi1 index.
func (i *Index) DropSeriesGlobal(key []byte) error { return nil }
// DropMeasurementIfSeriesNotExist drops a measurement only if there are no more
// series for the measurement.
func (i *Index) DropMeasurementIfSeriesNotExist(name []byte) error {
// Check if that was the last series for the measurement in the entire index.
if ok, err := i.MeasurementHasSeries(name); err != nil {
return err
} else if ok {
return nil
}
// If no more series exist in the measurement then delete the measurement.
return i.DropMeasurement(name)
}
// MeasurementsSketches returns the two measurement sketches for the index.
func (i *Index) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
i.mu.RLock()
defer i.mu.RUnlock()
return i.mSketch.Clone(), i.mTSketch.Clone(), nil
}
// SeriesSketches returns the two series sketches for the index.
func (i *Index) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
i.mu.RLock()
defer i.mu.RUnlock()
return i.sSketch.Clone(), i.sTSketch.Clone(), nil
}
// SeriesN returns the number of unique non-tombstoned series in the index.
//
// Since indexes are not shared across shards, the count returned by SeriesN
// cannot be combined with other shard's results. If you need to count series
// across indexes then use either the database-wide series file, or merge the
// index-level bitsets or sketches.
func (i *Index) SeriesN() int64 {
return int64(i.SeriesIDSet().Cardinality())
}
// HasTagKey returns true if tag key exists. It returns the first error
// encountered if any.
func (i *Index) HasTagKey(name, key []byte) (bool, error) {
n := i.availableThreads()
// Store errors
var found uint32 // Use this to signal we found the tag key.
errC := make(chan error, i.PartitionN)
// Check each partition for the tag key concurrently.
var pidx uint32 // Index of maximum Partition being worked on.
for k := 0; k < n; k++ {
go func() {
for {
idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check
if idx >= len(i.partitions) {
return // No more work.
}
// Check if the tag key has already been found. If it has, we
// don't need to check this partition and can just move on.
if atomic.LoadUint32(&found) == 1 {
errC <- nil
continue
}
b, err := i.partitions[idx].HasTagKey(name, key)
if b {
atomic.StoreUint32(&found, 1)
}
errC <- err
}
}()
}
// Check for error
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return false, err
}
}
// Check if we found the tag key.
return atomic.LoadUint32(&found) == 1, nil
}
// HasTagValue returns true if tag value exists.
func (i *Index) HasTagValue(name, key, value []byte) (bool, error) {
n := i.availableThreads()
// Store errors
var found uint32 // Use this to signal we found the tag key.
errC := make(chan error, i.PartitionN)
// Check each partition for the tag key concurrently.
var pidx uint32 // Index of maximum Partition being worked on.
for k := 0; k < n; k++ {
go func() {
for {
idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check
if idx >= len(i.partitions) {
return // No more work.
}
// Check if the tag key has already been found. If it has, we
// don't need to check this partition and can just move on.
if atomic.LoadUint32(&found) == 1 {
errC <- nil
continue
}
b, err := i.partitions[idx].HasTagValue(name, key, value)
if b {
atomic.StoreUint32(&found, 1)
}
errC <- err
}
}()
}
// Check for error
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return false, err
}
}
// Check if we found the tag key.
return atomic.LoadUint32(&found) == 1, nil
}
// TagKeyIterator returns an iterator for all keys across a single measurement.
func (i *Index) TagKeyIterator(name []byte) (tsdb.TagKeyIterator, error) {
a := make([]tsdb.TagKeyIterator, 0, len(i.partitions))
for _, p := range i.partitions {
itr := p.TagKeyIterator(name)
if itr != nil {
a = append(a, itr)
}
}
return tsdb.MergeTagKeyIterators(a...), nil
}
// TagValueIterator returns an iterator for all values across a single key.
func (i *Index) TagValueIterator(name, key []byte) (tsdb.TagValueIterator, error) {
a := make([]tsdb.TagValueIterator, 0, len(i.partitions))
for _, p := range i.partitions {
itr := p.TagValueIterator(name, key)
if itr != nil {
a = append(a, itr)
}
}
return tsdb.MergeTagValueIterators(a...), nil
}
// TagKeySeriesIDIterator returns a series iterator for all values across a single key.
func (i *Index) TagKeySeriesIDIterator(name, key []byte) (tsdb.SeriesIDIterator, error) {
a := make([]tsdb.SeriesIDIterator, 0, len(i.partitions))
for _, p := range i.partitions {
itr := p.TagKeySeriesIDIterator(name, key)
if itr != nil {
a = append(a, itr)
}
}
return tsdb.MergeSeriesIDIterators(a...), nil
}
// TagValueSeriesIDIterator returns a series iterator for a single tag value.
func (i *Index) TagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) {
// Check series ID set cache...
if EnableBitsetCache {
if ss := i.tagValueCache.Get(name, key, value); ss != nil {
// Return a clone because the set is mutable.
return tsdb.NewSeriesIDSetIterator(ss.Clone()), nil
}
}
a := make([]tsdb.SeriesIDIterator, 0, len(i.partitions))
for _, p := range i.partitions {
itr, err := p.TagValueSeriesIDIterator(name, key, value)
if err != nil {
return nil, err
} else if itr != nil {
a = append(a, itr)
}
}
itr := tsdb.MergeSeriesIDIterators(a...)
if !EnableBitsetCache {
return itr, nil
}
// Check if the iterator contains only series id sets. Cache them...
if ssitr, ok := itr.(tsdb.SeriesIDSetIterator); ok {
ss := ssitr.SeriesIDSet()
ss.SetCOW(true) // This is important to speed the clone up.
i.tagValueCache.Put(name, key, value, ss)
}
return itr, nil
}
// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression.
func (i *Index) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
n := i.availableThreads()
// Store results.
keys := make([]map[string]struct{}, i.PartitionN)
errC := make(chan error, i.PartitionN)
var pidx uint32 // Index of maximum Partition being worked on.
for k := 0; k < n; k++ {
go func() {
for {
idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on.
if idx >= len(i.partitions) {
return // No more work.
}
// This is safe since there are no readers on keys until all
// the writers are done.
tagKeys, err := i.partitions[idx].MeasurementTagKeysByExpr(name, expr)
keys[idx] = tagKeys
errC <- err
}
}()
}
// Check for error
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return nil, err
}
}
// Merge into single map.
result := keys[0]
for k := 1; k < len(i.partitions); k++ {
for k := range keys[k] {
result[k] = struct{}{}
}
}
return result, nil
}
// DiskSizeBytes returns the size of the index on disk.
func (i *Index) DiskSizeBytes() int64 {
fs, err := i.RetainFileSet()
if err != nil {
i.logger.Warn("Index is closing down")
return 0
}
defer fs.Release()
var manifestSize int64
// Get MANIFEST sizes from each partition.
for _, p := range i.partitions {
manifestSize += p.manifestSize
}
return fs.Size() + manifestSize
}
// TagKeyCardinality always returns zero.
// It is not possible to determine cardinality of tags across index files, and
// thus it cannot be done across partitions.
func (i *Index) TagKeyCardinality(name, key []byte) int {
return 0
}
// RetainFileSet returns the set of all files across all partitions.
// This is only needed when all files need to be retained for an operation.
func (i *Index) RetainFileSet() (*FileSet, error) {
i.mu.RLock()
defer i.mu.RUnlock()
fs, _ := NewFileSet(nil, i.sfile, nil)
for _, p := range i.partitions {
pfs, err := p.RetainFileSet()
if err != nil {
fs.Close()
return nil, err
}
fs.files = append(fs.files, pfs.files...)
}
return fs, nil
}
// SetFieldName is a no-op on this index.
func (i *Index) SetFieldName(measurement []byte, name string) {}
// Rebuild rebuilds an index. It's a no-op for this index.
func (i *Index) Rebuild() {}
// IsIndexDir returns true if directory contains at least one partition directory.
func IsIndexDir(path string) (bool, error) {
fis, err := ioutil.ReadDir(path)
if err != nil {
return false, err
}
for _, fi := range fis {
if !fi.IsDir() {
continue
} else if ok, err := IsPartitionDir(filepath.Join(path, fi.Name())); err != nil {
return false, err
} else if ok {
return true, nil
}
}
return false, nil
}
|
[
"\"INFLUXDB_EXP_TSI_PARTITIONS\"",
"\"INFLUXDB_EXP_TSI_PARTITIONS\"",
"\"INFLUXDB_EXP_TSI_CACHING\"",
"\"INFLUXDB_EXP_TSI_CACHING\""
] |
[] |
[
"INFLUXDB_EXP_TSI_PARTITIONS",
"INFLUXDB_EXP_TSI_CACHING"
] |
[]
|
["INFLUXDB_EXP_TSI_PARTITIONS", "INFLUXDB_EXP_TSI_CACHING"]
|
go
| 2 | 0 | |
lib/multipart/multipart.go
|
package multipart
import (
"os"
//"fmt"
"errors"
"net/http"
"io/ioutil"
"mime/multipart"
//"gopkg.in/mgo.v2/bson"
)
// UploadFile uploads a file to the server
func UploadFile(w http.ResponseWriter, r *http.Request) (interface{}, error) {
file, handle, err := r.FormFile("file")
if err != nil {
return nil, err
}
defer file.Close()
//var path = nil
mimeType := handle.Header.Get("Content-Type")
switch mimeType {
case "image/jpeg":
return saveFile(w, file, handle)
case "image/jpg":
return saveFile(w, file, handle)
case "image/png":
return saveFile(w, file, handle)
default:
return nil, errors.New("The format file is not valid.")
}
return nil, nil
}
func saveFile(w http.ResponseWriter, file multipart.File, handle *multipart.FileHeader) (interface{}, error) {
	data, err := ioutil.ReadAll(file)
if err != nil {
return nil, err
}
	// Create ./upload if it does not exist.
if _, err := os.Stat("./upload"); os.IsNotExist(err) {
_ = os.Mkdir("upload", os.ModePerm)
}
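	// The destination directory is taken from the UPLOAD_PATH environment variable.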
if err := ioutil.WriteFile(os.Getenv("UPLOAD_PATH") + handle.Filename, data, 0666); err != nil {
return nil, err
}
return map[string]string{"filename": handle.Filename}, nil
//return bson.M{"filename": handle.Filename}, nil
}
|
[
"\"UPLOAD_PATH\""
] |
[] |
[
"UPLOAD_PATH"
] |
[]
|
["UPLOAD_PATH"]
|
go
| 1 | 0 | |
pkg/subscriber/helmrepo/helmrepo_subscriber_suite_test.go
|
// Copyright 2019 The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helmrepo
import (
"os"
"testing"
"time"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
mgr "sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/open-cluster-management/multicloud-operators-subscription/pkg/apis"
)
const (
k8swait = time.Second * 3
StartTimeout = 30 // seconds
)
var testEnv *envtest.Environment
var k8sManager mgr.Manager
var k8sClient client.Client
func TestSubscriptionNamespaceReconcile(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Helm Controller Suite",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func(done Done) {
By("bootstrapping test environment")
t := true
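	// TEST_USE_EXISTING_CLUSTER=true runs the suite against a running cluster; otherwise envtest installs the CRDs locally.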
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
testEnv = &envtest.Environment{
UseExistingCluster: &t,
}
} else {
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "deploy", "crds"),
filepath.Join("..", "..", "..", "hack", "test")},
}
}
cfg, err := testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = apis.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
k8sManager, err = mgr.New(cfg, mgr.Options{MetricsBindAddress: "0"})
Expect(err).ToNot(HaveOccurred())
Expect(Add(k8sManager, k8sManager.GetConfig(), &types.NamespacedName{}, 2)).NotTo(HaveOccurred())
go func() {
err = k8sManager.Start(ctrl.SetupSignalHandler())
Expect(err).ToNot(HaveOccurred())
}()
k8sClient = k8sManager.GetClient()
Expect(k8sClient).ToNot(BeNil())
close(done)
}, StartTimeout)
var _ = AfterSuite(func() {
By("tearing down the test environment")
gexec.KillAndWait(5 * time.Second)
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
|
[
"\"TEST_USE_EXISTING_CLUSTER\""
] |
[] |
[
"TEST_USE_EXISTING_CLUSTER"
] |
[]
|
["TEST_USE_EXISTING_CLUSTER"]
|
go
| 1 | 0 | |
share/qt/extract_strings_qt.py
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/paparastrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *papara_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("papara-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
[] |
[] |
[
"XGETTEXT"
] |
[]
|
["XGETTEXT"]
|
python
| 1 | 0 | |
test/nlp_worker/unit/test_formats.py
|
import json
import os.path
import unittest
from io import BytesIO
from lxml import etree
from workers.nlp.formats.book_viewer_json import BookViewerJsonBuilder, Kind, parse_reference_from_url
from workers.nlp.formats.xmi import Annotation, DaiNlpXmiReader, DaiNlpXmiBuilder, DaiNlpFormatError
resources_dir = os.environ["RESOURCES_DIR"]
path_typesystem_dai = os.path.join(resources_dir, "nlp_typesystem_dai.xml")
class XPathAsserting:
@staticmethod
def _eval_xpath(xml: str, xpath: str, **kwargs):
return etree.parse(BytesIO(xml.encode("utf-8"))).xpath(xpath, **kwargs)
def assert_xpath_present(self, xml: str, xpath, **kwargs):
result = self._eval_xpath(xml, xpath, **kwargs)
error = ""
# list, bool, str, float are the possible return types
# as per lxml's documentation
if isinstance(result, list) and len(result) == 0:
error = "Empty list."
elif isinstance(result, bool) and not result:
error = "False."
elif isinstance(result, str) and len(result) == 0:
error = "Empty string."
elif isinstance(result, float):
pass
else:
"Unknown return type."
if error:
raise AssertionError(f"XPath {xpath} returned: {error}")
pass
def assert_xpath_exactly_n_times(self, xml: str, xpath: str, n: int, **kwargs):
result = self._eval_xpath(xml, xpath, **kwargs)
if not isinstance(result, list):
raise AssertionError(f"XPath does not return a list: '{xpath}' returns: {result}")
if not len(result) == n:
raise AssertionError(f"Xpath returns {len(result)} times, expected: {n}, is: '{xpath}'")
def assert_xpath_exactly_once(self, xml: str, xpath: str, **kwargs):
self.assert_xpath_exactly_n_times(xml=xml, xpath=xpath, n=1, **kwargs)
class DaiNlpXmiReaderTest(unittest.TestCase):
empty_input = """<?xml version='1.0' encoding='ASCII'?>
<xmi:XMI xmlns:xmi="http://www.omg.org/XMI" xmlns:cas="http:///uima/cas.ecore" xmlns:LayoutElement="http:///org/dainst/nlp/LayoutElement.ecore" xmi:version="2.0">
<cas:NULL xmi:id="0"/>
<cas:Sofa xmi:id="1" sofaNum="1" sofaID="_InitialView" mimeType="None" sofaString="Perikles war ein Grieche. Genauso wie Aristoteles aus Athen."/>
<cas:View sofa="1" members=""/>
</xmi:XMI>
"""
def test_get_sofa(self):
reader = DaiNlpXmiReader(xmi=self.empty_input)
expected_sofa = 'Perikles war ein Grieche. Genauso wie Aristoteles aus Athen.'
self.assertEqual(reader.get_sofa(), expected_sofa)
class DaiNlpXmiBuilderTest(unittest.TestCase, XPathAsserting):
text = 'Perikles war ein Grieche. Genauso wie Aristoteles aus Stageira.'
annotator = 'unittest-annotator'
ns = {'nlp': 'http:///org/dainst/nlp.ecore'}
entity_args = dict(kind=Annotation.named_entity, start=0, end=7)
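    # kwargs reused by the tests below: a NamedEntity annotation near the start of the text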
def setUp(self) -> None:
self.builder = DaiNlpXmiBuilder(default_annotator_id=self.annotator)
self.builder.set_sofa(self.text)
def test_cannot_change_sofa_once_set(self):
with self.assertRaises(DaiNlpFormatError):
self.builder.set_sofa('xyz')
def test_can_add_simple_annotation(self):
self.builder.add_annotation(**self.entity_args)
self.assert_xpath_exactly_once(self.builder.xmi(), '//nlp:NamedEntity[@begin=0 and @end=7]',
namespaces=self.ns)
def test_setting_the_default_annotator_id_works(self):
# using the default annotator defined during setup
self.builder.add_annotation(**self.entity_args)
self.assert_xpath_exactly_once(self.builder.xmi(), f"//nlp:NamedEntity[@annotatorId='{self.annotator}']",
namespaces=self.ns)
# changing the id should change it for the next annotation in the xmi
self.builder.default_annotator_id = 'xyz'
self.builder.add_annotation(**self.entity_args)
self.assert_xpath_exactly_once(self.builder.xmi(), "//nlp:NamedEntity[@annotatorId='xyz']",
namespaces=self.ns)
def test_can_add_annotation_with_references(self):
args = dict(
**self.entity_args,
references=[
'https://example.com/ref1',
'https://example.com/ref2'
]
)
self.builder.add_annotation(**args)
for ref in args["references"]:
with self.subTest(ref=ref):
self.assert_xpath_exactly_once(self.builder.xmi(), f"//references[text()='{ref}']",
namespaces=self.ns)
def test_setting_an_annotator_id_works(self):
args = dict(**self.entity_args, annotatorId='custom')
self.builder.add_annotation(**args)
self.assert_xpath_exactly_once(self.builder.xmi(), "//nlp:NamedEntity[@annotatorId='custom']",
namespaces=self.ns)
def test_fails_when_annotator_id_set_empty_on_annotation(self):
args = dict(**self.entity_args, annotatorId='')
with self.assertRaises(DaiNlpFormatError):
self.builder.add_annotation(**args)
def test_cannot_add_attributes_not_defined_in_typesystem(self):
args = dict(**self.entity_args, fail_attr="some_value")
with self.assertRaises(DaiNlpFormatError):
self.builder.add_annotation(**args)
def test_cannot_add_empty_or_bogus_annotation_type(self):
for name in ['', 'bogusEntity']:
with self.subTest(name=name):
args = {**self.entity_args, 'type_name': name}
with self.assertRaises(DaiNlpFormatError):
self.builder.add_annotation(**args)
class BookViewerJsonTest(unittest.TestCase):
def setUp(self) -> None:
self.builder = BookViewerJsonBuilder()
self.builder.add_occurence(Kind.location, "Rom", page=2, term="Roma")
self.builder.add_occurence(Kind.location, "Rom", page=3, term="Roms")
self.builder.add_occurence(Kind.location, "Rom", page=2, term="Rom")
self.builder.add_occurence(Kind.location, "Athen", page=4, term="Athen")
def _result(self):
return json.loads(self.builder.to_json())
def _location_items(self):
# return the location items as tuple: (rome, athens)
locations = self._result()['locations']['items']
if locations[0]['lemma'] != 'Rom':
locations = reversed(locations)
return tuple(locations)
def _rome(self):
return self._location_items()[0]
def test_adding_occurences(self):
result = self._result()
self.assertIsInstance(result, dict)
locations = result['locations']['items']
self.assertEqual(2, len(locations))
rome, athens = self._location_items()
self.assertListEqual(rome['pages'], [2, 3])
self.assertListEqual(athens['pages'], [4])
self.assertEqual(rome['count'], 3)
self.assertEqual(athens['count'], 1)
# There should be empty fields for the other keys
for key in ['persons', 'keyterms', 'time_expressions']:
self.assertIsInstance(result[key], dict)
self.assertListEqual(result[key]['items'], [])
def test_adding_references(self):
self.assertListEqual(self._rome()['references'], [])
inputs = [
dict(id='2323295', url='https://gazetteer.dainst.org/place/2323295', type='gazetteer'),
dict(id='fU6rkJhWHGsd', url='http://chronontology.dainst.org/period/fU6rkJhWHGsd', type='chronontology')
]
self.builder.add_reference(Kind.location, 'Rom', **inputs[0])
self.builder.add_reference(Kind.location, 'Rom', **inputs[1])
rome = self._rome()
self.assertEqual(len(rome['references']), 2)
self.assertIn(inputs[0], rome['references'])
self.assertIn(inputs[1], rome['references'])
def test_setting_the_score(self):
self.assertIsNone(self._rome()['score'])
self.builder.set_score(Kind.location, 'Rom', 12.345)
self.assertEqual(self._rome()['score'], 12.345, 'Should set score field')
self.builder.set_score(Kind.location, 'Rom', 23.456)
self.assertEqual(self._rome()['score'], 23.456, 'Should override score field')
def test_adding_coordinates(self):
self.assertIsNone(self._rome()['coordinates'], 'Should be None initially')
self.builder.set_coordinates(Kind.location, 'Rom', (1.23456, 12.3456))
self.assertListEqual(self._rome()['coordinates'], [1.23456, 12.3456], 'Should set coords as list')
self.builder.set_coordinates(Kind.location, 'Rom', (2.34567, 23.4567))
self.assertListEqual(self._rome()['coordinates'], [2.34567, 23.4567], 'Should override coords')
class UrlToReferenceParseTest(unittest.TestCase):
def test_parsing_known_url(self):
result = parse_reference_from_url('https://gazetteer.dainst.org/place/2128554')
expecting = ('2128554', 'https://gazetteer.dainst.org/place/2128554', 'gazetteer')
self.assertEqual(result, expecting)
result = parse_reference_from_url('http://chronontology.dainst.org/period/NAAfB2FfP3Rj')
expecting = ('NAAfB2FfP3Rj', 'http://chronontology.dainst.org/period/NAAfB2FfP3Rj', 'chronontology')
self.assertEqual(result, expecting)
def test_known_urls_non_standard(self):
# trailing slash
expecting = ('2128554', 'https://gazetteer.dainst.org/place/2128554', 'gazetteer')
self.assertEqual(expecting, parse_reference_from_url('https://gazetteer.dainst.org/place/2128554/'))
# with params
expecting = ('2128554', 'https://gazetteer.dainst.org/place/2128554?foo=bar', 'gazetteer')
self.assertEqual(expecting, parse_reference_from_url('https://gazetteer.dainst.org/place/2128554?foo=bar'))
def test_parsing_unknown_url(self):
result = parse_reference_from_url('https://xyz.example.com/some/path?param=123')
expecting = ('', 'https://xyz.example.com/some/path?param=123', 'xyz.example.com')
self.assertEqual(result, expecting)
def test_parsing_invalid_url(self):
result = parse_reference_from_url('not-a-scheme://///bla-bla')
self.assertEqual(result, ('', '', ''))
result = parse_reference_from_url(None)
self.assertEqual(result, ('', '', ''))
|
[] |
[] |
[
"RESOURCES_DIR"
] |
[]
|
["RESOURCES_DIR"]
|
python
| 1 | 0 | |
blog/__init__.py
|
import os
from flask import Flask
app = Flask(__name__)
config_path = os.environ.get("CONFIG_PATH", "blog.config.DevelopmentConfig")
app.config.from_object(config_path)
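# imported after the app is configured; these modules attach the blog's views, template filters and login handling to `app`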
from . import views
from . import filters
from . import login
|
[] |
[] |
[
"CONFIG_PATH"
] |
[]
|
["CONFIG_PATH"]
|
python
| 1 | 0 | |
config/config.go
|
package config
import (
"os"
"github.com/requaos/qorfun/config/bindatafs"
"github.com/qor/assetfs"
"github.com/jinzhu/configor"
"github.com/unrolled/render"
amazonpay "github.com/qor/amazon-pay-sdk-go"
"github.com/qor/auth/providers/facebook"
"github.com/qor/auth/providers/github"
"github.com/qor/auth/providers/google"
"github.com/qor/auth/providers/twitter"
"github.com/qor/gomerchant"
"github.com/qor/location"
"github.com/qor/mailer"
"github.com/qor/mailer/logger"
"github.com/qor/media/oss"
"github.com/qor/oss/s3"
"github.com/qor/redirect_back"
"github.com/qor/session/manager"
)
type SMTPConfig struct {
Host string
Port string
User string
Password string
}
var Config = struct {
HTTPS bool `default:"false" env:"HTTPS"`
Port uint `default:"7000" env:"PORT"`
DB struct {
Name string `env:"DBName" default:"qor_example"`
Adapter string `env:"DBAdapter" default:"mysql"`
Host string `env:"DBHost" default:"localhost"`
Port string `env:"DBPort" default:"3306"`
User string `env:"DBUser"`
Password string `env:"DBPassword"`
}
S3 struct {
AccessKeyID string `env:"AWS_ACCESS_KEY_ID"`
SecretAccessKey string `env:"AWS_SECRET_ACCESS_KEY"`
Region string `env:"AWS_Region"`
S3Bucket string `env:"AWS_Bucket"`
}
AmazonPay struct {
MerchantID string `env:"AmazonPayMerchantID"`
AccessKey string `env:"AmazonPayAccessKey"`
SecretKey string `env:"AmazonPaySecretKey"`
ClientID string `env:"AmazonPayClientID"`
ClientSecret string `env:"AmazonPayClientSecret"`
Sandbox bool `env:"AmazonPaySandbox"`
CurrencyCode string `env:"AmazonPayCurrencyCode" default:"USD"`
}
SMTP SMTPConfig
Github github.Config
Google google.Config
Facebook facebook.Config
Twitter twitter.Config
GoogleAPIKey string `env:"GoogleAPIKey"`
BaiduAPIKey string `env:"BaiduAPIKey"`
}{}
var (
Root = os.Getenv("GOPATH") + "/src/github.com/requaos/qorfun"
Mailer *mailer.Mailer
Render = render.New()
AmazonPay *amazonpay.AmazonPay
PaymentGateway gomerchant.PaymentGateway
RedirectBack = redirect_back.New(&redirect_back.Config{
SessionManager: manager.SessionManager,
IgnoredPrefixes: []string{"/auth"},
})
)
func init() {
if err := configor.Load(&Config, "config/database.yml", "config/smtp.yml", "config/application.yml"); err != nil {
panic(err)
}
assetfs.SetAssetFS(bindatafs.AssetFS)
location.GoogleAPIKey = Config.GoogleAPIKey
location.BaiduAPIKey = Config.BaiduAPIKey
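// switch media storage to S3 only when credentials are configured; otherwise keep the default oss storage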
if Config.S3.AccessKeyID != "" {
oss.Storage = s3.New(&s3.Config{
AccessID: Config.S3.AccessKeyID,
AccessKey: Config.S3.SecretAccessKey,
Region: Config.S3.Region,
Bucket: Config.S3.S3Bucket,
})
}
AmazonPay = amazonpay.New(&amazonpay.Config{
MerchantID: Config.AmazonPay.MerchantID,
AccessKey: Config.AmazonPay.AccessKey,
SecretKey: Config.AmazonPay.SecretKey,
Sandbox: true,
Region: "jp",
})
// dialer := gomail.NewDialer(Config.SMTP.Host, Config.SMTP.Port, Config.SMTP.User, Config.SMTP.Password)
// sender, err := dialer.Dial()
// Mailer = mailer.New(&mailer.Config{
// Sender: gomailer.New(&gomailer.Config{Sender: sender}),
// })
Mailer = mailer.New(&mailer.Config{
AssetFS: assetfs.AssetFS(),
Sender: logger.New(&logger.Config{}),
})
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
src/cmd/dist/buildtool.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Build toolchain using Go 1.4.
//
// The general strategy is to copy the source files we need into
// a new GOPATH workspace, adjust import paths appropriately,
// invoke the Go 1.4 go command to build those sources,
// and then copy the binaries back.
package main
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
// bootstrapDirs is a list of directories holding code that must be
// compiled with a Go 1.4 toolchain to produce the bootstrapTargets.
// All directories in this list are relative to and must be below $GOROOT/src.
//
// The list has two kinds of entries: names beginning with cmd/ with
// no other slashes, which are commands, and other paths, which are packages
// supporting the commands. Packages in the standard library can be listed
// if a newer copy needs to be substituted for the Go 1.4 copy when used
// by the command packages.
// These will be imported during bootstrap as bootstrap/name, like bootstrap/math/big.
var bootstrapDirs = []string{
"cmd/asm",
"cmd/asm/internal/arch",
"cmd/asm/internal/asm",
"cmd/asm/internal/flags",
"cmd/asm/internal/lex",
"cmd/cgo",
"cmd/compile",
"cmd/compile/internal/amd64",
"cmd/compile/internal/arm",
"cmd/compile/internal/arm64",
"cmd/compile/internal/gc",
"cmd/compile/internal/logopt",
"cmd/compile/internal/mips",
"cmd/compile/internal/mips64",
"cmd/compile/internal/ppc64",
"cmd/compile/internal/riscv64",
"cmd/compile/internal/s390x",
"cmd/compile/internal/ssa",
"cmd/compile/internal/syntax",
"cmd/compile/internal/types",
"cmd/compile/internal/x86",
"cmd/compile/internal/wasm",
"cmd/internal/bio",
"cmd/internal/gcprog",
"cmd/internal/dwarf",
"cmd/internal/edit",
"cmd/internal/goobj",
"cmd/internal/objabi",
"cmd/internal/obj",
"cmd/internal/obj/arm",
"cmd/internal/obj/arm64",
"cmd/internal/obj/mips",
"cmd/internal/obj/ppc64",
"cmd/internal/obj/riscv",
"cmd/internal/obj/s390x",
"cmd/internal/obj/x86",
"cmd/internal/obj/wasm",
"cmd/internal/src",
"cmd/internal/sys",
"cmd/link",
"cmd/link/internal/amd64",
"cmd/link/internal/arm",
"cmd/link/internal/arm64",
"cmd/link/internal/benchmark",
"cmd/link/internal/ld",
"cmd/link/internal/loadelf",
"cmd/link/internal/loader",
"cmd/link/internal/loadmacho",
"cmd/link/internal/loadpe",
"cmd/link/internal/loadxcoff",
"cmd/link/internal/mips",
"cmd/link/internal/mips64",
"cmd/link/internal/ppc64",
"cmd/link/internal/riscv64",
"cmd/link/internal/s390x",
"cmd/link/internal/sym",
"cmd/link/internal/x86",
"compress/flate",
"compress/zlib",
"cmd/link/internal/wasm",
"container/heap",
"debug/dwarf",
"debug/elf",
"debug/macho",
"debug/pe",
"internal/goversion",
"internal/race",
"internal/unsafeheader",
"internal/xcoff",
"math/big",
"math/bits",
"sort",
}
// File prefixes that are ignored by go/build anyway, and cause
// problems with editor generated temporary files (#18931).
var ignorePrefixes = []string{
".",
"_",
}
// File suffixes that use build tags introduced since Go 1.4.
// These must not be copied into the bootstrap build directory.
// Also ignore test files.
var ignoreSuffixes = []string{
"_arm64.s",
"_arm64.go",
"_riscv64.s",
"_riscv64.go",
"_wasm.s",
"_wasm.go",
"_test.s",
}
func bootstrapBuildTools() {
goroot_bootstrap := os.Getenv("GOROOT_BOOTSTRAP")
if goroot_bootstrap == "" {
goroot_bootstrap = pathf("%s/go1.4", os.Getenv("HOME"))
}
xprintf("Building Go toolchain1 using %s.\n", goroot_bootstrap)
mkzbootstrap(pathf("%s/src/cmd/internal/objabi/zbootstrap.go", goroot))
// Use $GOROOT/pkg/bootstrap as the bootstrap workspace root.
// We use a subdirectory of $GOROOT/pkg because that's the
// space within $GOROOT where we store all generated objects.
// We could use a temporary directory outside $GOROOT instead,
// but it is easier to debug on failure if the files are in a known location.
workspace := pathf("%s/pkg/bootstrap", goroot)
xremoveall(workspace)
xatexit(func() { xremoveall(workspace) })
base := pathf("%s/src/bootstrap", workspace)
xmkdirall(base)
// Copy source code into $GOROOT/pkg/bootstrap and rewrite import paths.
writefile("module bootstrap\n", pathf("%s/%s", base, "go.mod"), 0)
for _, dir := range bootstrapDirs {
src := pathf("%s/src/%s", goroot, dir)
dst := pathf("%s/%s", base, dir)
xmkdirall(dst)
if dir == "cmd/cgo" {
// Write to src because we need the file both for bootstrap
// and for later in the main build.
mkzdefaultcc("", pathf("%s/zdefaultcc.go", src))
}
Dir:
for _, name := range xreaddirfiles(src) {
for _, pre := range ignorePrefixes {
if strings.HasPrefix(name, pre) {
continue Dir
}
}
for _, suf := range ignoreSuffixes {
if strings.HasSuffix(name, suf) {
continue Dir
}
}
srcFile := pathf("%s/%s", src, name)
dstFile := pathf("%s/%s", dst, name)
text := bootstrapRewriteFile(srcFile)
writefile(text, dstFile, 0)
}
}
// Set up environment for invoking Go 1.4 go command.
// GOROOT points at Go 1.4 GOROOT,
// GOPATH points at our bootstrap workspace,
// GOBIN is empty, so that binaries are installed to GOPATH/bin,
// and GOOS, GOHOSTOS, GOARCH, and GOHOSTARCH are empty,
// so that Go 1.4 builds whatever kind of binary it knows how to build.
// Restore GOROOT, GOPATH, and GOBIN when done.
// Don't bother with GOOS, GOHOSTOS, GOARCH, and GOHOSTARCH,
// because setup will take care of those when bootstrapBuildTools returns.
defer os.Setenv("GOROOT", os.Getenv("GOROOT"))
os.Setenv("GOROOT", goroot_bootstrap)
defer os.Setenv("GOPATH", os.Getenv("GOPATH"))
os.Setenv("GOPATH", workspace)
defer os.Setenv("GOBIN", os.Getenv("GOBIN"))
os.Setenv("GOBIN", "")
os.Setenv("GOOS", "")
os.Setenv("GOHOSTOS", "")
os.Setenv("GOARCH", "")
os.Setenv("GOHOSTARCH", "")
// Run Go 1.4 to build binaries. Use -gcflags=-l to disable inlining to
// workaround bugs in Go 1.4's compiler. See discussion thread:
// https://groups.google.com/d/msg/golang-dev/Ss7mCKsvk8w/Gsq7VYI0AwAJ
// Use the math_big_pure_go build tag to disable the assembly in math/big
// which may contain unsupported instructions.
// Note that if we are using Go 1.10 or later as bootstrap, the -gcflags=-l
// only applies to the final cmd/go binary, but that's OK: if this is Go 1.10
// or later we don't need to disable inlining to work around bugs in the Go 1.4 compiler.
cmd := []string{
pathf("%s/bin/go", goroot_bootstrap),
"install",
"-gcflags=-l",
"-tags=math_big_pure_go compiler_bootstrap",
}
if vflag > 0 {
cmd = append(cmd, "-v")
}
if tool := os.Getenv("GOBOOTSTRAP_TOOLEXEC"); tool != "" {
cmd = append(cmd, "-toolexec="+tool)
}
cmd = append(cmd, "bootstrap/cmd/...")
run(base, ShowOutput|CheckExit, cmd...)
// Copy binaries into tool binary directory.
for _, name := range bootstrapDirs {
if !strings.HasPrefix(name, "cmd/") {
continue
}
name = name[len("cmd/"):]
if !strings.Contains(name, "/") {
copyfile(pathf("%s/%s%s", tooldir, name, exe), pathf("%s/bin/%s%s", workspace, name, exe), writeExec)
}
}
if vflag > 0 {
xprintf("\n")
}
}
var ssaRewriteFileSubstring = filepath.FromSlash("src/cmd/compile/internal/ssa/rewrite")
// isUnneededSSARewriteFile reports whether srcFile is a
// src/cmd/compile/internal/ssa/rewriteARCHNAME.go file for an
// architecture that isn't for the current runtime.GOARCH.
//
// When unneeded is true, archCaps is the rewrite base filename without
// the "rewrite" prefix or ".go" suffix: AMD64, 386, ARM, ARM64, etc.
func isUnneededSSARewriteFile(srcFile string) (archCaps string, unneeded bool) {
if !strings.Contains(srcFile, ssaRewriteFileSubstring) {
return "", false
}
fileArch := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(srcFile), "rewrite"), ".go")
if fileArch == "" {
return "", false
}
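// architecture-specific rewrite files are named with an upper-case GOARCH (e.g. rewriteAMD64.go);
// anything lower-case or starting with '_' is a shared file that must always be kept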
b := fileArch[0]
if b == '_' || ('a' <= b && b <= 'z') {
return "", false
}
archCaps = fileArch
fileArch = strings.ToLower(fileArch)
fileArch = strings.TrimSuffix(fileArch, "splitload")
if fileArch == os.Getenv("GOHOSTARCH") {
return "", false
}
if fileArch == strings.TrimSuffix(runtime.GOARCH, "le") {
return "", false
}
if fileArch == strings.TrimSuffix(os.Getenv("GOARCH"), "le") {
return "", false
}
return archCaps, true
}
func bootstrapRewriteFile(srcFile string) string {
// During bootstrap, generate dummy rewrite files for
// irrelevant architectures. We only need to build a bootstrap
// binary that works for the current runtime.GOARCH.
// This saves 6+ seconds of bootstrap.
if archCaps, ok := isUnneededSSARewriteFile(srcFile); ok {
return fmt.Sprintf(`// Code generated by go tool dist; DO NOT EDIT.
package ssa
func rewriteValue%s(v *Value) bool { panic("unused during bootstrap") }
func rewriteBlock%s(b *Block) bool { panic("unused during bootstrap") }
`, archCaps, archCaps)
}
return bootstrapFixImports(srcFile)
}
func bootstrapFixImports(srcFile string) string {
lines := strings.SplitAfter(readfile(srcFile), "\n")
inBlock := false
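// scan the file line by line, tracking whether we are inside a grouped import ( ... ) block
// so that both single-line and grouped imports get rewritten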
for i, line := range lines {
if strings.HasPrefix(line, "import (") {
inBlock = true
continue
}
if inBlock && strings.HasPrefix(line, ")") {
inBlock = false
continue
}
if strings.HasPrefix(line, `import "`) || strings.HasPrefix(line, `import . "`) ||
inBlock && (strings.HasPrefix(line, "\t\"") || strings.HasPrefix(line, "\t. \"")) {
line = strings.Replace(line, `"cmd/`, `"bootstrap/cmd/`, -1)
for _, dir := range bootstrapDirs {
if strings.HasPrefix(dir, "cmd/") {
continue
}
line = strings.Replace(line, `"`+dir+`"`, `"bootstrap/`+dir+`"`, -1)
}
lines[i] = line
}
}
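// prepend the generated-code header and a //line directive so compiler diagnostics still point at the original source file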
lines[0] = "// Code generated by go tool dist; DO NOT EDIT.\n// This is a bootstrap copy of " + srcFile + "\n\n//line " + srcFile + ":1\n" + lines[0]
return strings.Join(lines, "")
}
|
[
"\"GOROOT_BOOTSTRAP\"",
"\"HOME\"",
"\"GOROOT\"",
"\"GOPATH\"",
"\"GOBIN\"",
"\"GOBOOTSTRAP_TOOLEXEC\"",
"\"GOHOSTARCH\"",
"\"GOARCH\""
] |
[] |
[
"GOHOSTARCH",
"GOBIN",
"GOROOT",
"GOPATH",
"GOROOT_BOOTSTRAP",
"GOBOOTSTRAP_TOOLEXEC",
"GOARCH",
"HOME"
] |
[]
|
["GOHOSTARCH", "GOBIN", "GOROOT", "GOPATH", "GOROOT_BOOTSTRAP", "GOBOOTSTRAP_TOOLEXEC", "GOARCH", "HOME"]
|
go
| 8 | 0 | |
pkg/certinstall/certinstall_linux.go
|
// +build linux, !darwin
package certinstall
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"
"github.com/craftcms/nitro/pkg/sudo"
)
var (
certificatePaths = map[string]string{
"arch": "/etc/ca-certificates/trust-source/anchors",
"debian": "/usr/local/share/ca-certificates/",
"fedora": "/etc/pki/ca-trust/source/anchors",
}
certificateTools = map[string]string{
"arch": "update-ca-trust",
"debian": "update-ca-certificates",
"fedora": "update-ca-trust",
}
)
// Install is responsible for taking a path to a root certificate and the runtime.GOOS as the system
// and finding the distribution and tools to install a root certificate.
func Install(file, system string) error {
// find the release tool
lsb, _ := exec.LookPath("lsb_release")
var dist string
switch lsb == "" {
// lsb_release is not installed, so assume fedora or RHEL
case true:
dist = "fedora"
default:
// setup the command
cmd := exec.Command(lsb, "--description")
// capture the command output into a buffer
buf := bytes.NewBufferString("")
cmd.Stdout = buf
if err := cmd.Start(); err != nil {
return err
}
if err := cmd.Wait(); err != nil {
return err
}
// find the linux distro
found, err := identify(buf.String())
if err != nil {
return err
}
dist = found
}
// get the certpath
certPath, ok := certificatePaths[dist]
if !ok {
return fmt.Errorf("unable to find the certificate path for %s", dist)
}
// get the cert tool
certTool, ok := certificateTools[dist]
if !ok {
return fmt.Errorf("unable to find the certificate tool for %s", dist)
}
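// move the certificate into the distro's trust anchor directory and refresh the system store (both need sudo)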
if err := sudo.Run("mv", "mv", file, fmt.Sprintf("%s%s.crt", certPath, "nitro")); err != nil {
return fmt.Errorf("unable to move the certificate, %w", err)
}
// update the ca certs
if err := sudo.Run(certTool, certTool); err != nil {
return err
}
// is this a wsl machine?
if dist, exists := os.LookupEnv("WSL_DISTRO_NAME"); exists {
user := os.Getenv("USER")
fmt.Println("Users on WSL will need to open an elevated (run as administrator) Command Prompt or terminal on Windows and run the following command:")
fmt.Println(fmt.Sprintf(`certutil -addstore -f "Root" \\wsl$\%s\home\%s\.nitro\nitro.crt`, dist, user))
}
return nil
}
func identify(description string) (string, error) {
// detect arch systems
if strings.Contains(description, "Manjaro") || strings.Contains(description, "Arch Linux") {
return "arch", nil
}
// detect debian systems
if strings.Contains(description, "Ubuntu") || strings.Contains(description, "Pop!_OS") || strings.Contains(description, "Mint") || strings.Contains(description, "elementary") || strings.Contains(description, "Debian") {
return "debian", nil
}
return "", fmt.Errorf("unable to find the distribution from the description: %s", description)
}
|
[
"\"USER\""
] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
go
| 1 | 0 | |
pkg/kubectl/cmd/util/factory_builder.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// this file contains factories with no other dependencies
package util
import (
"os"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/dynamic"
scaleclient "k8s.io/client-go/scale"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/plugins"
)
type ring2Factory struct {
clientAccessFactory ClientAccessFactory
objectMappingFactory ObjectMappingFactory
}
func NewBuilderFactory(clientAccessFactory ClientAccessFactory, objectMappingFactory ObjectMappingFactory) BuilderFactory {
f := &ring2Factory{
clientAccessFactory: clientAccessFactory,
objectMappingFactory: objectMappingFactory,
}
return f
}
// PluginLoader loads plugins from a path set by the KUBECTL_PLUGINS_PATH env var.
// If this env var is not set, it defaults to
// "~/.kube/plugins", plus
// "./kubectl/plugins" directory under the "data dir" directory specified by the XDG
// system directory structure spec for the given platform.
func (f *ring2Factory) PluginLoader() plugins.PluginLoader {
if len(os.Getenv("KUBECTL_PLUGINS_PATH")) > 0 {
return plugins.KubectlPluginsPathPluginLoader()
}
return plugins.TolerantMultiPluginLoader{
plugins.XDGDataDirsPluginLoader(),
plugins.UserDirPluginLoader(),
}
}
func (f *ring2Factory) PluginRunner() plugins.PluginRunner {
return &plugins.ExecPluginRunner{}
}
func (f *ring2Factory) ScaleClient() (scaleclient.ScalesGetter, error) {
discoClient, err := f.clientAccessFactory.DiscoveryClient()
if err != nil {
return nil, err
}
restClient, err := f.clientAccessFactory.RESTClient()
if err != nil {
return nil, err
}
resolver := scaleclient.NewDiscoveryScaleKindResolver(discoClient)
mapper, err := f.clientAccessFactory.RESTMapper()
if err != nil {
return nil, err
}
return scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil
}
func (f *ring2Factory) Scaler() (kubectl.Scaler, error) {
scalesGetter, err := f.ScaleClient()
if err != nil {
return nil, err
}
return kubectl.NewScaler(scalesGetter), nil
}
func (f *ring2Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
clientset, clientsetErr := f.clientAccessFactory.ClientSet()
if clientsetErr != nil {
return nil, clientsetErr
}
scaler, err := f.ScaleClient()
if err != nil {
return nil, err
}
reaper, reaperErr := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), clientset, scaler)
if kubectl.IsNoSuchReaperError(reaperErr) {
return nil, reaperErr
}
return reaper, reaperErr
}
|
[
"\"KUBECTL_PLUGINS_PATH\""
] |
[] |
[
"KUBECTL_PLUGINS_PATH"
] |
[]
|
["KUBECTL_PLUGINS_PATH"]
|
go
| 1 | 0 | |
tests/hazmat/backends/test_openssl.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import itertools
import os
import subprocess
import sys
import textwrap
from pkg_resources import parse_version
import pytest
from cryptography import utils, x509
from cryptography.exceptions import InternalError, _Reasons
from cryptography.hazmat.backends.interfaces import RSABackend
from cryptography.hazmat.backends.openssl.backend import (
Backend, backend
)
from cryptography.hazmat.backends.openssl.ec import _sn_to_elliptic_curve
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import dsa, ec, padding
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from ..primitives.fixtures_rsa import RSA_KEY_2048, RSA_KEY_512
from ...doubles import (
DummyAsymmetricPadding, DummyCipherAlgorithm, DummyHashAlgorithm, DummyMode
)
from ...test_x509 import _load_cert
from ...utils import load_vectors_from_file, raises_unsupported_algorithm
def skip_if_libre_ssl(openssl_version):
if u'LibreSSL' in openssl_version:
pytest.skip("LibreSSL hard-codes RAND_bytes to use arc4random.")
class TestLibreSkip(object):
def test_skip_no(self):
assert skip_if_libre_ssl(u"OpenSSL 1.0.2h 3 May 2016") is None
def test_skip_yes(self):
with pytest.raises(pytest.skip.Exception):
skip_if_libre_ssl(u"LibreSSL 2.1.6")
class DummyMGF(object):
_salt_length = 0
class TestOpenSSL(object):
def test_backend_exists(self):
assert backend
def test_openssl_version_text(self):
"""
This test checks the value of OPENSSL_VERSION_TEXT.
Unfortunately, this define does not appear to have a
formal content definition, so for now we'll test to see
if it starts with OpenSSL or LibreSSL as that appears
to be true for every OpenSSL-alike.
"""
assert (
backend.openssl_version_text().startswith("OpenSSL") or
backend.openssl_version_text().startswith("LibreSSL")
)
def test_openssl_version_number(self):
assert backend.openssl_version_number() > 0
def test_supports_cipher(self):
assert backend.cipher_supported(None, None) is False
def test_register_duplicate_cipher_adapter(self):
with pytest.raises(ValueError):
backend.register_cipher_adapter(AES, CBC, None)
@pytest.mark.parametrize("mode", [DummyMode(), None])
def test_nonexistent_cipher(self, mode):
b = Backend()
b.register_cipher_adapter(
DummyCipherAlgorithm,
type(mode),
lambda backend, cipher, mode: backend._ffi.NULL
)
cipher = Cipher(
DummyCipherAlgorithm(), mode, backend=b,
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.encryptor()
def test_openssl_assert(self):
backend.openssl_assert(True)
with pytest.raises(InternalError):
backend.openssl_assert(False)
def test_consume_errors(self):
for i in range(10):
backend._lib.ERR_put_error(backend._lib.ERR_LIB_EVP, 0, 0,
b"test_openssl.py", -1)
assert backend._lib.ERR_peek_error() != 0
errors = backend._consume_errors()
assert backend._lib.ERR_peek_error() == 0
assert len(errors) == 10
def test_ssl_ciphers_registered(self):
meth = backend._lib.TLSv1_method()
ctx = backend._lib.SSL_CTX_new(meth)
assert ctx != backend._ffi.NULL
backend._lib.SSL_CTX_free(ctx)
def test_evp_ciphers_registered(self):
cipher = backend._lib.EVP_get_cipherbyname(b"aes-256-cbc")
assert cipher != backend._ffi.NULL
def test_error_strings_loaded(self):
# returns a value in a static buffer
err = backend._lib.ERR_error_string(101183626, backend._ffi.NULL)
assert backend._ffi.string(err) == (
b"error:0607F08A:digital envelope routines:EVP_EncryptFinal_ex:"
b"data not multiple of block length"
)
def test_unknown_error_in_cipher_finalize(self):
cipher = Cipher(AES(b"\0" * 16), CBC(b"\0" * 16), backend=backend)
enc = cipher.encryptor()
enc.update(b"\0")
backend._lib.ERR_put_error(0, 0, 1,
b"test_openssl.py", -1)
with pytest.raises(InternalError):
enc.finalize()
def test_large_key_size_on_new_openssl(self):
parameters = dsa.generate_parameters(2048, backend)
param_num = parameters.parameter_numbers()
assert utils.bit_length(param_num.p) == 2048
parameters = dsa.generate_parameters(3072, backend)
param_num = parameters.parameter_numbers()
assert utils.bit_length(param_num.p) == 3072
def test_int_to_bn(self):
value = (2 ** 4242) - 4242
bn = backend._int_to_bn(value)
assert bn != backend._ffi.NULL
bn = backend._ffi.gc(bn, backend._lib.BN_free)
assert bn
assert backend._bn_to_int(bn) == value
def test_int_to_bn_inplace(self):
value = (2 ** 4242) - 4242
bn_ptr = backend._lib.BN_new()
assert bn_ptr != backend._ffi.NULL
bn_ptr = backend._ffi.gc(bn_ptr, backend._lib.BN_free)
bn = backend._int_to_bn(value, bn_ptr)
assert bn == bn_ptr
assert backend._bn_to_int(bn_ptr) == value
def test_bn_to_int(self):
bn = backend._int_to_bn(0)
assert backend._bn_to_int(bn) == 0
class TestOpenSSLRandomEngine(object):
def setup(self):
# The default RAND engine is global and shared between
# tests. We make sure that the default engine is osrandom
# before we start each test and restore the global state to
# that engine in teardown.
current_default = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(current_default)
assert name == backend._binding._osrandom_engine_name
def teardown(self):
# we need to reset state to being default. backend is a shared global
# for all these tests.
backend.activate_osrandom_engine()
current_default = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(current_default)
assert name == backend._binding._osrandom_engine_name
@pytest.mark.skipif(sys.executable is None,
reason="No Python interpreter available.")
def test_osrandom_engine_is_default(self, tmpdir):
engine_printer = textwrap.dedent(
"""
import sys
from cryptography.hazmat.backends.openssl.backend import backend
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
sys.stdout.write(backend._ffi.string(name).decode('ascii'))
res = backend._lib.ENGINE_free(e)
assert res == 1
"""
)
engine_name = tmpdir.join('engine_name')
# If we're running tests via ``python setup.py test`` in a clean
# environment then all of our dependencies are going to be installed
# into either the current directory or the .eggs directory. However the
# subprocess won't know to activate these dependencies, so we'll get it
# to do so by passing our entire sys.path into the subprocess via the
# PYTHONPATH environment variable.
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(sys.path)
with engine_name.open('w') as out:
subprocess.check_call(
[sys.executable, "-c", engine_printer],
env=env,
stdout=out,
stderr=subprocess.PIPE,
)
osrandom_engine_name = backend._ffi.string(
backend._binding._osrandom_engine_name
)
assert engine_name.read().encode('ascii') == osrandom_engine_name
def test_osrandom_sanity_check(self):
# This test serves as a check against catastrophic failure.
buf = backend._ffi.new("unsigned char[]", 500)
res = backend._lib.RAND_bytes(buf, 500)
assert res == 1
assert backend._ffi.buffer(buf)[:] != "\x00" * 500
def test_activate_osrandom_no_default(self):
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
backend.activate_osrandom_engine()
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._binding._osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
def test_activate_builtin_random(self):
e = backend._lib.ENGINE_get_default_RAND()
assert e != backend._ffi.NULL
name = backend._lib.ENGINE_get_name(e)
assert name == backend._binding._osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
def test_activate_builtin_random_already_active(self):
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
def test_osrandom_engine_implementation(self):
name = backend.osrandom_engine_implementation()
assert name in ['/dev/urandom', 'CryptGenRandom', 'getentropy',
'getrandom']
if sys.platform.startswith('linux'):
assert name in ['getrandom', '/dev/urandom']
if sys.platform == 'darwin':
# macOS 10.12+ supports getentropy
if parse_version(os.uname()[2]) >= parse_version("16.0"):
assert name == 'getentropy'
else:
assert name == '/dev/urandom'
if 'bsd' in sys.platform:
assert name in ['getentropy', '/dev/urandom']
if sys.platform == 'win32':
assert name == 'CryptGenRandom'
def test_activate_osrandom_already_default(self):
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._binding._osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
backend.activate_osrandom_engine()
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._binding._osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
class TestOpenSSLRSA(object):
def test_generate_rsa_parameters_supported(self):
assert backend.generate_rsa_parameters_supported(1, 1024) is False
assert backend.generate_rsa_parameters_supported(4, 1024) is False
assert backend.generate_rsa_parameters_supported(3, 1024) is True
assert backend.generate_rsa_parameters_supported(3, 511) is False
def test_generate_bad_public_exponent(self):
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=1, key_size=2048)
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=4, key_size=2048)
def test_cant_generate_insecure_tiny_key(self):
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=65537,
key_size=511)
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=65537,
key_size=256)
def test_rsa_padding_unsupported_pss_mgf1_hash(self):
assert backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(DummyHashAlgorithm()), salt_length=0)
) is False
def test_rsa_padding_unsupported(self):
assert backend.rsa_padding_supported(DummyAsymmetricPadding()) is False
def test_rsa_padding_supported_pkcs1v15(self):
assert backend.rsa_padding_supported(padding.PKCS1v15()) is True
def test_rsa_padding_supported_pss(self):
assert backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
) is True
def test_rsa_padding_supported_oaep(self):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
),
) is True
@pytest.mark.skipif(
backend._lib.Cryptography_HAS_RSA_OAEP_MD == 0,
reason="Requires OpenSSL with rsa_oaep_md (1.0.2+)"
)
def test_rsa_padding_supported_oaep_sha2_combinations(self):
hashalgs = [
hashes.SHA1(),
hashes.SHA224(),
hashes.SHA256(),
hashes.SHA384(),
hashes.SHA512(),
]
for mgf1alg, oaepalg in itertools.product(hashalgs, hashalgs):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=mgf1alg),
algorithm=oaepalg,
label=None
),
) is True
def test_rsa_padding_unsupported_oaep_ripemd160_sha1(self):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.RIPEMD160()),
algorithm=hashes.SHA1(),
label=None
),
) is False
def test_rsa_padding_unsupported_oaep_sha1_ripemd160(self):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.RIPEMD160(),
label=None
),
) is False
def test_rsa_padding_unsupported_mgf(self):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=DummyMGF(),
algorithm=hashes.SHA1(),
label=None
),
) is False
assert backend.rsa_padding_supported(
padding.PSS(mgf=DummyMGF(), salt_length=0)
) is False
@pytest.mark.skipif(
backend._lib.Cryptography_HAS_RSA_OAEP_MD == 1,
reason="Requires OpenSSL without rsa_oaep_md (< 1.0.2)"
)
def test_unsupported_mgf1_hash_algorithm_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA1(),
label=None
)
)
@pytest.mark.skipif(
backend._lib.Cryptography_HAS_RSA_OAEP_MD == 1,
reason="Requires OpenSSL without rsa_oaep_md (< 1.0.2)"
)
def test_unsupported_oaep_hash_algorithm_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA256(),
label=None
)
)
def test_unsupported_mgf1_hash_algorithm_ripemd160_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.RIPEMD160()),
algorithm=hashes.RIPEMD160(),
label=None
)
)
def test_unsupported_mgf1_hash_algorithm_whirlpool_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.Whirlpool()),
algorithm=hashes.Whirlpool(),
label=None
)
)
def test_unsupported_oaep_label_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(ValueError):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=b"label"
)
)
class TestOpenSSLCMAC(object):
def test_unsupported_cipher(self):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
backend.create_cmac_ctx(DummyCipherAlgorithm())
class TestOpenSSLSignX509Certificate(object):
def test_requires_certificate_builder(self):
private_key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
backend.create_x509_certificate(
object(), private_key, DummyHashAlgorithm()
)
class TestOpenSSLSignX509CertificateRevocationList(object):
def test_invalid_builder(self):
private_key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
backend.create_x509_crl(object(), private_key, hashes.SHA256())
class TestOpenSSLCreateRevokedCertificate(object):
def test_invalid_builder(self):
with pytest.raises(TypeError):
backend.create_x509_revoked_certificate(object())
class TestOpenSSLSerializationWithOpenSSL(object):
def test_pem_password_cb(self):
userdata = backend._ffi.new("CRYPTOGRAPHY_PASSWORD_DATA *")
pw = b"abcdefg"
password = backend._ffi.new("char []", pw)
userdata.password = password
userdata.length = len(pw)
buflen = 10
buf = backend._ffi.new("char []", buflen)
res = backend._lib.Cryptography_pem_password_cb(
buf, buflen, 0, userdata
)
assert res == len(pw)
assert userdata.called == 1
assert backend._ffi.buffer(buf, len(pw))[:] == pw
assert userdata.maxsize == buflen
assert userdata.error == 0
def test_pem_password_cb_no_password(self):
userdata = backend._ffi.new("CRYPTOGRAPHY_PASSWORD_DATA *")
buflen = 10
buf = backend._ffi.new("char []", buflen)
res = backend._lib.Cryptography_pem_password_cb(
buf, buflen, 0, userdata
)
assert res == 0
assert userdata.error == -1
def test_unsupported_evp_pkey_type(self):
key = backend._create_evp_pkey_gc()
with raises_unsupported_algorithm(None):
backend._evp_pkey_to_private_key(key)
with raises_unsupported_algorithm(None):
backend._evp_pkey_to_public_key(key)
def test_very_long_pem_serialization_password(self):
password = b"x" * 1024
with pytest.raises(ValueError):
load_vectors_from_file(
os.path.join(
"asymmetric", "Traditional_OpenSSL_Serialization",
"key1.pem"
),
lambda pemfile: (
backend.load_pem_private_key(
pemfile.read().encode(), password
)
)
)
class DummyLibrary(object):
Cryptography_HAS_EC = 0
class TestOpenSSLEllipticCurve(object):
def test_elliptic_curve_supported(self, monkeypatch):
monkeypatch.setattr(backend, "_lib", DummyLibrary())
assert backend.elliptic_curve_supported(None) is False
def test_elliptic_curve_signature_algorithm_supported(self, monkeypatch):
monkeypatch.setattr(backend, "_lib", DummyLibrary())
assert backend.elliptic_curve_signature_algorithm_supported(
None, None
) is False
def test_sn_to_elliptic_curve_not_supported(self):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_ELLIPTIC_CURVE):
_sn_to_elliptic_curve(backend, b"fake")
def test_elliptic_curve_exchange_algorithm_supported(self, monkeypatch):
monkeypatch.setattr(backend, "_lib", DummyLibrary())
assert not backend.elliptic_curve_exchange_algorithm_supported(
ec.ECDH(), ec.SECP256R1()
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAPEMSerialization(object):
def test_password_length_limit(self):
password = b"x" * 1024
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(password)
)
class TestGOSTCertificate(object):
def test_numeric_string_x509_name_entry(self):
cert = _load_cert(
os.path.join("x509", "e-trust.ru.der"),
x509.load_der_x509_certificate,
backend
)
if (
backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102I or
backend._lib.CRYPTOGRAPHY_IS_LIBRESSL
):
with pytest.raises(ValueError) as exc:
cert.subject
# We assert on the message in this case because if the certificate
# fails to load it will also raise a ValueError and this test could
# erroneously pass.
assert str(exc.value) == "Unsupported ASN1 string type. Type: 18"
else:
assert cert.subject.get_attributes_for_oid(
x509.ObjectIdentifier("1.2.643.3.131.1.1")
)[0].value == "007710474375"
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
helper/genStrSession.py
|
# pylint: disable=invalid-name, missing-module-docstring
#
# Copyright (C) 2020-2022 by UsergeTeam@Github, < https://github.com/UsergeTeam >.
#
# This file is part of < https://github.com/UsergeTeam/Userge > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/UsergeTeam/Userge/blob/master/LICENSE >
#
# All rights reserved.
import os
import asyncio
from pyrogram import Client
from pyrogram.errors import UserIsBot
from dotenv import load_dotenv
if os.path.isfile("config.env"):
load_dotenv("config.env")
async def genStrSession() -> None: # pylint: disable=missing-function-docstring
async with Client(
"Userge",
api_id=int(os.environ.get("API_ID") or input("Enter Telegram APP ID: ")),
api_hash=os.environ.get("API_HASH") or input("Enter Telegram API HASH: "),
) as userge:
print("\nprocessing...")
doneStr = "sent to saved messages!"
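# bot accounts cannot message themselves, so fall back to printing the session string instead of sending it to Saved Messages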
try:
await userge.send_message(
"me", f"#USERGE #SESSION_STRING\n\n```{await userge.export_session_string()}```"
)
except UserIsBot:
doneStr = "successfully printed!"
print(await userge.export_session_string())
print(f"Done !, session string has been {doneStr}")
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(genStrSession())
|
[] |
[] |
[
"API_ID",
"API_HASH"
] |
[]
|
["API_ID", "API_HASH"]
|
python
| 2 | 0 | |
napari/_vispy/_tests/test_vispy_multiscale.py
|
import os
import sys
import numpy as np
import pytest
def test_multiscale(make_test_viewer):
"""Test rendering of multiscale data."""
viewer = make_test_viewer()
shapes = [(4000, 3000), (2000, 1500), (1000, 750), (500, 375)]
np.random.seed(0)
data = [np.random.random(s) for s in shapes]
_ = viewer.add_image(data, multiscale=True, contrast_limits=[0, 1])
layer = viewer.layers[0]
# Set canvas size to target amount
viewer.window.qt_viewer.view.canvas.size = (800, 600)
viewer.window.qt_viewer.on_draw(None)
# Check that current level is first large enough to fill the canvas with
# a greater than one pixel depth
assert layer.data_level == 2
# Check that full field of view is currently requested
assert np.all(layer.corner_pixels[0] <= [0, 0])
assert np.all(layer.corner_pixels[1] >= np.subtract(shapes[2], 1))
# Test value at top left corner of image
viewer.cursor.position = (0, 0)
value = layer.get_value()
np.testing.assert_allclose(value, (2, data[2][(0, 0)]))
# Test value at bottom right corner of image
viewer.cursor.position = (3995, 2995)
value = layer.get_value()
np.testing.assert_allclose(value, (2, data[2][(999, 749)]))
# Test value outside image
viewer.cursor.position = (4000, 3000)
value = layer.get_value()
assert value[1] is None
def test_3D_multiscale_image(make_test_viewer):
"""Test rendering of 3D multiscale image uses lowest resolution."""
viewer = make_test_viewer()
data = [np.random.random((128,) * 3), np.random.random((64,) * 3)]
viewer.add_image(data)
# Check that this doesn't crash.
viewer.dims.ndisplay = 3
# Check lowest resolution is used
assert viewer.layers[0].data_level == 1
# Note that draw command must be explicitly triggered in our tests
viewer.window.qt_viewer.on_draw(None)
@pytest.mark.skipif(
sys.platform.startswith('win') or not os.getenv("CI"),
reason='Screenshot tests are not supported on napari windows CI.',
)
def test_multiscale_screenshot(make_test_viewer):
"""Test rendering of multiscale data with screenshot."""
viewer = make_test_viewer(show=True)
shapes = [(4000, 3000), (2000, 1500), (1000, 750), (500, 375)]
data = [np.ones(s) for s in shapes]
_ = viewer.add_image(data, multiscale=True, contrast_limits=[0, 1])
# Set canvas size to target amount
viewer.window.qt_viewer.view.canvas.size = (800, 600)
screenshot = viewer.screenshot(canvas_only=True)
center_coord = np.round(np.array(screenshot.shape[:2]) / 2).astype(np.int)
target_center = np.array([255, 255, 255, 255], dtype='uint8')
target_edge = np.array([0, 0, 0, 255], dtype='uint8')
screen_offset = 3 # Offset is needed as our screenshots have black borders
np.testing.assert_allclose(screenshot[tuple(center_coord)], target_center)
np.testing.assert_allclose(
screenshot[screen_offset, screen_offset], target_edge
)
np.testing.assert_allclose(
screenshot[-screen_offset, -screen_offset], target_edge
)
@pytest.mark.skipif(
sys.platform.startswith('win') or not os.getenv("CI"),
reason='Screenshot tests are not supported on napari windows CI.',
)
def test_multiscale_screenshot_zoomed(make_test_viewer):
"""Test rendering of multiscale data with screenshot after zoom."""
viewer = make_test_viewer(show=True)
view = viewer.window.qt_viewer
shapes = [(4000, 3000), (2000, 1500), (1000, 750), (500, 375)]
data = [np.ones(s) for s in shapes]
_ = viewer.add_image(data, multiscale=True, contrast_limits=[0, 1])
# Set canvas size to target amount
view.view.canvas.size = (800, 600)
# Set zoom of camera to show highest resolution tile
view.view.camera.rect = [1000, 1000, 200, 150]
viewer.window.qt_viewer.on_draw(None)
# Check that current level is bottom level of multiscale
assert viewer.layers[0].data_level == 0
screenshot = viewer.screenshot(canvas_only=True)
center_coord = np.round(np.array(screenshot.shape[:2]) / 2).astype(np.int)
target_center = np.array([255, 255, 255, 255], dtype='uint8')
screen_offset = 3 # Offset is needed as our screenshots have black borders
np.testing.assert_allclose(screenshot[tuple(center_coord)], target_center)
np.testing.assert_allclose(
screenshot[screen_offset, screen_offset], target_center
)
np.testing.assert_allclose(
screenshot[-screen_offset, -screen_offset], target_center
)
@pytest.mark.skipif(
sys.platform.startswith('win') or not os.getenv("CI"),
reason='Screenshot tests are not supported on napari windows CI.',
)
def test_image_screenshot_zoomed(make_test_viewer):
"""Test rendering of image data with screenshot after zoom."""
viewer = make_test_viewer(show=True)
view = viewer.window.qt_viewer
data = np.ones((4000, 3000))
_ = viewer.add_image(data, multiscale=False, contrast_limits=[0, 1])
# Set canvas size to target amount
view.view.canvas.size = (800, 600)
# Set zoom of camera to show highest resolution tile
view.view.camera.rect = [1000, 1000, 200, 150]
viewer.window.qt_viewer.on_draw(None)
screenshot = viewer.screenshot(canvas_only=True)
center_coord = np.round(np.array(screenshot.shape[:2]) / 2).astype(np.int)
target_center = np.array([255, 255, 255, 255], dtype='uint8')
screen_offset = 3 # Offset is needed as our screenshots have black borders
np.testing.assert_allclose(screenshot[tuple(center_coord)], target_center)
np.testing.assert_allclose(
screenshot[screen_offset, screen_offset], target_center
)
np.testing.assert_allclose(
screenshot[-screen_offset, -screen_offset], target_center
)
@pytest.mark.skipif(
sys.platform.startswith('win') or not os.getenv("CI"),
reason='Screenshot tests are not supported on napari windows CI.',
)
def test_5D_multiscale(make_test_viewer):
"""Test 5D multiscale data."""
# Show must be true to trigger multiscale draw and corner estimation
viewer = make_test_viewer(show=True)
shapes = [(1, 2, 5, 20, 20), (1, 2, 5, 10, 10), (1, 2, 5, 5, 5)]
np.random.seed(0)
data = [np.random.random(s) for s in shapes]
layer = viewer.add_image(data, multiscale=True)
assert layer.data == data
assert layer.multiscale is True
assert layer.ndim == len(shapes[0])
|
[] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
python
| 1 | 0 | |
_vendor/src/golang.org/x/tools/cmd/ssadump/main.go
|
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// ssadump: a tool for displaying and interpreting the SSA form of Go programs.
package main
import (
"flag"
"fmt"
"go/build"
"os"
"runtime"
"runtime/pprof"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/interp"
"golang.org/x/tools/go/types"
)
var buildFlag = flag.String("build", "", `Options controlling the SSA builder.
The value is a sequence of zero or more of these letters:
C perform sanity [C]hecking of the SSA form.
D include [D]ebug info for every function.
P print [P]ackage inventory.
F print [F]unction SSA code.
S log [S]ource locations as SSA builder progresses.
G use binary object files from gc to provide imports (no code).
L build distinct packages seria[L]ly instead of in parallel.
N build [N]aive SSA form: don't replace local loads/stores with registers.
I build bare [I]nit functions: no init guards or calls to dependent inits.
`)
var testFlag = flag.Bool("test", false, "Loads test code (*_test.go) for imported packages.")
var runFlag = flag.Bool("run", false, "Invokes the SSA interpreter on the program.")
var interpFlag = flag.String("interp", "", `Options controlling the SSA test interpreter.
The value is a sequence of zero or more of these letters:
R disable [R]ecover() from panic; show interpreter crash instead.
T [T]race execution of the program. Best for single-threaded programs!
`)
const usage = `SSA builder and interpreter.
Usage: ssadump [<flag> ...] <args> ...
Use -help flag to display options.
Examples:
% ssadump -build=FPG hello.go # quickly dump SSA form of a single package
% ssadump -run -interp=T hello.go # interpret a program, with tracing
% ssadump -run -test unicode -- -test.v # interpret the unicode package's tests, verbosely
` + loader.FromArgsUsage +
`
When -run is specified, ssadump will run the program.
The entry point depends on the -test flag:
if clear, it runs the first package named main.
if set, it runs the tests of each package.
`
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
func init() {
// If $GOMAXPROCS isn't set, use the full capacity of the machine.
// For small machines, use at least 4 threads.
if os.Getenv("GOMAXPROCS") == "" {
n := runtime.NumCPU()
if n < 4 {
n = 4
}
runtime.GOMAXPROCS(n)
}
}
func main() {
if err := doMain(); err != nil {
fmt.Fprintf(os.Stderr, "ssadump: %s\n", err)
os.Exit(1)
}
}
func doMain() error {
flag.Parse()
args := flag.Args()
conf := loader.Config{
Build: &build.Default,
SourceImports: true,
}
// TODO(adonovan): make go/types choose its default Sizes from
// build.Default or a specified *build.Context.
var wordSize int64 = 8
switch conf.Build.GOARCH {
case "386", "arm":
wordSize = 4
}
conf.TypeChecker.Sizes = &types.StdSizes{
MaxAlign: 8,
WordSize: wordSize,
}
var mode ssa.BuilderMode
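// translate each -build option letter into the corresponding ssa.BuilderMode bit (or loader setting)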
for _, c := range *buildFlag {
switch c {
case 'D':
mode |= ssa.GlobalDebug
case 'P':
mode |= ssa.PrintPackages
case 'F':
mode |= ssa.PrintFunctions
case 'S':
mode |= ssa.LogSource | ssa.BuildSerially
case 'C':
mode |= ssa.SanityCheckFunctions
case 'N':
mode |= ssa.NaiveForm
case 'G':
conf.SourceImports = false
case 'L':
mode |= ssa.BuildSerially
case 'I':
mode |= ssa.BareInits
default:
return fmt.Errorf("unknown -build option: '%c'", c)
}
}
var interpMode interp.Mode
for _, c := range *interpFlag {
switch c {
case 'T':
interpMode |= interp.EnableTracing
case 'R':
interpMode |= interp.DisableRecover
default:
return fmt.Errorf("unknown -interp option: '%c'", c)
}
}
if len(args) == 0 {
fmt.Fprint(os.Stderr, usage)
os.Exit(1)
}
// Profiling support.
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
// Use the initial packages from the command line.
args, err := conf.FromArgs(args, *testFlag)
if err != nil {
return err
}
// The interpreter needs the runtime package.
if *runFlag {
conf.Import("runtime")
}
// Load, parse and type-check the whole program.
iprog, err := conf.Load()
if err != nil {
return err
}
// Create and build SSA-form program representation.
prog := ssa.Create(iprog, mode)
prog.BuildAll()
// Run the interpreter.
if *runFlag {
var main *ssa.Package
pkgs := prog.AllPackages()
if *testFlag {
// If -test, run all packages' tests.
if len(pkgs) > 0 {
main = prog.CreateTestMainPackage(pkgs...)
}
if main == nil {
return fmt.Errorf("no tests")
}
} else {
// Otherwise, run main.main.
for _, pkg := range pkgs {
if pkg.Object.Name() == "main" {
main = pkg
if main.Func("main") == nil {
return fmt.Errorf("no func main() in main package")
}
break
}
}
if main == nil {
return fmt.Errorf("no main package")
}
}
if runtime.GOARCH != build.Default.GOARCH {
return fmt.Errorf("cross-interpretation is not yet supported (target has GOARCH %s, interpreter has %s)",
build.Default.GOARCH, runtime.GOARCH)
}
interp.Interpret(main, interpMode, conf.TypeChecker.Sizes, main.Object.Path(), args)
}
return nil
}
|
[
"\"GOMAXPROCS\""
] |
[] |
[
"GOMAXPROCS"
] |
[]
|
["GOMAXPROCS"]
|
go
| 1 | 0 | |
hacks/crawl/cmd/backend/main.go
|
package main
import (
"context"
"log"
"os"
server "sigs.k8s.io/kustomize/hacks/crawl/backend"
"strconv"
)
func main() {
portStr := os.Getenv("PORT")
port, err := strconv.Atoi(portStr)
if portStr == "" || err != nil {
log.Fatalf("$PORT(%s) must be set to an integer\n", portStr)
}
ctx := context.Background()
ks, err := server.NewKustomizeSearch(ctx)
if err != nil {
log.Fatalf("Error creating kustomize server: %v", ks)
}
err = ks.Serve(port)
if err != nil {
log.Fatalf("Error while running server: %v", err)
}
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
backend/cli/watcher/main.go
|
package main
import (
"context"
"encoding/json"
"github.com/HydroProtocol/hydro-scaffold-dex/backend/cli"
"github.com/HydroProtocol/hydro-scaffold-dex/backend/connection"
"github.com/HydroProtocol/hydro-scaffold-dex/backend/models"
"github.com/HydroProtocol/hydro-sdk-backend/common"
"github.com/HydroProtocol/hydro-sdk-backend/sdk"
"github.com/HydroProtocol/hydro-sdk-backend/sdk/ethereum"
"github.com/HydroProtocol/hydro-sdk-backend/utils"
"github.com/HydroProtocol/hydro-sdk-backend/watcher"
"os"
_ "github.com/joho/godotenv/autoload"
)
type DBTransactionHandler struct {
w watcher.Watcher
}
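// Update is called by the watcher for each mined transaction. It resolves the
// pending launch log by hash, derives the final status from the receipt, and
// either updates the log directly (approve transactions) or pushes a
// ConfirmTransaction event onto the engine queue.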
func (handler DBTransactionHandler) Update(tx sdk.Transaction, timestamp uint64) {
launchLog := models.LaunchLogDao.FindByHash(tx.GetHash())
if launchLog == nil {
utils.Debug("Skip useless transaction %s", tx.GetHash())
return
}
if launchLog.Status != common.STATUS_PENDING {
utils.Info("LaunchLog is not pending %s, skip", launchLog.Hash.String)
return
}
if launchLog != nil {
txReceipt, _ := handler.w.Hydro.GetTransactionReceipt(tx.GetHash())
result := txReceipt.GetResult()
hash := tx.GetHash()
transaction := models.TransactionDao.FindTransactionByID(launchLog.ItemID)
utils.Info("Transaction %s result is %+v", tx.GetHash(), result)
var status string
if result {
status = common.STATUS_SUCCESSFUL
} else {
status = common.STATUS_FAILED
}
		// Approve events are not processed by the engine, so just update the launch log status and return
if launchLog.ItemType == "hydroApprove" {
launchLog.Status = status
err := models.LaunchLogDao.UpdateLaunchLog(launchLog)
if err != nil {
panic(err)
}
return
}
event := &common.ConfirmTransactionEvent{
Event: common.Event{
Type: common.EventConfirmTransaction,
MarketID: transaction.MarketID,
},
Hash: hash,
Status: status,
Timestamp: timestamp,
}
bts, _ := json.Marshal(event)
err := handler.w.QueueClient.Push(bts)
if err != nil {
utils.Error("Push event into Queue Error %v", err)
}
}
}
func main() {
ctx, stop := context.WithCancel(context.Background())
go cli.WaitExitSignal(stop)
// Init Database Client
models.Connect(os.Getenv("HSK_DATABASE_URL"))
// Init Redis client
client := connection.NewRedisClient(os.Getenv("HSK_REDIS_URL"))
// Init Blockchain Client
hydro := ethereum.NewEthereumHydro(os.Getenv("HSK_BLOCKCHAIN_RPC_URL"), os.Getenv("HSK_HYBRID_EXCHANGE_ADDRESS"))
if os.Getenv("HSK_LOG_LEVEL") == "DEBUG" {
hydro.EnableDebug(true)
}
// init Key/Value Store
kvStore, err := common.InitKVStore(&common.RedisKVStoreConfig{
Ctx: ctx,
Client: client,
})
if err != nil {
panic(err)
}
// Init Queue
// There is no block call of redis, so we share the client here.
queue, err := common.InitQueue(&common.RedisQueueConfig{
Name: common.HYDRO_ENGINE_EVENTS_QUEUE_KEY,
Client: client,
Ctx: ctx,
})
if err != nil {
panic(err)
}
w := watcher.Watcher{
Ctx: ctx,
Hydro: hydro,
KVClient: kvStore,
QueueClient: queue,
}
w.RegisterHandler(DBTransactionHandler{w})
go utils.StartMetrics()
w.Run()
utils.Info("Watcher Exit")
}
|
[
"\"HSK_DATABASE_URL\"",
"\"HSK_REDIS_URL\"",
"\"HSK_BLOCKCHAIN_RPC_URL\"",
"\"HSK_HYBRID_EXCHANGE_ADDRESS\"",
"\"HSK_LOG_LEVEL\""
] |
[] |
[
"HSK_BLOCKCHAIN_RPC_URL",
"HSK_REDIS_URL",
"HSK_LOG_LEVEL",
"HSK_HYBRID_EXCHANGE_ADDRESS",
"HSK_DATABASE_URL"
] |
[]
|
["HSK_BLOCKCHAIN_RPC_URL", "HSK_REDIS_URL", "HSK_LOG_LEVEL", "HSK_HYBRID_EXCHANGE_ADDRESS", "HSK_DATABASE_URL"]
|
go
| 5 | 0 | |
model_zoo/official/nlp/gru/src/gru.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GRU cell"""
import mindspore.nn as nn
import mindspore.ops.operations as P
import mindspore.common.dtype as mstype
from src.weight_init import gru_default_state
class BidirectionGRU(nn.Cell):
'''
BidirectionGRU model
Args:
config: config of network
'''
def __init__(self, config, is_training=True):
super(BidirectionGRU, self).__init__()
if is_training:
self.batch_size = config.batch_size
else:
self.batch_size = config.eval_batch_size
self.embedding_size = config.encoder_embedding_size
self.hidden_size = config.hidden_size
self.weight_i, self.weight_h, self.bias_i, self.bias_h, self.init_h = gru_default_state(self.batch_size,
self.embedding_size,
self.hidden_size)
self.weight_bw_i, self.weight_bw_h, self.bias_bw_i, self.bias_bw_h, self.init_bw_h = \
gru_default_state(self.batch_size, self.embedding_size, self.hidden_size)
self.reverse = P.ReverseV2(axis=[1])
self.concat = P.Concat(axis=2)
self.squeeze = P.Squeeze(axis=0)
self.rnn = P.DynamicGRUV2()
self.text_len = config.max_length
self.cast = P.Cast()
def construct(self, x):
'''
BidirectionGRU construction
Args:
x(Tensor): BidirectionGRU input
Returns:
output(Tensor): rnn output
hidden(Tensor): hidden state
'''
x = self.cast(x, mstype.float16)
y1, _, _, _, _, _ = self.rnn(x, self.weight_i, self.weight_h, self.bias_i, self.bias_h, None, self.init_h)
bw_x = self.reverse(x)
y1_bw, _, _, _, _, _ = self.rnn(bw_x, self.weight_bw_i,
self.weight_bw_h, self.bias_bw_i, self.bias_bw_h, None, self.init_bw_h)
y1_bw = self.reverse(y1_bw)
output = self.concat((y1, y1_bw))
hidden = self.concat((y1[self.text_len-1:self.text_len:1, ::, ::],
y1_bw[self.text_len-1:self.text_len:1, ::, ::]))
hidden = self.squeeze(hidden)
return output, hidden
class GRU(nn.Cell):
'''
GRU model
Args:
config: config of network
'''
def __init__(self, config, is_training=True):
super(GRU, self).__init__()
if is_training:
self.batch_size = config.batch_size
else:
self.batch_size = config.eval_batch_size
self.embedding_size = config.encoder_embedding_size
self.hidden_size = config.hidden_size
self.weight_i, self.weight_h, self.bias_i, self.bias_h, self.init_h = \
gru_default_state(self.batch_size, self.embedding_size + self.hidden_size*2, self.hidden_size)
self.rnn = P.DynamicGRUV2()
self.cast = P.Cast()
def construct(self, x):
'''
GRU construction
Args:
x(Tensor): GRU input
Returns:
output(Tensor): rnn output
hidden(Tensor): hidden state
'''
x = self.cast(x, mstype.float16)
y1, h1, _, _, _, _ = self.rnn(x, self.weight_i, self.weight_h, self.bias_i, self.bias_h, None, self.init_h)
return y1, h1
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
internal/extsvc/bitbucketcloud/testing.go
|
package bitbucketcloud
import (
"net/url"
"os"
"path/filepath"
"testing"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/httptestutil"
"github.com/sourcegraph/sourcegraph/internal/lazyregexp"
)
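// GetenvTestBitbucketCloudUsername returns the Bitbucket Cloud username used
// by the VCR test client, defaulting to "unknwon" when the
// BITBUCKET_CLOUD_USERNAME environment variable is unset.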
func GetenvTestBitbucketCloudUsername() string {
username := os.Getenv("BITBUCKET_CLOUD_USERNAME")
if username == "" {
username = "unknwon"
}
return username
}
// NewTestClient returns a bitbucketcloud.Client that records its interactions
// to testdata/vcr/.
func NewTestClient(t testing.TB, name string, update bool, apiURL *url.URL) (*Client, func()) {
t.Helper()
	cassette := filepath.Join("testdata/vcr/", normalize(name))
	rec, err := httptestutil.NewRecorder(cassette, update)
if err != nil {
t.Fatal(err)
}
hc, err := httpcli.NewFactory(nil, httptestutil.NewRecorderOpt(rec)).Doer()
if err != nil {
t.Fatal(err)
}
cli := NewClient(apiURL, hc)
cli.Username = GetenvTestBitbucketCloudUsername()
cli.AppPassword = os.Getenv("BITBUCKET_CLOUD_APP_PASSWORD")
return cli, func() {
if err := rec.Stop(); err != nil {
t.Errorf("failed to update test data: %s", err)
}
}
}
var normalizer = lazyregexp.New("[^A-Za-z0-9-]+")
func normalize(path string) string {
return normalizer.ReplaceAllLiteralString(path, "-")
}
|
[
"\"BITBUCKET_CLOUD_USERNAME\"",
"\"BITBUCKET_CLOUD_APP_PASSWORD\""
] |
[] |
[
"BITBUCKET_CLOUD_USERNAME",
"BITBUCKET_CLOUD_APP_PASSWORD"
] |
[]
|
["BITBUCKET_CLOUD_USERNAME", "BITBUCKET_CLOUD_APP_PASSWORD"]
|
go
| 2 | 0 | |
ml-agents/mlagents/trainers/learn.py
|
# # Unity ML-Agents Toolkit
import logging
import argparse
from multiprocessing import Process, Queue
import os
import glob
import shutil
import numpy as np
from typing import Any, Callable, Optional, List, NamedTuple
import mlagents.trainers
import mlagents.envs
from mlagents import tf_utils
from mlagents.trainers.trainer_controller import TrainerController
from mlagents.trainers.exception import TrainerError
from mlagents.trainers.meta_curriculum import MetaCurriculum
from mlagents.trainers.trainer_util import load_config, TrainerFactory
from mlagents.envs.environment import UnityEnvironment
from mlagents.envs.sampler_class import SamplerManager
from mlagents.envs.exception import SamplerException
from mlagents.envs.base_unity_environment import BaseUnityEnvironment
from mlagents.envs.subprocess_env_manager import SubprocessEnvManager
class CommandLineOptions(NamedTuple):
debug: bool
num_runs: int
seed: int
env_path: str
run_id: str
load_model: bool
train_model: bool
save_freq: int
keep_checkpoints: int
base_port: int
num_envs: int
curriculum_folder: Optional[str]
lesson: int
slow: bool
no_graphics: bool
multi_gpu: bool # ?
trainer_config_path: str
sampler_file_path: Optional[str]
docker_target_name: Optional[str]
env_args: Optional[List[str]]
cpu: bool
@property
def fast_simulation(self) -> bool:
return not self.slow
@staticmethod
def from_argparse(args: Any) -> "CommandLineOptions":
return CommandLineOptions(**vars(args))
def get_version_string() -> str:
return f""" Version information:
ml-agents: {mlagents.trainers.__version__},
ml-agents-envs: {mlagents.envs.__version__},
Communicator API: {UnityEnvironment.API_VERSION},
TensorFlow: {tf_utils.tf.__version__}"""
def parse_command_line(argv: Optional[List[str]] = None) -> CommandLineOptions:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("trainer_config_path")
parser.add_argument(
"--env", default=None, dest="env_path", help="Name of the Unity executable "
)
parser.add_argument(
"--curriculum",
default=None,
dest="curriculum_folder",
help="Curriculum json directory for environment",
)
parser.add_argument(
"--sampler",
default=None,
dest="sampler_file_path",
help="Reset parameter yaml file for environment",
)
parser.add_argument(
"--keep-checkpoints",
default=5,
type=int,
help="How many model checkpoints to keep",
)
parser.add_argument(
"--lesson", default=0, type=int, help="Start learning from this lesson"
)
parser.add_argument(
"--load",
default=False,
dest="load_model",
action="store_true",
help="Whether to load the model or randomly initialize",
)
parser.add_argument(
"--run-id",
default="ppo",
help="The directory name for model and summary statistics",
)
parser.add_argument(
"--num-runs", default=1, type=int, help="Number of concurrent training sessions"
)
parser.add_argument(
"--save-freq", default=50000, type=int, help="Frequency at which to save model"
)
parser.add_argument(
"--seed", default=-1, type=int, help="Random seed used for training"
)
parser.add_argument(
"--slow", action="store_true", help="Whether to run the game at training speed"
)
parser.add_argument(
"--train",
default=False,
dest="train_model",
action="store_true",
help="Whether to train model, or only run inference",
)
parser.add_argument(
"--base-port",
default=5005,
type=int,
help="Base port for environment communication",
)
parser.add_argument(
"--num-envs",
default=1,
type=int,
help="Number of parallel environments to use for training",
)
parser.add_argument(
"--docker-target-name",
default=None,
dest="docker_target_name",
help="Docker volume to store training-specific files",
)
parser.add_argument(
"--no-graphics",
default=False,
action="store_true",
help="Whether to run the environment in no-graphics mode",
)
parser.add_argument(
"--debug",
default=False,
action="store_true",
help="Whether to run ML-Agents in debug mode with detailed logging",
)
parser.add_argument(
"--multi-gpu",
default=False,
action="store_true",
        help="Setting this flag enables the use of multiple GPUs (if available) during training",
)
parser.add_argument(
"--env-args",
default=None,
nargs=argparse.REMAINDER,
help="Arguments passed to the Unity executable.",
)
parser.add_argument(
"--cpu", default=False, action="store_true", help="Run with CPU only"
)
parser.add_argument("--version", action="version", version="")
args = parser.parse_args(argv)
return CommandLineOptions.from_argparse(args)
def run_training(
sub_id: int, run_seed: int, options: CommandLineOptions, process_queue: Queue
) -> None:
"""
Launches training session.
:param process_queue: Queue used to send signal back to main.
:param sub_id: Unique id for training session.
:param options: parsed command line arguments
:param run_seed: Random seed used for training.
"""
# Docker Parameters
trainer_config_path = options.trainer_config_path
curriculum_folder = options.curriculum_folder
# Recognize and use docker volume if one is passed as an argument
if not options.docker_target_name:
model_path = "./models/{run_id}-{sub_id}".format(
run_id=options.run_id, sub_id=sub_id
)
summaries_dir = "./summaries"
else:
trainer_config_path = "/{docker_target_name}/{trainer_config_path}".format(
docker_target_name=options.docker_target_name,
trainer_config_path=trainer_config_path,
)
if curriculum_folder is not None:
curriculum_folder = "/{docker_target_name}/{curriculum_folder}".format(
docker_target_name=options.docker_target_name,
curriculum_folder=curriculum_folder,
)
model_path = "/{docker_target_name}/models/{run_id}-{sub_id}".format(
docker_target_name=options.docker_target_name,
run_id=options.run_id,
sub_id=sub_id,
)
summaries_dir = "/{docker_target_name}/summaries".format(
docker_target_name=options.docker_target_name
)
trainer_config = load_config(trainer_config_path)
port = options.base_port + (sub_id * options.num_envs)
if options.env_path is None:
        port = 5004  # This is the in-Editor training port
env_factory = create_environment_factory(
options.env_path,
options.docker_target_name,
options.no_graphics,
run_seed,
port,
options.env_args,
)
env = SubprocessEnvManager(env_factory, options.num_envs)
maybe_meta_curriculum = try_create_meta_curriculum(
curriculum_folder, env, options.lesson
)
sampler_manager, resampling_interval = create_sampler_manager(
options.sampler_file_path, env.reset_parameters, run_seed
)
trainer_factory = TrainerFactory(
trainer_config,
summaries_dir,
options.run_id,
model_path,
options.keep_checkpoints,
options.train_model,
options.load_model,
run_seed,
maybe_meta_curriculum,
options.multi_gpu,
)
# Create controller and begin training.
tc = TrainerController(
trainer_factory,
model_path,
summaries_dir,
options.run_id + "-" + str(sub_id),
options.save_freq,
maybe_meta_curriculum,
options.train_model,
run_seed,
options.fast_simulation,
sampler_manager,
resampling_interval,
)
# Signal that environment has been launched.
process_queue.put(True)
# Begin training
tc.start_learning(env)
def create_sampler_manager(sampler_file_path, env_reset_params, run_seed=None):
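    # Load the sampler config (if provided), validate the resampling-interval
    # key, and wrap the config in a SamplerManager.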
sampler_config = None
resample_interval = None
if sampler_file_path is not None:
sampler_config = load_config(sampler_file_path)
if "resampling-interval" in sampler_config:
# Filter arguments that do not exist in the environment
resample_interval = sampler_config.pop("resampling-interval")
if (resample_interval <= 0) or (not isinstance(resample_interval, int)):
raise SamplerException(
"Specified resampling-interval is not valid. Please provide"
" a positive integer value for resampling-interval"
)
else:
raise SamplerException(
"Resampling interval was not specified in the sampler file."
" Please specify it with the 'resampling-interval' key in the sampler config file."
)
sampler_manager = SamplerManager(sampler_config, run_seed)
return sampler_manager, resample_interval
def try_create_meta_curriculum(
curriculum_folder: Optional[str], env: SubprocessEnvManager, lesson: int
) -> Optional[MetaCurriculum]:
if curriculum_folder is None:
return None
else:
meta_curriculum = MetaCurriculum(curriculum_folder, env.reset_parameters)
# TODO: Should be able to start learning at different lesson numbers
# for each curriculum.
meta_curriculum.set_all_curriculums_to_lesson_num(lesson)
return meta_curriculum
def prepare_for_docker_run(docker_target_name, env_path):
for f in glob.glob(
"/{docker_target_name}/*".format(docker_target_name=docker_target_name)
):
if env_path in f:
try:
b = os.path.basename(f)
if os.path.isdir(f):
shutil.copytree(f, "/ml-agents/{b}".format(b=b))
else:
src_f = "/{docker_target_name}/{b}".format(
docker_target_name=docker_target_name, b=b
)
dst_f = "/ml-agents/{b}".format(b=b)
shutil.copyfile(src_f, dst_f)
os.chmod(dst_f, 0o775) # Make executable
except Exception as e:
logging.getLogger("mlagents.trainers").info(e)
env_path = "/ml-agents/{env_path}".format(env_path=env_path)
return env_path
def create_environment_factory(
env_path: str,
docker_target_name: Optional[str],
no_graphics: bool,
seed: Optional[int],
start_port: int,
env_args: Optional[List[str]],
) -> Callable[[int], BaseUnityEnvironment]:
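    # Returns a factory that builds one UnityEnvironment per worker id, drawing
    # a per-worker seed from a pool when no explicit seed is supplied.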
if env_path is not None:
# Strip out executable extensions if passed
env_path = (
env_path.strip()
.replace(".app", "")
.replace(".exe", "")
.replace(".x86_64", "")
.replace(".x86", "")
)
docker_training = docker_target_name is not None
if docker_training and env_path is not None:
# Comments for future maintenance:
# Some OS/VM instances (e.g. COS GCP Image) mount filesystems
# with COS flag which prevents execution of the Unity scene,
# to get around this, we will copy the executable into the
# container.
# Navigate in docker path and find env_path and copy it.
env_path = prepare_for_docker_run(docker_target_name, env_path)
seed_count = 10000
seed_pool = [np.random.randint(0, seed_count) for _ in range(seed_count)]
def create_unity_environment(worker_id: int) -> UnityEnvironment:
env_seed = seed
if not env_seed:
env_seed = seed_pool[worker_id % len(seed_pool)]
return UnityEnvironment(
file_name=env_path,
worker_id=worker_id,
seed=env_seed,
docker_training=docker_training,
no_graphics=no_graphics,
base_port=start_port,
args=env_args,
)
return create_unity_environment
def main():
try:
print(
"""
▄▄▄▓▓▓▓
╓▓▓▓▓▓▓█▓▓▓▓▓
,▄▄▄m▀▀▀' ,▓▓▓▀▓▓▄ ▓▓▓ ▓▓▌
▄▓▓▓▀' ▄▓▓▀ ▓▓▓ ▄▄ ▄▄ ,▄▄ ▄▄▄▄ ,▄▄ ▄▓▓▌▄ ▄▄▄ ,▄▄
▄▓▓▓▀ ▄▓▓▀ ▐▓▓▌ ▓▓▌ ▐▓▓ ▐▓▓▓▀▀▀▓▓▌ ▓▓▓ ▀▓▓▌▀ ^▓▓▌ ╒▓▓▌
▄▓▓▓▓▓▄▄▄▄▄▄▄▄▓▓▓ ▓▀ ▓▓▌ ▐▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▌ ▐▓▓▄ ▓▓▌
▀▓▓▓▓▀▀▀▀▀▀▀▀▀▀▓▓▄ ▓▓ ▓▓▌ ▐▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▌ ▐▓▓▐▓▓
^█▓▓▓ ▀▓▓▄ ▐▓▓▌ ▓▓▓▓▄▓▓▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▓▄ ▓▓▓▓`
'▀▓▓▓▄ ^▓▓▓ ▓▓▓ └▀▀▀▀ ▀▀ ^▀▀ `▀▀ `▀▀ '▀▀ ▐▓▓▌
▀▀▀▀▓▄▄▄ ▓▓▓▓▓▓, ▓▓▓▓▀
`▀█▓▓▓▓▓▓▓▓▓▌
¬`▀▀▀█▓
"""
)
except Exception:
print("\n\n\tUnity Technologies\n")
print(get_version_string())
options = parse_command_line()
trainer_logger = logging.getLogger("mlagents.trainers")
env_logger = logging.getLogger("mlagents.envs")
trainer_logger.info(options)
if options.debug:
trainer_logger.setLevel("DEBUG")
env_logger.setLevel("DEBUG")
else:
# disable noisy warnings from tensorflow.
tf_utils.set_warnings_enabled(False)
if options.env_path is None and options.num_runs > 1:
raise TrainerError(
"It is not possible to launch more than one concurrent training session "
"when training from the editor."
)
jobs = []
run_seed = options.seed
if options.cpu:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
if options.num_runs == 1:
if options.seed == -1:
run_seed = np.random.randint(0, 10000)
run_training(0, run_seed, options, Queue())
else:
for i in range(options.num_runs):
if options.seed == -1:
run_seed = np.random.randint(0, 10000)
process_queue = Queue()
p = Process(target=run_training, args=(i, run_seed, options, process_queue))
jobs.append(p)
p.start()
# Wait for signal that environment has successfully launched
while process_queue.get() is not True:
continue
# Wait for jobs to complete. Otherwise we'll have an extra
# unhandled KeyboardInterrupt if we end early.
try:
for job in jobs:
job.join()
except KeyboardInterrupt:
pass
# For python debugger to directly run this script
if __name__ == "__main__":
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
fusesoc/main.py
|
#!/usr/bin/env python
# Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import argparse
import os
import signal
import subprocess
import sys
import warnings
from fusesoc import __version__
# Check if this is run from a local installation
fusesocdir = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
)
if os.path.exists(os.path.join(fusesocdir, "fusesoc")):
sys.path[0:0] = [fusesocdir]
import logging
from edalize import get_edatool
from fusesoc.config import Config
from fusesoc.coreconverter import convert_core
from fusesoc.coremanager import CoreManager, DependencyError
from fusesoc.edalizer import Edalizer
from fusesoc.librarymanager import Library
from fusesoc.utils import Launcher, setup_logging, yaml_fread
from fusesoc.vlnv import Vlnv
logger = logging.getLogger(__name__)
REPOS = [
("orpsoc-cores", "https://github.com/openrisc/orpsoc-cores", "old base library"),
("fusesoc-cores", "https://github.com/fusesoc/fusesoc-cores", "new base library"),
]
def _get_core(cm, name):
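    # Resolve a core by VLNV name, exiting with a helpful message if the core
    # or one of its dependencies cannot be found.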
core = None
try:
core = cm.get_core(Vlnv(name))
except RuntimeError as e:
logger.error(str(e))
exit(1)
except DependencyError as e:
logger.error(
"'"
+ name
+ "' or any of its dependencies requires '"
+ e.value
+ "', but this core was not found"
)
exit(1)
return core
def abort_handler(signal, frame):
print("")
logger.info("****************************")
logger.info("**** FuseSoC aborted ****")
logger.info("****************************")
print("")
sys.exit(0)
signal.signal(signal.SIGINT, abort_handler)
def pgm(cm, args):
warnings.warn(
"The 'pgm' subcommand is deprecated and will be removed in the next "
"release. Use 'fusesoc run --target=synth --run' instead.",
FutureWarning,
)
do_configure = False
do_build = False
do_run = True
flags = {"target": "synth", "tool": None}
run_backend(
cm,
"build",
do_configure,
do_build,
do_run,
flags,
None,
args.system,
args.backendargs,
None,
)
def fetch(cm, args):
core = _get_core(cm, args.core)
try:
core.setup()
except RuntimeError as e:
logger.error("Failed to fetch '{}': {}".format(core.name, str(e)))
exit(1)
def init(cm, args):
warnings.warn(
"The 'init' subcommand is deprecated and will be removed in the next "
"release. It was intended to fetch the FuseSoC standard library. This can be done with 'fusesoc library add fusesoc_cores https://github.com/fusesoc/fusesoc-cores' instead.",
FutureWarning,
)
# Fix Python 2.x.
global input
try:
input = raw_input
except NameError:
pass
xdg_config_home = os.environ.get("XDG_CONFIG_HOME") or os.path.join(
os.path.expanduser("~"), ".config"
)
config_file = os.path.join(xdg_config_home, "fusesoc", "fusesoc.conf")
if os.path.exists(config_file):
logger.warning("'{}' already exists. Aborting".format(config_file))
exit(1)
# TODO. Prepend cores_root to file if it doesn't exist
f = open(config_file, "w+")
else:
logger.info("Writing configuration file to '{}'".format(config_file))
if not os.path.exists(os.path.dirname(config_file)):
os.makedirs(os.path.dirname(config_file))
f = open(config_file, "w+")
config = Config(file=f)
_repo_paths = []
for repo in REPOS:
name = repo[0]
uri = repo[1]
default_dir = os.path.join(cm._lm.library_root, name)
prompt = "Directory to use for {} ({}) [{}] : "
if args.y:
location = None
else:
location = input(prompt.format(repo[0], repo[2], default_dir))
if not location:
location = default_dir
if os.path.exists(location):
logger.warning(
"'{}' already exists. This library will not be added to fusesoc.conf".format(
location
)
)
# TODO: Prompt for overwrite
else:
logger.info("Initializing {}".format(name))
try:
library = Library(name, location, "git", uri, None, True)
config.add_library(library)
except RuntimeError as e:
logger.error("Init failed: " + str(e))
exit(1)
logger.info("FuseSoC is ready to use!")
def list_paths(cm, args):
cores_root = [x.location for x in cm.get_libraries()]
print("\n".join(cores_root))
def add_library(cm, args):
sync_uri = vars(args)["sync-uri"]
if args.location:
location = args.location
elif vars(args).get("global", False):
location = os.path.join(cm._lm.library_root, args.name)
else:
location = os.path.join("fusesoc_libraries", args.name)
sync_type = vars(args).get("sync-type")
sync_version = vars(args).get("sync-version")
# Check if it's a dir. Otherwise fall back to git repo
if not sync_type:
if os.path.isdir(sync_uri):
sync_type = "local"
else:
sync_type = "git"
if sync_type == "local":
logger.info(
"Interpreting sync-uri '{}' as location for local provider.".format(
sync_uri
)
)
location = os.path.abspath(sync_uri)
auto_sync = not args.no_auto_sync
library = Library(args.name, location, sync_type, sync_uri, sync_version, auto_sync)
if args.config:
config = Config(file=args.config)
elif vars(args)["global"]:
xdg_config_home = os.environ.get("XDG_CONFIG_HOME") or os.path.join(
os.path.expanduser("~"), ".config"
)
config_file = os.path.join(xdg_config_home, "fusesoc", "fusesoc.conf")
config = Config(path=config_file)
else:
config = Config(path="fusesoc.conf")
try:
config.add_library(library)
except RuntimeError as e:
logger.error("`add library` failed: " + str(e))
exit(1)
def library_list(cm, args):
lengths = [4, 8, 9, 8, 12, 9]
for lib in cm.get_libraries():
lengths[0] = max(lengths[0], len(lib.name))
lengths[1] = max(lengths[1], len(lib.location))
lengths[2] = max(lengths[2], len(lib.sync_type))
lengths[3] = max(lengths[3], len(lib.sync_uri or ""))
lengths[4] = max(lengths[4], len(lib.sync_version))
print(
"{} : {} : {} : {} : {} : {}".format(
"Name".ljust(lengths[0]),
"Location".ljust(lengths[1]),
"Sync type".ljust(lengths[2]),
"Sync URI".ljust(lengths[3]),
"Sync version".ljust(lengths[4]),
"Auto sync".ljust(lengths[5]),
)
)
for lib in cm.get_libraries():
print(
"{} : {} : {} : {} : {} : {}".format(
lib.name.ljust(lengths[0]),
lib.location.ljust(lengths[1]),
lib.sync_type.ljust(lengths[2]),
(lib.sync_uri or "N/A").ljust(lengths[3]),
(lib.sync_version or "(none)").ljust(lengths[4]),
("y" if lib.auto_sync else "n").ljust(lengths[5]),
)
)
def list_cores(cm, args):
cores = cm.get_cores()
print("\nAvailable cores:\n")
if not cores:
cores_root = cm.get_libraries()
if cores_root:
logger.error("No cores found in any library")
else:
logger.error("No libraries registered")
exit(1)
maxlen = max(map(len, cores.keys()))
print("Core".ljust(maxlen) + " Cache status")
print("=" * 80)
for name in sorted(cores.keys()):
core = cores[name]
print(name.ljust(maxlen) + " : " + core.cache_status())
def gen_list(cm, args):
cores = cm.get_generators()
if not cores:
print("\nNo available generators\n")
else:
print("\nAvailable generators:\n")
maxlen = max(map(len, cores.keys()))
print("Core".ljust(maxlen) + " Generator")
print("=" * (maxlen + 12))
for core in sorted(cores.keys()):
for generator_name, generator_data in cores[core].items():
print(
"{} : {} : {}".format(
core.ljust(maxlen),
generator_name,
generator_data.description or "<No description>",
)
)
def gen_show(cm, args):
cores = cm.get_generators()
for core in sorted(cores.keys()):
for generator_name, generator_data in cores[core].items():
if generator_name == args.generator:
print(
"""
Core : {}
Generator : {}
Description : {}
Usage :
{}""".format(
core,
generator_name,
generator_data.description or "<No description>",
generator_data.usage or "",
)
)
def migrate_capi1_to_capi2(cm, args):
if not args.nowarn:
logger.warning(
"The CAPI1 -> CAPI2 core file conversion is best effort\n"
"and does not work in all situations. Carefully check the results\n"
"and manually fix problems. Refer to the CAPI2 reference manual at\n"
"https://fusesoc.readthedocs.io/en/master/ref/capi2.html for help."
)
if args.inplace:
output_file = args.input
if args.output:
logger.error(
"Argument error: --output and --inplace are mutually exclusive."
)
sys.exit(1)
convert_core(args.input, output_file)
def core_info(cm, args):
core = _get_core(cm, args.core)
print(core.info())
def run(cm, args):
stages = (args.setup, args.build, args.run)
# Always run setup if build is true
args.setup |= args.build
# Run all stages by default if no stage flags are set
if stages == (False, False, False):
do_configure = True
do_build = True
do_run = True
elif stages == (True, False, True):
logger.error("Configure and run without build is invalid")
exit(1)
else:
do_configure = args.setup
do_build = args.build
do_run = args.run
flags = {"tool": args.tool, "target": args.target}
for flag in args.flag:
if flag[0] == "+":
flags[flag[1:]] = True
elif flag[0] == "-":
flags[flag[1:]] = False
else:
flags[flag] = True
run_backend(
cm,
not args.no_export,
do_configure,
do_build,
do_run,
flags,
args.system_name,
args.system,
args.backendargs,
args.build_root,
)
def run_backend(
cm,
export,
do_configure,
do_build,
do_run,
flags,
system_name,
system,
backendargs,
build_root_arg,
):
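    # Resolve the core and tool, construct the EDAM description via Edalizer
    # when (re)configuring, then drive the backend through the configure,
    # build and run stages as requested.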
tool_error = (
"No tool was supplied on command line or found in '{}' core description"
)
core = _get_core(cm, system)
try:
tool = core.get_tool(flags)
except SyntaxError as e:
logger.error(str(e))
exit(1)
if not tool:
logger.error(tool_error.format(system))
exit(1)
flags["tool"] = tool
build_root = build_root_arg or os.path.join(
cm.config.build_root, core.name.sanitized_name
)
logger.debug("Setting build_root to {}".format(build_root))
if export:
export_root = os.path.join(build_root, "src")
else:
export_root = None
try:
work_root = os.path.join(build_root, core.get_work_root(flags))
except SyntaxError as e:
logger.error(e.msg)
exit(1)
eda_api_file = os.path.join(work_root, core.name.sanitized_name + ".eda.yml")
if not os.path.exists(eda_api_file):
do_configure = True
try:
backend_class = get_edatool(tool)
except ImportError:
logger.error("Backend {!r} not found".format(tool))
exit(1)
edalizer = Edalizer(
toplevel=core.name,
flags=flags,
core_manager=cm,
cache_root=cm.config.cache_root,
work_root=work_root,
export_root=export_root,
system_name=system_name,
)
if do_configure:
try:
edalizer.run()
edam = edalizer.edalize
parsed_args = edalizer.parse_args(backend_class, backendargs, edam)
edalizer.add_parsed_args(backend_class, parsed_args)
except SyntaxError as e:
logger.error(e.msg)
exit(1)
except RuntimeError as e:
logger.error("Setup failed : {}".format(str(e)))
exit(1)
edalizer.to_yaml(eda_api_file)
else:
edam = yaml_fread(eda_api_file)
parsed_args = edalizer.parse_args(backend_class, backendargs, edam)
# Frontend/backend separation
try:
backend = backend_class(edam=edam, work_root=work_root)
except RuntimeError as e:
logger.error(str(e))
exit(1)
except FileNotFoundError as e:
logger.error('Could not find EDA API file "{}"'.format(e.filename))
exit(1)
if do_configure:
try:
backend.configure([])
print("")
except RuntimeError as e:
logger.error("Failed to configure the system")
logger.error(str(e))
exit(1)
if do_build:
try:
backend.build()
except RuntimeError as e:
logger.error("Failed to build {} : {}".format(str(core.name), str(e)))
exit(1)
if do_run:
try:
backend.run(parsed_args)
except RuntimeError as e:
logger.error("Failed to run {} : {}".format(str(core.name), str(e)))
exit(1)
def update(cm, args):
if "warn" in args:
logger.warning(args.warn)
cm._lm.update(args.libraries)
def init_logging(verbose, monochrome, log_file=None):
level = logging.DEBUG if verbose else logging.INFO
setup_logging(level, monochrome, log_file)
if verbose:
logger.debug("Verbose output")
else:
logger.debug("Concise output")
if monochrome:
logger.debug("Monochrome output")
else:
logger.debug("Colorful output")
def init_coremanager(config, args_cores_root):
logger.debug("Initializing core manager")
cm = CoreManager(config)
args_libs = [Library(acr, acr) for acr in args_cores_root]
# Add libraries from config file, env var and command-line
for library in config.libraries + args_libs:
try:
cm.add_library(library)
except (RuntimeError, OSError) as e:
_s = "Failed to register library '{}'"
logger.warning(_s.format(str(e)))
return cm
def get_parser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
# Global actions
parser.add_argument(
"--version",
help="Display the FuseSoC version",
action="version",
version=__version__,
)
# Global options
parser.add_argument(
"--cores-root",
help="Add additional directories containing cores",
default=[],
action="append",
)
parser.add_argument(
"--config", help="Specify the config file to use", type=argparse.FileType("r")
)
parser.add_argument(
"--monochrome",
help="Don't use color for messages",
action="store_true",
default=not sys.stdout.isatty(),
)
parser.add_argument("--verbose", help="More info messages", action="store_true")
parser.add_argument("--log-file", help="Write log messages to file")
# init subparser
parser_init = subparsers.add_parser(
"init", help="Initialize the FuseSoC core libraries. DEPRECATED"
)
parser_init.add_argument(
"-y", action="store_true", help="Skip user input and use default settings"
)
parser_init.set_defaults(func=init)
# pgm subparser
parser_pgm = subparsers.add_parser(
"pgm",
help="Program an FPGA with a system configuration. DEPRECATED, use 'run' instead.",
)
parser_pgm.add_argument("system")
parser_pgm.add_argument("backendargs", nargs=argparse.REMAINDER)
parser_pgm.set_defaults(func=pgm)
# fetch subparser
parser_fetch = subparsers.add_parser(
"fetch", help="Fetch a remote core and its dependencies to local cache"
)
parser_fetch.add_argument("core")
parser_fetch.set_defaults(func=fetch)
# core subparser
parser_core = subparsers.add_parser(
"core", help="Subcommands for dealing with cores"
)
core_subparsers = parser_core.add_subparsers()
parser_core.set_defaults(subparser=parser_core)
# core list subparser
parser_core_list = core_subparsers.add_parser("list", help="List available cores")
parser_core_list.set_defaults(func=list_cores)
# core show subparser
parser_core_show = core_subparsers.add_parser(
"show", help="Show information about a core"
)
parser_core_show.add_argument("core", help="Name of the core to show")
parser_core_show.set_defaults(func=core_info)
# list-cores subparser
parser_list_cores = subparsers.add_parser("list-cores", help="List available cores")
parser_list_cores.set_defaults(func=list_cores)
# migrate-capi1-to-capi2 subparser
parser_conv = subparsers.add_parser(
"migrate-capi1-to-capi2",
help="Convert a CAPI1 core file into the CAPI2 file format",
)
parser_conv.add_argument(
"input", metavar="CAPI1_FILE", type=str, help="CAPI1 input file"
)
parser_conv.add_argument("--output", "-o", type=str, help="CAPI2 output file.")
parser_conv.add_argument(
"--inplace", "-i", action="store_true", help="Convert the input file in place."
)
parser_conv.add_argument(
"--nowarn", action="store_true", help="Do not display warning banner."
)
parser_conv.set_defaults(func=migrate_capi1_to_capi2)
# core-info subparser
parser_core_info = subparsers.add_parser(
"core-info", help="Display details about a core"
)
parser_core_info.add_argument("core")
parser_core_info.set_defaults(func=core_info)
# gen subparser
parser_gen = subparsers.add_parser(
"gen", help="Run or show information about generators"
)
parser_gen.set_defaults(subparser=parser_gen)
gen_subparsers = parser_gen.add_subparsers()
# gen list subparser
parser_gen_list = gen_subparsers.add_parser(
"list", help="List available generators"
)
parser_gen_list.set_defaults(func=gen_list)
# gen show subparser
parser_gen_show = gen_subparsers.add_parser(
"show", help="Show information about a generator"
)
parser_gen_show.add_argument("generator", help="Name of the generator to show")
parser_gen_show.set_defaults(func=gen_show)
# list-paths subparser
parser_list_paths = subparsers.add_parser(
"list-paths", help="Display the search order for core root paths"
)
parser_list_paths.set_defaults(func=list_paths)
# library subparser
parser_library = subparsers.add_parser(
"library", help="Subcommands for dealing with library management"
)
library_subparsers = parser_library.add_subparsers()
parser_library.set_defaults(subparser=parser_library)
# library add subparser
parser_library_add = library_subparsers.add_parser(
"add", help="Add new library to fusesoc.conf"
)
parser_library_add.add_argument("name", help="A friendly name for the library")
parser_library_add.add_argument(
"sync-uri", help="The URI source for the library (can be a file system path)"
)
parser_library_add.add_argument(
"--sync-version",
help="Optionally specify the version of the library to use, for providers that support it",
dest="sync-version",
)
parser_library_add.add_argument(
"--location",
help="The location to store the library into (defaults to $XDG_DATA_HOME/[name])",
)
parser_library_add.add_argument(
"--sync-type",
help="The provider type for the library. Defaults to 'git'.",
choices=["git", "local"],
dest="sync-type",
)
parser_library_add.add_argument(
"--no-auto-sync",
action="store_true",
help="Disable automatic updates of the library",
)
parser_library_add.add_argument(
"--global",
action="store_true",
help="Use the global FuseSoc config file in $XDG_CONFIG_HOME/fusesoc/fusesoc.conf",
)
parser_library_add.set_defaults(func=add_library)
# library list subparser
parser_library_list = library_subparsers.add_parser(
"list", help="List core libraries"
)
parser_library_list.set_defaults(func=library_list)
# library update subparser
parser_library_update = library_subparsers.add_parser(
"update", help="Update the FuseSoC core libraries"
)
parser_library_update.add_argument(
"libraries", nargs="*", help="The libraries to update (defaults to all)"
)
parser_library_update.set_defaults(func=update)
# run subparser
parser_run = subparsers.add_parser("run", help="Start a tool flow")
parser_run.add_argument(
"--no-export",
action="store_true",
help="Reference source files from their current location instead of exporting to a build tree",
)
parser_run.add_argument(
"--build-root", help="Output directory for build. Defaults to build/$VLNV"
)
parser_run.add_argument("--setup", action="store_true", help="Execute setup stage")
parser_run.add_argument("--build", action="store_true", help="Execute build stage")
parser_run.add_argument("--run", action="store_true", help="Execute run stage")
parser_run.add_argument("--target", help="Override default target")
parser_run.add_argument("--tool", help="Override default tool for target")
parser_run.add_argument(
"--flag",
help="Set custom use flags. Can be specified multiple times",
action="append",
default=[],
)
parser_run.add_argument(
"--system-name", help="Override default VLNV name for system"
)
parser_run.add_argument("system", help="Select a system to operate on")
parser_run.add_argument(
"backendargs", nargs=argparse.REMAINDER, help="arguments to be sent to backend"
)
parser_run.set_defaults(func=run)
# update subparser
parser_update = subparsers.add_parser(
"update", help="Update the FuseSoC core libraries"
)
parser_update.add_argument(
"libraries",
nargs="*",
help="The libraries (or core roots) to update (defaults to all)",
)
parser_update.set_defaults(func=update)
parser_update.set_defaults(
warn="'fusesoc update' is deprecated. Use 'fusesoc library update' instead"
)
return parser
def parse_args(argv):
parser = get_parser()
args = parser.parse_args(argv)
if hasattr(args, "func"):
return args
if hasattr(args, "subparser"):
args.subparser.print_help()
else:
parser.print_help()
return None
def fusesoc(args):
init_logging(args.verbose, args.monochrome, args.log_file)
config = Config(file=args.config)
cm = init_coremanager(config, args.cores_root)
# Run the function
args.func(cm, args)
def main():
args = parse_args(sys.argv[1:])
if not args:
exit(0)
logger.debug("Command line arguments: " + str(sys.argv))
fusesoc(args)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_CONFIG_HOME"]
|
python
| 1 | 0 | |
21022022-3.py
|
"""
The class has been told about the following loop commands today:
Break: completely exits your loop at the point you set
Continue: goes back to the beginning of your loop at the point you set
Pass: Tells python that there is code rather than nothing
Exercise: Continue, Break, Pass commands:
-Create a list with at least 15 random entries
-Iterate over it with a for loop and add up the elements
-Create one if statement in your loop that can’t be true and add a pass
-Include a second if statement in your loop that breaks the loop if your sum gets bigger than a threshold (you choose)
-Include a third if statement in your loop that uses continue if the current entry of your list is in a specific range, e.g. 50 to 60, and prints out something in all other cases (use else)
-Print out the sum and the number of
"""
# Allows our list of randomly-generated integers
import random
# Generate a list of random integers
randomlist = []
for i in range(0, 15):
n = random.randint(1, 300)
randomlist.append(n)
print(f"List generated is: {randomlist}")
# Define our function which performs a summation of the list entries
# and comments on the size of each integer
def list_break_pass_continue_demo(list):
# Set summation variable to starting level 0
sum = 0
    # Build a range over the list indices so each entry can be addressed by position
newlist = range(len(list))
# for-loop subjects each list entry to if-else tree
for i in newlist:
sum = sum + list[i]
        if list[i] < 0:
print(f"You're not going to see this one.")
pass
elif sum > 2000:
print(f"Sum exceeded 2000 at point {i + 1} ({list[i]}) so I stopped reading the list.")
break
elif 250 >= list[i] > 49:
print(f"{list[i]} is between 50 and 250.\nSum right now is {sum}")
continue
else:
print(f"{list[i]} is not between 50 and 250.\nSum right now is {sum}")
pass
# Informative message when loop is exited
print(f"Sum of list until point {i + 1} of {len(list)} is: {sum}")
# Summation/Assessment function is called
list_break_pass_continue_demo(randomlist)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "url_shortener.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
old/dccdc/train.py
|
import datetime
import math
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import dataset
import loss
import metrics
import model
import optimizer
FLAGS = flags.FLAGS
flags.DEFINE_string('model_path_prefix', 'models',
'Local path to saved models.')
def plot_ts(ts, mask):
mask = tf.cast(mask, tf.bool).numpy()
x = np.array(range(ts.shape[0]))[mask]
y = ts[mask]
with plt.xkcd():
plt.figure(figsize=(10, 3))
plt.scatter(x, y)
plt.show()
def _create_multistep_fn(model, optimizer, ds_iter, metrics, steps_n):
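  # Returns a tf.function that runs `steps_n` optimizer steps per call so the
  # outer Python loop incurs less per-step overhead.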
@tf.function
def multistep_fn():
loss_acc = 0.0
for _ in tf.range(steps_n):
ts, mask, affinity, weights = next(ds_iter)
with tf.GradientTape() as tape:
y_logits, y_mask = model((ts, mask), training=True)
constraint = loss.affinity_log_loss(affinity, y_logits, weights)
grads = tape.gradient(constraint, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
loss_acc += constraint
metrics.update_state(affinity, tf.sigmoid(y_logits))
return loss_acc / float(steps_n)
return multistep_fn
# 4 epochs
STEPS_N = int(8e6)
TS_LENGTH = 256
BATCH_N = 128
MULTISTEP_N = 4
def main(argv):
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
train_ds = dataset.create_dataset('data/ccdc_train_*.tfrecord.gz', BATCH_N)
validation_ds = dataset.create_dataset('data/ccdc_validation.tfrecord.gz',
BATCH_N)
train_iterations = math.floor(STEPS_N / BATCH_N / MULTISTEP_N)
dccdc = model.DCCDC(TS_LENGTH)
opt = optimizer.ranger_with_exp_decay(0.1, 300, 0.005,
train_iterations * MULTISTEP_N)
break_stats = metrics.BreakStats(BATCH_N, TS_LENGTH, max_breaks=64)
ds_iter = iter(train_ds)
step_fn = _create_multistep_fn(dccdc, opt, ds_iter, break_stats, MULTISTEP_N)
print(f'Training for {train_iterations} iterations.')
for iteration in range(train_iterations):
if iteration % 10 == 0:
print(break_stats.result())
print('Reset...')
break_stats.reset_state()
loss_value = step_fn()
step = opt.iterations
print('Step: {}'.format(step))
print(loss_value, opt.learning_rate(step))
print('Training complete!')
model_string_ts = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
model_name = FLAGS.model_path_prefix + '\\' + model_string_ts
dccdc.save(model_name, include_optimizer=False)
if __name__ == '__main__':
app.run(main)
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
session.go
|
package remotedialer
import (
"context"
"errors"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/gorilla/websocket"
)
type Session struct {
sync.Mutex
nextConnID int64
clientKey string
sessionKey int64
conn *wsConn
conns map[int64]*connection
remoteClientKeys map[string]map[int]bool
auth ConnectAuthorizer
pingCancel context.CancelFunc
pingWait sync.WaitGroup
dialer Dialer
client bool
}
// PrintTunnelData No tunnel logging by default
var PrintTunnelData bool
func init() {
if os.Getenv("CATTLE_TUNNEL_DATA_DEBUG") == "true" {
PrintTunnelData = true
}
}
func NewClientSession(auth ConnectAuthorizer, conn *websocket.Conn) *Session {
return NewClientSessionWithDialer(auth, conn, nil)
}
func NewClientSessionWithDialer(auth ConnectAuthorizer, conn *websocket.Conn, dialer Dialer) *Session {
return &Session{
clientKey: "client",
conn: newWSConn(conn),
conns: map[int64]*connection{},
auth: auth,
client: true,
dialer: dialer,
}
}
func newSession(sessionKey int64, clientKey string, conn *websocket.Conn) *Session {
return &Session{
nextConnID: 1,
clientKey: clientKey,
sessionKey: sessionKey,
conn: newWSConn(conn),
conns: map[int64]*connection{},
remoteClientKeys: map[string]map[int]bool{},
}
}
func (s *Session) startPings(rootCtx context.Context) {
ctx, cancel := context.WithCancel(rootCtx)
s.pingCancel = cancel
s.pingWait.Add(1)
go func() {
defer s.pingWait.Done()
t := time.NewTicker(PingWriteInterval)
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
s.conn.Lock()
if err := s.conn.conn.WriteControl(websocket.PingMessage, []byte(""), time.Now().Add(PingWaitDuration)); err != nil {
GetLogger().Errorf("Error writing ping %s", err)
}
GetLogger().Debugf("Wrote ping")
s.conn.Unlock()
}
}
}()
}
func (s *Session) stopPings() {
if s.pingCancel == nil {
return
}
s.pingCancel()
s.pingWait.Wait()
}
func (s *Session) Serve(ctx context.Context) (int, error) {
if s.client {
s.startPings(ctx)
}
for {
msType, reader, err := s.conn.NextReader()
if err != nil {
return 400, err
}
if msType != websocket.BinaryMessage {
return 400, errWrongMessageType
}
if err := s.serveMessage(ctx, reader); err != nil {
return 500, err
}
}
}
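// serveMessage decodes one tunnel message and dispatches it: Connect triggers
// a client-side dial, AddClient/RemoveClient maintain the remote client table,
// and Data/Error messages are routed to the matching connection.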
func (s *Session) serveMessage(ctx context.Context, reader io.Reader) error {
message, err := newServerMessage(reader)
if err != nil {
return err
}
if PrintTunnelData {
		GetLogger().Debugf("REQUEST %v", message)
}
if message.messageType == Connect {
if s.auth == nil || !s.auth(message.proto, message.address) {
return errors.New("connect not allowed")
}
s.clientConnect(ctx, message)
return nil
}
s.Lock()
if message.messageType == AddClient && s.remoteClientKeys != nil {
err := s.addRemoteClient(message.address)
s.Unlock()
return err
} else if message.messageType == RemoveClient {
err := s.removeRemoteClient(message.address)
s.Unlock()
return err
}
conn := s.conns[message.connID]
s.Unlock()
if conn == nil {
if message.messageType == Data {
err := fmt.Errorf("connection not found %s/%d/%d", s.clientKey, s.sessionKey, message.connID)
newErrorMessage(message.connID, err).WriteTo(defaultDeadline(), s.conn)
}
return nil
}
switch message.messageType {
case Data:
if err := conn.OnData(message); err != nil {
s.closeConnection(message.connID, err)
}
case Error:
s.closeConnection(message.connID, message.Err())
}
return nil
}
func defaultDeadline() time.Time {
return time.Now().Add(time.Minute)
}
func parseAddress(address string) (string, int, error) {
parts := strings.SplitN(address, "/", 2)
if len(parts) != 2 {
return "", 0, errors.New("not / separated")
}
v, err := strconv.Atoi(parts[1])
return parts[0], v, err
}
func (s *Session) addRemoteClient(address string) error {
clientKey, sessionKey, err := parseAddress(address)
if err != nil {
return fmt.Errorf("invalid remote Session %s: %v", address, err)
}
keys := s.remoteClientKeys[clientKey]
if keys == nil {
keys = map[int]bool{}
s.remoteClientKeys[clientKey] = keys
}
keys[sessionKey] = true
if PrintTunnelData {
GetLogger().Debugf("ADD REMOTE CLIENT %s, SESSION %d", address, s.sessionKey)
}
return nil
}
func (s *Session) removeRemoteClient(address string) error {
clientKey, sessionKey, err := parseAddress(address)
if err != nil {
return fmt.Errorf("invalid remote Session %s: %v", address, err)
}
keys := s.remoteClientKeys[clientKey]
delete(keys, int(sessionKey))
if len(keys) == 0 {
delete(s.remoteClientKeys, clientKey)
}
if PrintTunnelData {
GetLogger().Debugf("REMOVE REMOTE CLIENT %s, SESSION %d", address, s.sessionKey)
}
return nil
}
func (s *Session) closeConnection(connID int64, err error) {
s.Lock()
conn := s.conns[connID]
delete(s.conns, connID)
if PrintTunnelData {
GetLogger().Debugf("CONNECTIONS %d %d", s.sessionKey, len(s.conns))
}
s.Unlock()
if conn != nil {
conn.tunnelClose(err)
}
}
func (s *Session) clientConnect(ctx context.Context, message *message) {
conn := newConnection(message.connID, s, message.proto, message.address)
s.Lock()
s.conns[message.connID] = conn
if PrintTunnelData {
GetLogger().Debugf("CONNECTIONS %d %d", s.sessionKey, len(s.conns))
}
s.Unlock()
go clientDial(ctx, s.dialer, conn, message)
}
type connResult struct {
conn net.Conn
err error
}
func (s *Session) Dial(ctx context.Context, proto, address string) (net.Conn, error) {
return s.serverConnectContext(ctx, proto, address)
}
func (s *Session) serverConnectContext(ctx context.Context, proto, address string) (net.Conn, error) {
deadline, ok := ctx.Deadline()
if ok {
return s.serverConnect(deadline, proto, address)
}
result := make(chan connResult, 1)
go func() {
c, err := s.serverConnect(defaultDeadline(), proto, address)
result <- connResult{conn: c, err: err}
}()
select {
case <-ctx.Done():
// We don't want to orphan an open connection so we wait for the result and immediately close it
go func() {
r := <-result
if r.err == nil {
r.conn.Close()
}
}()
return nil, ctx.Err()
case r := <-result:
return r.conn, r.err
}
}
func (s *Session) serverConnect(deadline time.Time, proto, address string) (net.Conn, error) {
connID := atomic.AddInt64(&s.nextConnID, 1)
conn := newConnection(connID, s, proto, address)
s.Lock()
s.conns[connID] = conn
if PrintTunnelData {
GetLogger().Debugf("CONNECTIONS %d %d", s.sessionKey, len(s.conns))
}
s.Unlock()
_, err := s.writeMessage(deadline, newConnect(connID, proto, address))
if err != nil {
s.closeConnection(connID, err)
return nil, err
}
return conn, err
}
func (s *Session) writeMessage(deadline time.Time, message *message) (int, error) {
if PrintTunnelData {
GetLogger().Debugf("WRITE %v", message)
}
return message.WriteTo(deadline, s.conn)
}
func (s *Session) Close() {
s.Lock()
defer s.Unlock()
s.stopPings()
for _, connection := range s.conns {
connection.tunnelClose(errors.New("tunnel disconnect"))
}
s.conns = map[int64]*connection{}
}
func (s *Session) sessionAdded(clientKey string, sessionKey int64) {
client := fmt.Sprintf("%s/%d", clientKey, sessionKey)
_, err := s.writeMessage(time.Time{}, newAddClient(client))
if err != nil {
s.conn.conn.Close()
}
}
func (s *Session) sessionRemoved(clientKey string, sessionKey int64) {
client := fmt.Sprintf("%s/%d", clientKey, sessionKey)
_, err := s.writeMessage(time.Time{}, newRemoveClient(client))
if err != nil {
s.conn.conn.Close()
}
}
|
[
"\"CATTLE_TUNNEL_DATA_DEBUG\""
] |
[] |
[
"CATTLE_TUNNEL_DATA_DEBUG"
] |
[]
|
["CATTLE_TUNNEL_DATA_DEBUG"]
|
go
| 1 | 0 | |
cmd/kaniko-gcr/main.go
|
package main
import (
"fmt"
"io/ioutil"
"os"
"github.com/joho/godotenv"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
kaniko "github.com/drone/drone-kaniko"
)
const (
// GCR JSON key file path
gcrKeyPath string = "/kaniko/config.json"
gcrEnvVariable string = "GOOGLE_APPLICATION_CREDENTIALS"
)
var (
version = "unknown"
)
func main() {
// Load env-file if it exists first
if env := os.Getenv("PLUGIN_ENV_FILE"); env != "" {
godotenv.Load(env)
}
app := cli.NewApp()
app.Name = "kaniko gcr plugin"
app.Usage = "kaniko gcr plugin"
app.Action = run
app.Version = version
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "dockerfile",
Usage: "build dockerfile",
Value: "Dockerfile",
EnvVar: "PLUGIN_DOCKERFILE",
},
cli.StringFlag{
Name: "context",
Usage: "build context",
Value: ".",
EnvVar: "PLUGIN_CONTEXT",
},
cli.StringSliceFlag{
Name: "tags",
Usage: "build tags",
Value: &cli.StringSlice{"latest"},
EnvVar: "PLUGIN_TAGS",
FilePath: ".tags",
},
cli.StringSliceFlag{
Name: "args",
Usage: "build args",
EnvVar: "PLUGIN_BUILD_ARGS",
},
cli.StringFlag{
Name: "target",
Usage: "build target",
EnvVar: "PLUGIN_TARGET",
},
cli.StringFlag{
Name: "repo",
Usage: "gcr repository",
EnvVar: "PLUGIN_REPO",
},
cli.StringSliceFlag{
Name: "custom-labels",
Usage: "additional k=v labels",
EnvVar: "PLUGIN_CUSTOM_LABELS",
},
cli.StringFlag{
Name: "registry",
Usage: "gcr registry",
Value: "gcr.io",
EnvVar: "PLUGIN_REGISTRY",
},
cli.StringFlag{
Name: "json-key",
			Usage: "GCR JSON key",
EnvVar: "PLUGIN_JSON_KEY",
},
cli.StringFlag{
Name: "snapshot-mode",
Usage: "Specify one of full, redo or time as snapshot mode",
EnvVar: "PLUGIN_SNAPSHOT_MODE",
},
}
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}
func run(c *cli.Context) error {
err := setupGCRAuth(c.String("json-key"))
if err != nil {
return err
}
if c.String("repo") == "" {
return fmt.Errorf("repo must be specified")
}
plugin := kaniko.Plugin{
Build: kaniko.Build{
Dockerfile: c.String("dockerfile"),
Context: c.String("context"),
Tags: c.StringSlice("tags"),
Args: c.StringSlice("args"),
Target: c.String("target"),
Repo: fmt.Sprintf("%s/%s", c.String("registry"), c.String("repo")),
Labels: c.StringSlice("custom-labels"),
SnapshotMode: c.String("snapshot-mode"),
},
}
return plugin.Exec()
}
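// setupGCRAuth writes the supplied JSON key to the kaniko config path and
// points GOOGLE_APPLICATION_CREDENTIALS at it so kaniko can authenticate
// against GCR.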
func setupGCRAuth(jsonKey string) error {
if jsonKey == "" {
return fmt.Errorf("GCR JSON key must be specified")
}
err := ioutil.WriteFile(gcrKeyPath, []byte(jsonKey), 0644)
if err != nil {
return errors.Wrap(err, "failed to write GCR JSON key")
}
err = os.Setenv(gcrEnvVariable, gcrKeyPath)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to set %s environment variable", gcrEnvVariable))
}
return nil
}
|
[
"\"PLUGIN_ENV_FILE\""
] |
[] |
[
"PLUGIN_ENV_FILE"
] |
[]
|
["PLUGIN_ENV_FILE"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"encoding/json"
"fmt"
coinapi "github.com/omaribrown/coinalert/data"
"github.com/omaribrown/coinalert/slack"
"github.com/robfig/cron"
"io/ioutil"
"log"
"net/http"
"os"
)
func main() {
port := os.Getenv("PORT")
http.HandleFunc("/", RootHandler)
go coinToSlack()
log.Fatal(http.ListenAndServe(":"+port, nil))
//log.Fatal(http.ListenAndServe(":8080", nil))
}
func RootHandler(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := ioutil.ReadAll(r.Body)
if err != nil {
fmt.Fprintln(w, err)
}
fmt.Fprintln(w, string(body))
}
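// coinToSlack polls CoinAPI every minute via cron and forwards the latest
// BTC/USD 1-minute OHLCV candle to the configured Slack channel.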
func coinToSlack() {
//envErr := godotenv.Load(".env")
//if envErr != nil {
// fmt.Printf("Could not load .env file")
// os.Exit(1)
//}
CoinAPIKey := os.Getenv("API_KEY")
coinapi := &coinapi.Coinapi{
API_KEY: CoinAPIKey,
Client: &http.Client{},
}
slackService := &slack.SlackService{
SlackToken: os.Getenv("SLACK_AUTH_TOKEN"),
SlackChannelID: os.Getenv("SLACK_CHANNEL_ID"),
}
c := cron.New()
fmt.Println("starting cron job")
c.AddFunc("@every 1m", func() {
ohlvcLatest := coinapi.GetCoinLatest("BTC/USD", "1MIN", "1")
remarshal, err := json.Marshal(ohlvcLatest)
if err != nil {
panic(err)
}
stringData := string(remarshal)
slackService.SendSlackMessage(slack.SlackMessage{
Pretext: "Incoming crypto data...",
Text: stringData,
})
})
c.Start()
select {}
}
// Period ID's:
// Second 1SEC, 2SEC, 3SEC, 4SEC, 5SEC, 6SEC, 10SEC, 15SEC, 20SEC, 30SEC
// Minute 1MIN, 2MIN, 3MIN, 4MIN, 5MIN, 6MIN, 10MIN, 15MIN, 20MIN, 30MIN
// Hour 1HRS, 2HRS, 3HRS, 4HRS, 6HRS, 8HRS, 12HRS
// Day 1DAY, 2DAY, 3DAY, 5DAY, 7DAY, 10DAY
// Month 1MTH, 2MTH, 3MTH, 4MTH, 6MTH
// Year 1YRS, 2YRS, 3YRS, 4YRS, 5YRS
|
[
"\"PORT\"",
"\"API_KEY\"",
"\"SLACK_AUTH_TOKEN\"",
"\"SLACK_CHANNEL_ID\""
] |
[] |
[
"PORT",
"API_KEY",
"SLACK_CHANNEL_ID",
"SLACK_AUTH_TOKEN"
] |
[]
|
["PORT", "API_KEY", "SLACK_CHANNEL_ID", "SLACK_AUTH_TOKEN"]
|
go
| 4 | 0 | |
rlunch/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rlunch.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
infrastructure/db/db.go
|
package db
import (
"log"
"os"
"path/filepath"
"runtime"
"github.com/GuiFerrari/codepix-go/domain/model"
"github.com/jinzhu/gorm"
"github.com/joho/godotenv"
_ "github.com/lib/pq"
_ "gorm.io/driver/sqlite"
)
func init() {
_, b, _, _ := runtime.Caller(0)
basepath := filepath.Dir(b)
err := godotenv.Load(basepath + "/../../.env")
if err != nil {
log.Fatalf("Error loading .env files")
}
}
func ConnectDB(env string) *gorm.DB {
var dsn string
var db *gorm.DB
var err error
if env != "test" {
dsn = os.Getenv("dsn")
db, err = gorm.Open(os.Getenv("dbType"), dsn)
} else {
dsn = os.Getenv("dsnTest")
db, err = gorm.Open(os.Getenv("dbTypeTest"), dsn)
}
if err != nil {
log.Fatalf("Error connecting to database: %v", err)
panic(err)
}
if os.Getenv("debug") == "true" {
db.LogMode(true)
}
if os.Getenv("AutoMigrateDb") == "true" {
db.AutoMigrate(&model.Bank{}, &model.Account{}, &model.PixKey{}, &model.Transaction{})
}
return db
}
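// Example usage (sketch): any env value other than "test" reads dsn/dbType from
// the .env file, while "test" switches to dsnTest/dbTypeTest:
//   db := ConnectDB("dev")
//   defer db.Close()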
|
[
"\"dsn\"",
"\"dbType\"",
"\"dsnTest\"",
"\"dbTypeTest\"",
"\"debug\"",
"\"AutoMigrateDb\""
] |
[] |
[
"dbTypeTest",
"debug",
"dsn",
"dsnTest",
"AutoMigrateDb",
"dbType"
] |
[]
|
["dbTypeTest", "debug", "dsn", "dsnTest", "AutoMigrateDb", "dbType"]
|
go
| 6 | 0 | |
code/utilities.py
|
import os
import sys
import json
import logging
import pathlib
from logging.handlers import RotatingFileHandler
## Config
CONFIG_OPTIONS = {} # This'll be populated on import
CONFIG_NAME = "config.json" # The name of the config file
DEV_CONFIG_NAME = "config.dev.json" # The name of the dev config file (overrides properties stored in the normal and prod config files)
PROD_CONFIG_NAME = "config.prod.json" # The name of the prod config file (overrides properties stored in the normal config file)
DIRS_FROM_ROOT = 1 # How many directories away this script is from the root
PLATFORM = sys.platform
def get_root_path():
## -1 includes this script itself in the realpath
return os.sep.join(os.path.realpath(__file__).split(os.path.sep)[:(-1 - DIRS_FROM_ROOT)])
def load_json(path):
with open(path) as fd:
return json.load(fd)
def load_config():
config_path = pathlib.Path(os.sep.join([get_root_path(), CONFIG_NAME]))
if (not config_path.exists()):
raise RuntimeError("Unable to find config.json file in root!")
config = load_json(config_path)
## Override the config values if the prod config file exists.
prod_config_path = pathlib.Path(os.sep.join([get_root_path(), PROD_CONFIG_NAME]))
if (prod_config_path.exists()):
prod_config = load_json(prod_config_path)
for key, value in prod_config.items():
config[key] = value
## Override the config values if the dev config file exists.
dev_config_path = pathlib.Path(os.sep.join([get_root_path(), DEV_CONFIG_NAME]))
if (dev_config_path.exists()):
dev_config = load_json(dev_config_path)
for key, value in dev_config.items():
config[key] = value
return config
def is_linux():
return ("linux" in PLATFORM)
def is_windows():
return ("win" in PLATFORM)
def initialize_logging(logger):
FORMAT = "%(asctime)s - %(module)s - %(funcName)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(FORMAT)
logging.basicConfig(format=FORMAT)
log_level = str(CONFIG_OPTIONS.get("log_level", "DEBUG"))
if (log_level == "DEBUG"):
logger.setLevel(logging.DEBUG)
elif (log_level == "INFO"):
logger.setLevel(logging.INFO)
elif (log_level == "WARNING"):
logger.setLevel(logging.WARNING)
elif (log_level == "ERROR"):
logger.setLevel(logging.ERROR)
elif (log_level == "CRITICAL"):
logger.setLevel(logging.CRITICAL)
else:
logger.setLevel(logging.DEBUG)
## Get the directory containing the logs and make sure it exists, creating it if it doesn't
log_path = CONFIG_OPTIONS.get("log_path")
if (not log_path):
log_path = os.path.sep.join([get_root_path(), "logs"]) # Default logs to a 'logs' folder inside the project root
pathlib.Path(log_path).mkdir(parents=True, exist_ok=True) # Basically a mkdir -p $log_path
log_file = os.path.sep.join([log_path, "clipster.log"]) # Build the true path to the log file
## Setup and add the rotating log handler to the logger
max_bytes = CONFIG_OPTIONS.get("log_max_bytes", 1024 * 1024 * 10) # 10 MB
backup_count = CONFIG_OPTIONS.get("log_backup_count", 10)
rotating_log_handler = RotatingFileHandler(log_file, maxBytes=max_bytes, backupCount=backup_count)
rotating_log_handler.setFormatter(formatter)
logger.addHandler(rotating_log_handler)
return logger
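# Example (sketch, assuming this module is imported as "utilities"):
#   logger = utilities.initialize_logging(logging.getLogger(__name__))
#   logger.info("clipster started")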
os.environ = {}
CONFIG_OPTIONS = load_config()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
plugins/inputs/fail2ban/fail2ban_test.go
|
package fail2ban
import (
"fmt"
"os"
"os/exec"
"strings"
"testing"
"github.com/influxdata/telegraf/testutil"
)
// By all rights, we should use `string literal`, but the string contains "`".
var execStatusOutput = "Status\n" +
"|- Number of jail:\t3\n" +
"`- Jail list:\tdovecot, postfix, sshd"
var execStatusDovecotOutput = "Status for the jail: dovecot\n" +
"|- Filter\n" +
"| |- Currently failed:\t11\n" +
"| |- Total failed:\t22\n" +
"| `- File list:\t/var/log/maillog\n" +
"`- Actions\n" +
" |- Currently banned:\t0\n" +
" |- Total banned:\t100\n" +
" `- Banned IP list:"
var execStatusPostfixOutput = "Status for the jail: postfix\n" +
"|- Filter\n" +
"| |- Currently failed:\t4\n" +
"| |- Total failed:\t10\n" +
"| `- File list:\t/var/log/maillog\n" +
"`- Actions\n" +
" |- Currently banned:\t3\n" +
" |- Total banned:\t60\n" +
" `- Banned IP list:\t192.168.10.1 192.168.10.3"
var execStatusSshdOutput = "Status for the jail: sshd\n" +
"|- Filter\n" +
"| |- Currently failed:\t0\n" +
"| |- Total failed:\t5\n" +
"| `- File list:\t/var/log/secure\n" +
"`- Actions\n" +
" |- Currently banned:\t2\n" +
" |- Total banned:\t50\n" +
" `- Banned IP list:\t192.168.0.1 192.168.1.1"
func TestGather(t *testing.T) {
f := Fail2ban{
path: "/usr/bin/fail2ban-client",
}
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
var acc testutil.Accumulator
err := f.Gather(&acc)
if err != nil {
t.Fatal(err)
}
fields1 := map[string]interface{}{
"banned": 2,
"failed": 0,
}
tags1 := map[string]string{
"jail": "sshd",
}
fields2 := map[string]interface{}{
"banned": 3,
"failed": 4,
}
tags2 := map[string]string{
"jail": "postfix",
}
fields3 := map[string]interface{}{
"banned": 0,
"failed": 11,
}
tags3 := map[string]string{
"jail": "dovecot",
}
acc.AssertContainsTaggedFields(t, "fail2ban", fields1, tags1)
acc.AssertContainsTaggedFields(t, "fail2ban", fields2, tags2)
acc.AssertContainsTaggedFields(t, "fail2ban", fields3, tags3)
}
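// fakeExecCommand swaps exec.Command for a call that re-runs this test binary
// with GO_WANT_HELPER_PROCESS=1, so that TestHelperProcess below can stand in
// for fail2ban-client and emit the canned status output above (the usual
// exec-mocking pattern from the os/exec test suite).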
func fakeExecCommand(command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestHelperProcess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
func TestHelperProcess(_ *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
args := os.Args
cmd, args := args[3], args[4:]
if !strings.HasSuffix(cmd, "fail2ban-client") {
//nolint:errcheck,revive // Test will fail anyway
fmt.Fprint(os.Stdout, "command not found")
//nolint:revive // os.Exit called intentionally
os.Exit(1)
}
if len(args) == 1 && args[0] == "status" {
//nolint:errcheck,revive // Test will fail anyway
fmt.Fprint(os.Stdout, execStatusOutput)
//nolint:revive // os.Exit called intentionally
os.Exit(0)
} else if len(args) == 2 && args[0] == "status" {
if args[1] == "sshd" {
//nolint:errcheck,revive // Test will fail anyway
fmt.Fprint(os.Stdout, execStatusSshdOutput)
//nolint:revive // os.Exit called intentionally
os.Exit(0)
} else if args[1] == "postfix" {
//nolint:errcheck,revive // Test will fail anyway
fmt.Fprint(os.Stdout, execStatusPostfixOutput)
//nolint:revive // os.Exit called intentionally
os.Exit(0)
} else if args[1] == "dovecot" {
//nolint:errcheck,revive // Test will fail anyway
fmt.Fprint(os.Stdout, execStatusDovecotOutput)
//nolint:revive // os.Exit called intentionally
os.Exit(0)
}
}
//nolint:errcheck,revive // Test will fail anyway
fmt.Fprint(os.Stdout, "invalid argument")
//nolint:revive // os.Exit called intentionally
os.Exit(1)
}
|
[
"\"GO_WANT_HELPER_PROCESS\""
] |
[] |
[
"GO_WANT_HELPER_PROCESS"
] |
[]
|
["GO_WANT_HELPER_PROCESS"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
//"io"
"fmt"
"os"
"log"
"github.com/docker/docker/client"
"github.com/docker/docker/api/types"
//"github.com/docker/docker/api/types/container"
"golang.org/x/net/context"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"strings"
)
func check(e error) {
if e != nil {
panic(e)
}
}
func describe_nodes(role string, states []string, leader string) []string {
sess := session.Must(session.NewSession())
//awsRegion := "us-east-1"
awsRegion := "us-west-2"
svc := ec2.New(sess, &aws.Config{Region: aws.String(awsRegion)})
//fmt.Printf("listing instances with tag %v in: %v\n", nameFilter, awsRegion)
// Build the instance-state-name filter from the requested states instead of
// hardcoding "running", so queries for stopped/terminated nodes behave as intended.
stateValues := make([]*string, 0, len(states))
for _, stat := range states {
fmt.Println("state: " + stat)
stateValues = append(stateValues, aws.String(stat))
}
filters := []*ec2.Filter{
{
Name: aws.String("tag:Name"),
Values: []*string{aws.String(role)},
},
{
Name: aws.String("instance-state-name"),
Values: stateValues,
},
{
Name: aws.String("tag:Init"),
Values: []*string{
aws.String(strings.Join([]string{leader}, "")),
},
},
}
params := &ec2.DescribeInstancesInput{Filters: filters}
resp, err := svc.DescribeInstances(params)
if err != nil {
fmt.Println("There was an error listing instances in", awsRegion, err.Error())
log.Fatal(err.Error())
}
var instancesIds []string
for _, reservation := range resp.Reservations {
for _, instance := range reservation.Instances {
instancesIds = append(instancesIds, *instance.InstanceId)
}
}
//fmt.Println(instancesIds)
return instancesIds
}
func main() {
// Getting environment variables
role := os.Getenv("ROLE")
//bm_env := os.Getenv("BMENV")
//current_instance := os.Getenv("INSTANCE")
fmt.Println(role)
//fmt.Println(bm_env)
//fmt.Println(current_instance)
// Initializing the docker cli
ctx := context.Background()
cli, err := client.NewEnvClient()
check(err)
cli.ContainerList(ctx, types.ContainerListOptions{})
managers_running := describe_nodes("manager", []string{"running"}, "true")
fmt.Printf("%+v\n", managers_running)
managers_replaced := describe_nodes("manager", []string{"shutting-down", "stopped", "terminated", "stopping"}, "true")
fmt.Printf("%+v\n", managers_replaced)
worker_replaced := describe_nodes("worker", []string{"shutting-down", "stopped", "terminated", "stopping"}, "false")
fmt.Printf("%+v\n", worker_replaced)
}
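// Note (assumption): credentials are resolved by the AWS SDK's default chain
// (environment, shared credentials file, or instance role); ROLE is the only
// variable this program reads explicitly at the moment.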
|
[
"\"ROLE\"",
"\"BMENV\"",
"\"INSTANCE\""
] |
[] |
[
"ROLE",
"BMENV",
"INSTANCE"
] |
[]
|
["ROLE", "BMENV", "INSTANCE"]
|
go
| 3 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "heroku_blog.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/cmd/compile/internal/ssa/debug_test.go
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa_test
import (
"bytes"
"flag"
"fmt"
"internal/testenv"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
"time"
)
var update = flag.Bool("u", false, "update test reference files")
var verbose = flag.Bool("v", false, "print debugger interactions (very verbose)")
var dryrun = flag.Bool("n", false, "just print the command line and first debugging bits")
var useDelve = flag.Bool("d", false, "use Delve (dlv) instead of gdb, use dlv reference files")
var force = flag.Bool("f", false, "force run under not linux-amd64; also do not use tempdir")
var repeats = flag.Bool("r", false, "detect repeats in debug steps and don't ignore them")
var inlines = flag.Bool("i", false, "do inlining for gdb (makes testing flaky till inlining info is correct)")
var hexRe = regexp.MustCompile("0x[a-zA-Z0-9]+")
var numRe = regexp.MustCompile("-?[0-9]+")
var stringRe = regexp.MustCompile("\"([^\\\"]|(\\.))*\"")
var leadingDollarNumberRe = regexp.MustCompile("^[$][0-9]+")
var optOutGdbRe = regexp.MustCompile("[<]optimized out[>]")
var numberColonRe = regexp.MustCompile("^ *[0-9]+:")
var gdb = "gdb" // Might be "ggdb" on Darwin, because gdb no longer part of XCode
var debugger = "gdb" // For naming files, etc.
var gogcflags = os.Getenv("GO_GCFLAGS")
// optimizedLibs usually means "not running in a noopt test builder".
var optimizedLibs = (!strings.Contains(gogcflags, "-N") && !strings.Contains(gogcflags, "-l"))
// TestNexting go-builds a file, then uses a debugger (default gdb, optionally delve)
// to next through the generated executable, recording each line landed at, and
// then compares those lines with reference file(s).
// Flag -u updates the reference file(s).
// Flag -d changes the debugger to delve (and uses delve-specific reference files)
// Flag -v is ever-so-slightly verbose.
// Flag -n is for dry-run, and prints the shell and first debug commands.
//
// Because this test (combined with existing compiler deficiencies) is flaky,
// for gdb-based testing by default inlining is disabled
// (otherwise output depends on library internals)
// and for both gdb and dlv by default repeated lines in the next stream are ignored
// (because this appears to be timing-dependent in gdb, and the cleanest fix is in code common to gdb and dlv).
//
// Also by default, any source code outside of .../testdata/ is not mentioned
// in the debugging histories. This deals both with inlined library code once
// the compiler is generating clean inline records, and also deals with
// runtime code between return from main and process exit. This is hidden
// so that those files (in the runtime/library) can change without affecting
// this test.
//
// These choices can be reversed with -i (inlining on) and -r (repeats detected) which
// will also cause their own failures against the expected outputs. Note that if the compiler
// and debugger were behaving properly, the inlined code and repeated lines would not appear,
// so the expected output is closer to what we hope to see, though it also encodes all our
// current bugs.
//
// The file being tested may contain comments of the form
// //DBG-TAG=(v1,v2,v3)
// where DBG = {gdb,dlv} and TAG={dbg,opt}
// each variable may optionally be followed by a / and one or more of S,A,N,O
// to indicate normalization of Strings, (hex) addresses, and numbers.
// "O" is an explicit indication that we expect it to be optimized out.
// For example:
/*
if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A)
*/
// TODO: not implemented for Delve yet, but this is the plan
//
// After a compiler change that causes a difference in the debug behavior, check
// to see if it is sensible or not, and if it is, update the reference files with
// go test debug_test.go -args -u
// (for Delve)
// go test debug_test.go -args -u -d
func TestNexting(t *testing.T) {
skipReasons := "" // Many possible skip reasons, list all that apply
if testing.Short() {
skipReasons = "not run in short mode; "
}
testenv.MustHaveGoBuild(t)
if !*useDelve && !*force && !(runtime.GOOS == "linux" && runtime.GOARCH == "amd64") {
// Running gdb on OSX/darwin is very flaky.
// Sometimes it is called ggdb, depending on how it is installed.
// It also sometimes requires an admin password typed into a dialog box.
// Various architectures tend to differ slightly sometimes, and keeping them
// all in sync is a pain for people who don't have them all at hand,
// so limit testing to amd64 (for now)
skipReasons += "not run unless linux-amd64 or -d (delve) or -f (force); "
}
if *useDelve {
debugger = "dlv"
_, err := exec.LookPath("dlv")
if err != nil {
skipReasons += "not run because dlv (requested by -d option) not on path; "
}
} else {
_, err := exec.LookPath(gdb)
if err != nil {
if runtime.GOOS != "darwin" {
skipReasons += "not run because gdb not on path; "
} else {
// On Darwin, MacPorts installs gdb as "ggdb".
_, err = exec.LookPath("ggdb")
if err != nil {
skipReasons += "not run because gdb (and also ggdb) not on path; "
} else {
gdb = "ggdb"
}
}
}
}
if skipReasons != "" {
t.Skip(skipReasons[:len(skipReasons)-2])
}
optFlags := "" // Whatever flags are needed to test debugging of optimized code.
dbgFlags := "-N -l"
if !*useDelve && !*inlines {
// For gdb (default), disable inlining so that a compiler test does not depend on library code.
// TODO: Technically not necessary in 1.10, but it causes a largish regression that needs investigation.
optFlags += " -l"
}
subTest(t, debugger+"-dbg", "hist", dbgFlags)
subTest(t, debugger+"-dbg", "scopes", dbgFlags)
subTest(t, debugger+"-dbg", "i22558", dbgFlags)
subTest(t, debugger+"-dbg-race", "i22600", dbgFlags, "-race")
optSubTest(t, debugger+"-opt", "hist", optFlags)
optSubTest(t, debugger+"-opt", "scopes", optFlags)
}
// subTest creates a subtest that compiles basename.go with the specified gcflags and additional compiler arguments,
// then runs the debugger on the resulting binary, with any comment-specified actions matching tag triggered.
func subTest(t *testing.T, tag string, basename string, gcflags string, moreargs ...string) {
t.Run(tag+"-"+basename, func(t *testing.T) {
testNexting(t, basename, tag, gcflags, moreargs...)
})
}
// optSubTest is the same as subTest except that it skips the test if the runtime and libraries
// were not compiled with optimization turned on. (The skip may not be necessary with Go 1.10 and later)
func optSubTest(t *testing.T, tag string, basename string, gcflags string, moreargs ...string) {
// If optimized test is run with unoptimized libraries (compiled with -N -l), it is very likely to fail.
// This occurs in the noopt builders (for example).
t.Run(tag+"-"+basename, func(t *testing.T) {
if *force || optimizedLibs {
testNexting(t, basename, tag, gcflags, moreargs...)
} else {
t.Skip("skipping for unoptimized stdlib/runtime")
}
})
}
func testNexting(t *testing.T, base, tag, gcflags string, moreArgs ...string) {
// (1) In testdata, build sample.go into test-sample.<tag>
// (2) Run debugger gathering a history
// (3) Read expected history from testdata/sample.<tag>.nexts
// optionally, write out testdata/sample.<tag>.nexts
testbase := filepath.Join("testdata", base) + "." + tag
tmpbase := filepath.Join("testdata", "test-"+base+"."+tag)
// Use a temporary directory unless -f is specified
if !*force {
tmpdir, err := ioutil.TempDir("", "debug_test")
if err != nil {
panic(fmt.Sprintf("Problem creating TempDir, error %v\n", err))
}
tmpbase = filepath.Join(tmpdir, "test-"+base+"."+tag)
if *verbose {
fmt.Printf("Tempdir is %s\n", tmpdir)
}
defer os.RemoveAll(tmpdir)
}
exe := tmpbase
runGoArgs := []string{"build", "-o", exe, "-gcflags=all=" + gcflags}
runGoArgs = append(runGoArgs, moreArgs...)
runGoArgs = append(runGoArgs, filepath.Join("testdata", base+".go"))
runGo(t, "", runGoArgs...)
nextlog := testbase + ".nexts"
tmplog := tmpbase + ".nexts"
var dbg dbgr
if *useDelve {
dbg = newDelve(tag, exe)
} else {
dbg = newGdb(tag, exe)
}
h1 := runDbgr(dbg, 1000)
if *dryrun {
fmt.Printf("# Tag for above is %s\n", dbg.tag())
return
}
if *update {
h1.write(nextlog)
} else {
h0 := &nextHist{}
h0.read(nextlog)
if !h0.equals(h1) {
// Be very noisy about exactly what's wrong to simplify debugging.
h1.write(tmplog)
cmd := exec.Command("diff", "-u", nextlog, tmplog)
line := asCommandLine("", cmd)
bytes, err := cmd.CombinedOutput()
if err != nil && len(bytes) == 0 {
t.Fatalf("step/next histories differ, diff command %s failed with error=%v", line, err)
}
t.Fatalf("step/next histories differ, diff=\n%s", string(bytes))
}
}
}
type dbgr interface {
start()
stepnext(s string) bool // step or next, possible with parameter, gets line etc. returns true for success, false for unsure response
quit()
hist() *nextHist
tag() string
}
func runDbgr(dbg dbgr, maxNext int) *nextHist {
dbg.start()
if *dryrun {
return nil
}
for i := 0; i < maxNext; i++ {
if !dbg.stepnext("n") {
break
}
}
h := dbg.hist()
return h
}
func runGo(t *testing.T, dir string, args ...string) string {
var stdout, stderr bytes.Buffer
cmd := exec.Command(testenv.GoToolPath(t), args...)
cmd.Dir = dir
if *dryrun {
fmt.Printf("%s\n", asCommandLine("", cmd))
return ""
}
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
t.Fatalf("error running cmd (%s): %v\nstdout:\n%sstderr:\n%s\n", asCommandLine("", cmd), err, stdout.String(), stderr.String())
}
if s := stderr.String(); s != "" {
t.Fatalf("Stderr = %s\nWant empty", s)
}
return stdout.String()
}
// tstring provides two strings, o (stdout) and e (stderr)
type tstring struct {
o string
e string
}
func (t tstring) String() string {
return t.o + t.e
}
type pos struct {
line uint16
file uint8 // Artifact of plans to implement differencing instead of calling out to diff.
}
type nextHist struct {
f2i map[string]uint8
fs []string
ps []pos
texts []string
vars [][]string
}
func (h *nextHist) write(filename string) {
file, err := os.Create(filename)
if err != nil {
panic(fmt.Sprintf("Problem opening %s, error %v\n", filename, err))
}
defer file.Close()
var lastfile uint8
for i, x := range h.texts {
p := h.ps[i]
if lastfile != p.file {
fmt.Fprintf(file, " %s\n", h.fs[p.file-1])
lastfile = p.file
}
fmt.Fprintf(file, "%d:%s\n", p.line, x)
// TODO, normalize between gdb and dlv into a common, comparable format.
for _, y := range h.vars[i] {
y = strings.TrimSpace(y)
fmt.Fprintf(file, "%s\n", y)
}
}
file.Close()
}
func (h *nextHist) read(filename string) {
h.f2i = make(map[string]uint8)
bytes, err := ioutil.ReadFile(filename)
if err != nil {
panic(fmt.Sprintf("Problem reading %s, error %v\n", filename, err))
}
var lastfile string
lines := strings.Split(string(bytes), "\n")
for i, l := range lines {
if len(l) > 0 && l[0] != '#' {
if l[0] == ' ' {
// file -- first two characters expected to be " "
lastfile = strings.TrimSpace(l)
} else if numberColonRe.MatchString(l) {
// line number -- <number>:<line>
colonPos := strings.Index(l, ":")
if colonPos == -1 {
panic(fmt.Sprintf("Line %d (%s) in file %s expected to contain '<number>:' but does not.\n", i+1, l, filename))
}
h.add(lastfile, l[0:colonPos], l[colonPos+1:])
} else {
h.addVar(l)
}
}
}
}
// add appends file (name), line (number) and text (string) to the history,
// provided that the file+line combo does not repeat the previous position,
// and provided that the file is within the testdata directory. The return
// value indicates whether the append occurred.
func (h *nextHist) add(file, line, text string) bool {
// Only record source code in testdata unless the inlines flag is set
if !*inlines && !strings.Contains(file, "/testdata/") {
return false
}
fi := h.f2i[file]
if fi == 0 {
h.fs = append(h.fs, file)
fi = uint8(len(h.fs))
h.f2i[file] = fi
}
line = strings.TrimSpace(line)
var li int
var err error
if line != "" {
li, err = strconv.Atoi(line)
if err != nil {
panic(fmt.Sprintf("Non-numeric line: %s, error %v\n", line, err))
}
}
l := len(h.ps)
p := pos{line: uint16(li), file: fi}
if l == 0 || *repeats || h.ps[l-1] != p {
h.ps = append(h.ps, p)
h.texts = append(h.texts, text)
h.vars = append(h.vars, []string{})
return true
}
return false
}
func (h *nextHist) addVar(text string) {
l := len(h.texts)
h.vars[l-1] = append(h.vars[l-1], text)
}
func invertMapSU8(hf2i map[string]uint8) map[uint8]string {
hi2f := make(map[uint8]string)
for hs, i := range hf2i {
hi2f[i] = hs
}
return hi2f
}
func (h *nextHist) equals(k *nextHist) bool {
if len(h.f2i) != len(k.f2i) {
return false
}
if len(h.ps) != len(k.ps) {
return false
}
hi2f := invertMapSU8(h.f2i)
ki2f := invertMapSU8(k.f2i)
for i, hs := range hi2f {
if hs != ki2f[i] {
return false
}
}
for i, x := range h.ps {
if k.ps[i] != x {
return false
}
}
for i, hv := range h.vars {
kv := k.vars[i]
if len(hv) != len(kv) {
return false
}
for j, hvt := range hv {
if hvt != kv[j] {
return false
}
}
}
return true
}
// canonFileName strips everything before "/src/" from a filename.
// This makes file names portable across different machines,
// home directories, and temporary directories.
func canonFileName(f string) string {
i := strings.Index(f, "/src/")
if i != -1 {
f = f[i+1:]
}
return f
}
/* Delve */
type delveState struct {
cmd *exec.Cmd
tagg string
*ioState
atLineRe *regexp.Regexp // "\n =>"
funcFileLinePCre *regexp.Regexp // "^> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)"
line string
file string
function string
}
func newDelve(tag, executable string, args ...string) dbgr {
cmd := exec.Command("dlv", "exec", executable)
cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb")
if len(args) > 0 {
cmd.Args = append(cmd.Args, "--")
cmd.Args = append(cmd.Args, args...)
}
s := &delveState{tagg: tag, cmd: cmd}
// HAHA Delve has control characters embedded to change the color of the => and the line number
// that would be '(\\x1b\\[[0-9;]+m)?' OR TERM=dumb
s.atLineRe = regexp.MustCompile("\n=>[[:space:]]+[0-9]+:(.*)")
s.funcFileLinePCre = regexp.MustCompile("> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)[)]\n")
s.ioState = newIoState(s.cmd)
return s
}
func (s *delveState) tag() string {
return s.tagg
}
func (s *delveState) stepnext(ss string) bool {
x := s.ioState.writeReadExpect(ss+"\n", "[(]dlv[)] ")
excerpts := s.atLineRe.FindStringSubmatch(x.o)
locations := s.funcFileLinePCre.FindStringSubmatch(x.o)
excerpt := ""
if len(excerpts) > 1 {
excerpt = excerpts[1]
}
if len(locations) > 0 {
fn := canonFileName(locations[2])
if *verbose {
if s.file != fn {
fmt.Printf("%s\n", locations[2]) // don't canonocalize verbose logging
}
fmt.Printf(" %s\n", locations[3])
}
s.line = locations[3]
s.file = fn
s.function = locations[1]
s.ioState.history.add(s.file, s.line, excerpt)
// TODO: here is where variable processing will be added. See gdbState.stepnext as a guide.
// Adding this may require some amount of normalization so that logs are comparable.
return true
}
if *verbose {
fmt.Printf("DID NOT MATCH EXPECTED NEXT OUTPUT\nO='%s'\nE='%s'\n", x.o, x.e)
}
return false
}
func (s *delveState) start() {
if *dryrun {
fmt.Printf("%s\n", asCommandLine("", s.cmd))
fmt.Printf("b main.test\n")
fmt.Printf("c\n")
return
}
err := s.cmd.Start()
if err != nil {
line := asCommandLine("", s.cmd)
panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err))
}
s.ioState.readExpecting(-1, 5000, "Type 'help' for list of commands.")
expect("Breakpoint [0-9]+ set at ", s.ioState.writeReadExpect("b main.test\n", "[(]dlv[)] "))
s.stepnext("c")
}
func (s *delveState) quit() {
expect("", s.ioState.writeRead("q\n"))
}
/* Gdb */
type gdbState struct {
cmd *exec.Cmd
tagg string
args []string
*ioState
atLineRe *regexp.Regexp
funcFileLinePCre *regexp.Regexp
line string
file string
function string
}
func newGdb(tag, executable string, args ...string) dbgr {
// Turn off shell, necessary for Darwin apparently
cmd := exec.Command(gdb, "-ex", "set startup-with-shell off", executable)
cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb")
s := &gdbState{tagg: tag, cmd: cmd, args: args}
s.atLineRe = regexp.MustCompile("(^|\n)([0-9]+)(.*)")
s.funcFileLinePCre = regexp.MustCompile(
"([^ ]+) [(][^)]*[)][ \\t\\n]+at ([^:]+):([0-9]+)")
// runtime.main () at /Users/drchase/GoogleDrive/work/go/src/runtime/proc.go:201
// function file line
// Thread 2 hit Breakpoint 1, main.main () at /Users/drchase/GoogleDrive/work/debug/hist.go:18
s.ioState = newIoState(s.cmd)
return s
}
func (s *gdbState) tag() string {
return s.tagg
}
func (s *gdbState) start() {
run := "run"
for _, a := range s.args {
run += " " + a // Can't quote args for gdb, it will pass them through including the quotes
}
if *dryrun {
fmt.Printf("%s\n", asCommandLine("", s.cmd))
fmt.Printf("tbreak main.test\n")
fmt.Printf("%s\n", run)
return
}
err := s.cmd.Start()
if err != nil {
line := asCommandLine("", s.cmd)
panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err))
}
s.ioState.readExpecting(-1, -1, "[(]gdb[)] ")
x := s.ioState.writeReadExpect("b main.test\n", "[(]gdb[)] ")
expect("Breakpoint [0-9]+ at", x)
s.stepnext(run)
}
func (s *gdbState) stepnext(ss string) bool {
x := s.ioState.writeReadExpect(ss+"\n", "[(]gdb[)] ")
excerpts := s.atLineRe.FindStringSubmatch(x.o)
locations := s.funcFileLinePCre.FindStringSubmatch(x.o)
excerpt := ""
addedLine := false
if len(excerpts) == 0 && len(locations) == 0 {
if *verbose {
fmt.Printf("DID NOT MATCH %s", x.o)
}
return false
}
if len(excerpts) > 0 {
excerpt = excerpts[3]
}
if len(locations) > 0 {
fn := canonFileName(locations[2])
if *verbose {
if s.file != fn {
fmt.Printf("%s\n", locations[2])
}
fmt.Printf(" %s\n", locations[3])
}
s.line = locations[3]
s.file = fn
s.function = locations[1]
addedLine = s.ioState.history.add(s.file, s.line, excerpt)
}
if len(excerpts) > 0 {
if *verbose {
fmt.Printf(" %s\n", excerpts[2])
}
s.line = excerpts[2]
addedLine = s.ioState.history.add(s.file, s.line, excerpt)
}
if !addedLine {
// True if this was a repeat line
return true
}
// Look for //gdb-<tag>=(v1,v2,v3) and print v1, v2, v3
vars := varsToPrint(excerpt, "//"+s.tag()+"=(")
for _, v := range vars {
response := printVariableAndNormalize(v, func(v string) string {
return s.ioState.writeReadExpect("p "+v+"\n", "[(]gdb[)] ").String()
})
s.ioState.history.addVar(response)
}
return true
}
// printVariableAndNormalize extracts any slash-indicated normalizing requests from the variable
// name, then uses printer to get the value of the variable from the debugger, and then
// normalizes and returns the response.
func printVariableAndNormalize(v string, printer func(v string) string) string {
slashIndex := strings.Index(v, "/")
substitutions := ""
if slashIndex != -1 {
substitutions = v[slashIndex:]
v = v[:slashIndex]
}
response := printer(v)
// expect something like "$1 = ..."
dollar := strings.Index(response, "$")
cr := strings.Index(response, "\n")
if dollar == -1 { // some not entirely expected response, whine and carry on.
if cr == -1 {
response = strings.TrimSpace(response) // discards trailing newline
response = strings.Replace(response, "\n", "<BR>", -1)
return "$ Malformed response " + response
}
response = strings.TrimSpace(response[:cr])
return "$ " + response
}
if cr == -1 {
cr = len(response)
}
// Convert the leading $<number> into the variable name to enhance readability
// and reduce scope of diffs if an earlier print-variable is added.
response = strings.TrimSpace(response[dollar:cr])
response = leadingDollarNumberRe.ReplaceAllString(response, v)
// Normalize value as requested.
if strings.Contains(substitutions, "A") {
response = hexRe.ReplaceAllString(response, "<A>")
}
if strings.Contains(substitutions, "N") {
response = numRe.ReplaceAllString(response, "<N>")
}
if strings.Contains(substitutions, "S") {
response = stringRe.ReplaceAllString(response, "<S>")
}
if strings.Contains(substitutions, "O") {
response = optOutGdbRe.ReplaceAllString(response, "<Optimized out, as expected>")
}
return response
}
// varsToPrint takes a source code line, and extracts the comma-separated variable names
// found between lookfor and the next ")".
// For example, if line includes "... //gdb-foo=(v1,v2,v3)" and
// lookfor="//gdb-foo=(", then varsToPrint returns ["v1", "v2", "v3"]
func varsToPrint(line, lookfor string) []string {
var vars []string
if strings.Contains(line, lookfor) {
x := line[strings.Index(line, lookfor)+len(lookfor):]
end := strings.Index(x, ")")
if end == -1 {
panic(fmt.Sprintf("Saw variable list begin %s in %s but no closing ')'", lookfor, line))
}
vars = strings.Split(x[:end], ",")
for i, y := range vars {
vars[i] = strings.TrimSpace(y)
}
}
return vars
}
func (s *gdbState) quit() {
response := s.ioState.writeRead("q\n")
if strings.Contains(response.o, "Quit anyway? (y or n)") {
s.ioState.writeRead("Y\n")
}
}
type ioState struct {
stdout io.ReadCloser
stderr io.ReadCloser
stdin io.WriteCloser
outChan chan string
errChan chan string
last tstring // Output of previous step
history *nextHist
}
func newIoState(cmd *exec.Cmd) *ioState {
var err error
s := &ioState{}
s.history = &nextHist{}
s.history.f2i = make(map[string]uint8)
s.stdout, err = cmd.StdoutPipe()
line := asCommandLine("", cmd)
if err != nil {
panic(fmt.Sprintf("There was an error [stdoutpipe] running '%s', %v\n", line, err))
}
s.stderr, err = cmd.StderrPipe()
if err != nil {
panic(fmt.Sprintf("There was an error [stdouterr] running '%s', %v\n", line, err))
}
s.stdin, err = cmd.StdinPipe()
if err != nil {
panic(fmt.Sprintf("There was an error [stdinpipe] running '%s', %v\n", line, err))
}
s.outChan = make(chan string, 1)
s.errChan = make(chan string, 1)
go func() {
buffer := make([]byte, 4096)
for {
n, err := s.stdout.Read(buffer)
if n > 0 {
s.outChan <- string(buffer[0:n])
}
if err == io.EOF || n == 0 {
break
}
if err != nil {
fmt.Printf("Saw an error forwarding stdout")
break
}
}
close(s.outChan)
s.stdout.Close()
}()
go func() {
buffer := make([]byte, 4096)
for {
n, err := s.stderr.Read(buffer)
if n > 0 {
s.errChan <- string(buffer[0:n])
}
if err == io.EOF || n == 0 {
break
}
if err != nil {
fmt.Printf("Saw an error forwarding stderr")
break
}
}
close(s.errChan)
s.stderr.Close()
}()
return s
}
func (s *ioState) hist() *nextHist {
return s.history
}
// writeRead writes ss, then reads stdout and stderr, waiting 500ms to
// be sure all the output has appeared.
func (s *ioState) writeRead(ss string) tstring {
if *verbose {
fmt.Printf("=> %s", ss)
}
_, err := io.WriteString(s.stdin, ss)
if err != nil {
panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err))
}
return s.readExpecting(-1, 500, "")
}
// writeReadExpect writes ss, then reads stdout and stderr until something
// that matches expectRE appears. expectRE should not be ""
func (s *ioState) writeReadExpect(ss, expectRE string) tstring {
if *verbose {
fmt.Printf("=> %s", ss)
}
if expectRE == "" {
panic("expectRE should not be empty; use .* instead")
}
_, err := io.WriteString(s.stdin, ss)
if err != nil {
panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err))
}
return s.readExpecting(-1, -1, expectRE)
}
func (s *ioState) readExpecting(millis, interlineTimeout int, expectedRE string) tstring {
timeout := time.Millisecond * time.Duration(millis)
interline := time.Millisecond * time.Duration(interlineTimeout)
s.last = tstring{}
var re *regexp.Regexp
if expectedRE != "" {
re = regexp.MustCompile(expectedRE)
}
loop:
for {
var timer <-chan time.Time
if timeout > 0 {
timer = time.After(timeout)
}
select {
case x, ok := <-s.outChan:
if !ok {
s.outChan = nil
}
s.last.o += x
case x, ok := <-s.errChan:
if !ok {
s.errChan = nil
}
s.last.e += x
case <-timer:
break loop
}
if re != nil {
if re.MatchString(s.last.o) {
break
}
if re.MatchString(s.last.e) {
break
}
}
timeout = interline
}
if *verbose {
fmt.Printf("<= %s%s", s.last.o, s.last.e)
}
return s.last
}
// replaceEnv returns a new environment derived from env
// by removing any existing definition of ev and adding ev=evv.
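// For example, newDelve and newGdb call replaceEnv(cmd.Env, "TERM", "dumb") so
// that the debugger output is free of color escape sequences.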
func replaceEnv(env []string, ev string, evv string) []string {
evplus := ev + "="
var found bool
for i, v := range env {
if strings.HasPrefix(v, evplus) {
found = true
env[i] = evplus + evv
}
}
if !found {
env = append(env, evplus+evv)
}
return env
}
// asCommandLine renders cmd as something that could be copy-and-pasted into a command line
// If cwd is not empty and different from the command's directory, prepend an appropriate "cd"
func asCommandLine(cwd string, cmd *exec.Cmd) string {
s := "("
if cmd.Dir != "" && cmd.Dir != cwd {
s += "cd" + escape(cmd.Dir) + ";"
}
for _, e := range cmd.Env {
if !strings.HasPrefix(e, "PATH=") &&
!strings.HasPrefix(e, "HOME=") &&
!strings.HasPrefix(e, "USER=") &&
!strings.HasPrefix(e, "SHELL=") {
s += escape(e)
}
}
for _, a := range cmd.Args {
s += escape(a)
}
s += " )"
return s
}
// escape inserts escapes appropriate for use in a shell command line
func escape(s string) string {
s = strings.Replace(s, "\\", "\\\\", -1)
s = strings.Replace(s, "'", "\\'", -1)
// Conservative guess at characters that will force quoting
if strings.ContainsAny(s, "\\ ;#*&$~?!|[]()<>{}`") {
s = " '" + s + "'"
} else {
s = " " + s
}
return s
}
func expect(want string, got tstring) {
if want != "" {
match, err := regexp.MatchString(want, got.o)
if err != nil {
panic(fmt.Sprintf("Error for regexp %s, %v\n", want, err))
}
if match {
return
}
match, err = regexp.MatchString(want, got.e)
if match {
return
}
fmt.Printf("EXPECTED '%s'\n GOT O='%s'\nAND E='%s'\n", want, got.o, got.e)
}
}
|
[
"\"GO_GCFLAGS\""
] |
[] |
[
"GO_GCFLAGS"
] |
[]
|
["GO_GCFLAGS"]
|
go
| 1 | 0 | |
tpcdsDataGenerator/dataParser/inventory.py
|
from pyspark.sql import Row
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql import HiveContext
import os
conf = SparkConf()
sc = SparkContext(conf=conf)
spark = HiveContext(sc)
textDataRDD = sc.textFile(os.environ["DATA_HDFS"] + "inventory.dat")
textDataDF = textDataRDD.map(lambda x: x.split("|")).map(lambda x: Row(inv_date_sk = x[0],inv_item_sk = x[1],inv_warehouse_sk = x[2],inv_quantity_on_hand = x[3])).toDF()
textDataDF.write.saveAsTable("tpcds.inventory", format="parquet", mode="overwrite")
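# Assumed layout (sketch): each inventory.dat row is pipe-delimited as
# inv_date_sk|inv_item_sk|inv_warehouse_sk|inv_quantity_on_hand, matching the
# Row mapping above.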
|
[] |
[] |
[
"DATA_HDFS"
] |
[]
|
["DATA_HDFS"]
|
python
| 1 | 0 | |
grr/core/grr_response_core/lib/build.py
|
#!/usr/bin/env python
"""Classes for handling build and repackaging of clients.
This handles invocations for the build across the supported platforms including
handling Visual Studio, pyinstaller and other packaging mechanisms.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import logging
import os
import re
import shutil
import struct
import subprocess
import tempfile
import zipfile
from future.builtins import str
from future.utils import iteritems
from future.utils import iterkeys
from future.utils import itervalues
# pylint: disable=g-import-not-at-top,unused-import
# This is a workaround so we don't need to maintain the whole PyInstaller
# codebase as a full-fledged dependency.
try:
# pytype: disable=import-error
from PyInstaller import __main__ as PyInstallerMain
# pytype: enable=import-error
except ImportError:
# We ignore this failure since most people running the code don't build their
# own clients and printing an error message causes confusion. Those building
# their own clients will need PyInstaller installed.
pass
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from fleetspeak.src.client.daemonservice.proto.fleetspeak_daemonservice import config_pb2 as fs_config_pb2
from fleetspeak.src.common.proto.fleetspeak import system_pb2 as fs_system_pb2
from grr_response_core import config
from grr_response_core import version
from grr_response_core.config import contexts
from grr_response_core.lib import config_lib
from grr_response_core.lib import config_validator_base
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib import utils
# Pull in local config validators.
from grr_response_core.lib.local import plugins
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.util import yaml
# pylint: enable=g-import-not-at-top,unused-import
class BuildError(Exception):
pass
class BuilderBase(object):
"""A base class for builder / repacker that provides utility functions."""
def __init__(self, context=None):
self.context = context or config.CONFIG.context[:]
self.context = [contexts.CLIENT_BUILD_CONTEXT] + self.context
def GenerateDirectory(self,
input_dir=None,
output_dir=None,
replacements=None):
input_dir = utils.NormalizePath(input_dir)
output_dir = utils.NormalizePath(output_dir)
replacements = replacements or []
for (root, _, files) in os.walk(input_dir):
for filename in files:
in_file = utils.JoinPath(root, filename)
out_file = in_file.replace(input_dir, output_dir)
for (s, replacement) in replacements:
out_file = out_file.replace(s, replacement)
utils.EnsureDirExists(os.path.dirname(out_file))
self.GenerateFile(in_file, out_file)
def GenerateFile(self, input_filename=None, output_filename=None):
"""Generates a file from a template, interpolating config values."""
if input_filename is None:
input_filename = output_filename + ".in"
if output_filename[-3:] == ".in":
output_filename = output_filename[:-3]
logging.debug("Generating file %s from %s", output_filename, input_filename)
with io.open(input_filename, "r") as fd:
data = fd.read()
with io.open(output_filename, "w") as fd:
fd.write(config.CONFIG.InterpolateValue(data, context=self.context))
class ClientBuilder(BuilderBase):
"""A client builder is responsible for building the binary template.
This is an abstract client builder class, used by the OS specific
implementations. Note that client builders typically run on the target
operating system.
"""
REQUIRED_BUILD_YAML_KEYS = set([
"Client.build_environment", "Client.build_time", "Template.build_type",
"Template.build_context", "Template.version_major",
"Template.version_minor", "Template.version_revision",
"Template.version_release", "Template.arch"
])
def __init__(self, context=None):
super(ClientBuilder, self).__init__(context=context)
self.build_dir = ""
def MakeBuildDirectory(self):
"""Prepares the build directory."""
# Create the build directory and let pyinstaller loose on it.
self.build_dir = config.CONFIG.Get(
"PyInstaller.build_dir", context=self.context)
self.work_path = config.CONFIG.Get(
"PyInstaller.workpath_dir", context=self.context)
self.CleanDirectory(self.build_dir)
self.CleanDirectory(self.work_path)
def CleanDirectory(self, directory):
logging.info("Clearing directory %s", directory)
try:
shutil.rmtree(directory)
except OSError:
pass
utils.EnsureDirExists(directory)
def BuildWithPyInstaller(self):
"""Use pyinstaller to build a client package."""
self.CleanDirectory(
config.CONFIG.Get("PyInstaller.distpath", context=self.context))
logging.info("Copying pyinstaller support files")
self.spec_file = os.path.join(self.build_dir, "grr.spec")
with open(self.spec_file, "wb") as fd:
fd.write(config.CONFIG.Get("PyInstaller.spec", context=self.context))
with open(os.path.join(self.build_dir, "version.txt"), "wb") as fd:
fd.write(config.CONFIG.Get("PyInstaller.version", context=self.context))
shutil.copy(
src=config.CONFIG.Get("PyInstaller.icon_path", context=self.context),
dst=os.path.join(self.build_dir, u"grr.ico"))
# We expect the onedir output at this location.
self.output_dir = os.path.join(
config.CONFIG.Get("PyInstaller.distpath", context=self.context),
"grr-client")
# Pyinstaller doesn't handle unicode strings.
args = [
"--distpath",
str(config.CONFIG.Get("PyInstaller.distpath", context=self.context)),
"--workpath",
str(
config.CONFIG.Get("PyInstaller.workpath_dir",
context=self.context)),
str(self.spec_file)
]
logging.info("Running pyinstaller: %s", args)
PyInstallerMain.run(pyi_args=[utils.SmartStr(x) for x in args])
# Clear out some crud that pyinstaller includes.
for path in ["tcl", "tk", "pytz"]:
dir_path = os.path.join(self.output_dir, path)
try:
shutil.rmtree(dir_path)
except OSError:
logging.error("Unable to remove directory: %s", dir_path)
try:
os.mkdir(dir_path)
except OSError:
logging.error("Unable to create directory: %s", dir_path)
file_path = os.path.join(dir_path, path)
try:
# Create an empty file so the directories get put in the installers.
with open(file_path, "wb"):
pass
except IOError:
logging.error("Unable to create file: %s", file_path)
version_ini = version.VersionPath()
shutil.copy(version_ini, os.path.join(self.output_dir, "version.ini"))
with open(os.path.join(self.output_dir, "build.yaml"), "wb") as fd:
self.WriteBuildYaml(fd)
def WriteBuildYaml(self, fd, build_timestamp=True):
"""Write build spec to fd."""
output = {
"Client.build_environment":
rdf_client.Uname.FromCurrentSystem().signature(),
"Template.build_type":
config.CONFIG.Get("ClientBuilder.build_type", context=self.context),
"Template.version_major":
config.CONFIG.Get("Source.version_major", context=self.context),
"Template.version_minor":
config.CONFIG.Get("Source.version_minor", context=self.context),
"Template.version_revision":
config.CONFIG.Get("Source.version_revision", context=self.context),
"Template.version_release":
config.CONFIG.Get("Source.version_release", context=self.context),
"Template.arch":
config.CONFIG.Get("Client.arch", context=self.context)
}
if build_timestamp:
output["Client.build_time"] = rdfvalue.RDFDatetime.Now()
else:
self.REQUIRED_BUILD_YAML_KEYS.remove("Client.build_time")
for key, value in iteritems(output):
output[key] = str(value)
output["Template.build_context"] = self.context
output_keys = set(iterkeys(output))
if output_keys != self.REQUIRED_BUILD_YAML_KEYS:
raise RuntimeError("Bad build.yaml: expected %s, got %s" %
(self.REQUIRED_BUILD_YAML_KEYS, output_keys))
fd.write(yaml.Dump(output).encode("utf-8"))
def CopyMissingModules(self):
"""Copy any additional DLLs that cant be found."""
def MakeExecutableTemplate(self, output_file=None):
"""Create the executable template.
Args:
output_file: string filename where we will write the template.
The client is build in two phases. First an executable template is created
with the client binaries contained inside a zip file. Then the installation
package is created by appending the SFX extractor to this template and
writing a config file into the zip file.
This technique allows the client build to be carried out once on the
supported platform (e.g. windows with MSVS), but the deployable installer
can be built on any platform which supports python.
Subclasses for each OS do the actual work, we just make sure the output
directory is set up correctly here.
"""
self.template_file = output_file or config.CONFIG.Get(
"ClientBuilder.template_path", context=self.context)
utils.EnsureDirExists(os.path.dirname(self.template_file))
def MakeZip(self, input_dir, output_file):
"""Creates a ZIP archive of the files in the input directory.
Args:
input_dir: the name of the input directory.
output_file: the name of the output ZIP archive without extension.
"""
logging.info("Generating zip template file at %s", output_file)
basename, _ = os.path.splitext(output_file)
# TODO(user):pytype: incorrect make_archive() definition in typeshed.
# pytype: disable=wrong-arg-types
shutil.make_archive(
basename, "zip", base_dir=".", root_dir=input_dir, verbose=True)
# pytype: enable=wrong-arg-types
class ClientRepacker(BuilderBase):
"""Takes the binary template and producing an installer.
Note that this should be runnable on all operating systems.
"""
CONFIG_SECTIONS = [
"CA", "Client", "ClientRepacker", "Logging", "Config", "Nanny",
"Installer", "Template"
]
# Config options that should never make it to a deployable binary.
SKIP_OPTION_LIST = ["Client.private_key"]
def __init__(self, context=None, signer=None):
super(ClientRepacker, self).__init__(context=context)
self.signer = signer
self.signed_template = False
def GetClientConfig(self, context, validate=True, deploy_timestamp=True):
"""Generates the client config file for inclusion in deployable binaries."""
with utils.TempDirectory() as tmp_dir:
# Make sure we write the file in yaml format.
filename = os.path.join(
tmp_dir,
config.CONFIG.Get("ClientBuilder.config_filename", context=context))
new_config = config.CONFIG.MakeNewConfig()
new_config.Initialize(reset=True, data="")
new_config.SetWriteBack(filename)
# Only copy certain sections to the client. We enumerate all
# defined options and then resolve those from the config in the
# client's context. The result is the raw option as if the
# client read our config file.
client_context = context[:]
while contexts.CLIENT_BUILD_CONTEXT in client_context:
client_context.remove(contexts.CLIENT_BUILD_CONTEXT)
for descriptor in sorted(config.CONFIG.type_infos, key=lambda x: x.name):
if descriptor.name in self.SKIP_OPTION_LIST:
continue
if descriptor.section in self.CONFIG_SECTIONS:
value = config.CONFIG.GetRaw(
descriptor.name, context=client_context, default=None)
if value is not None:
logging.debug("Copying config option to client: %s",
descriptor.name)
new_config.SetRaw(descriptor.name, value)
if config.CONFIG.Get("ClientBuilder.fleetspeak_enabled", context=context):
new_config.Set("Client.fleetspeak_enabled", True)
if deploy_timestamp:
deploy_time_string = str(rdfvalue.RDFDatetime.Now())
new_config.Set("Client.deploy_time", deploy_time_string)
new_config.Write()
if validate:
self.ValidateEndConfig(new_config)
private_validator = config.CONFIG.Get(
"ClientBuilder.private_config_validator_class", context=context)
if private_validator:
try:
validator = config_validator_base.PrivateConfigValidator.classes[
private_validator]()
except KeyError:
logging.error(
"Couldn't find config validator class %s, "
"you probably need to copy it into lib/local", private_validator)
raise
validator.ValidateEndConfig(new_config, self.context)
return io.open(filename, "r").read()
def ValidateEndConfig(self, config_obj, errors_fatal=True):
"""Given a generated client config, attempt to check for common errors."""
errors = []
if not config.CONFIG["ClientBuilder.fleetspeak_enabled"]:
location = config_obj.Get("Client.server_urls", context=self.context)
if not location:
errors.append("Empty Client.server_urls")
for url in location:
if not url.startswith("http"):
errors.append("Bad Client.server_urls specified %s" % url)
key_data = config_obj.GetRaw(
"Client.executable_signing_public_key",
default=None,
context=self.context)
if key_data is None:
errors.append("Missing Client.executable_signing_public_key.")
elif not key_data.startswith("-----BEGIN PUBLIC"):
errors.append(
"Invalid Client.executable_signing_public_key: %s" % key_data)
else:
rsa_key = rdf_crypto.RSAPublicKey()
rsa_key.ParseFromHumanReadable(key_data)
logging.info(
"Executable signing key successfully parsed from config (%d-bit)",
rsa_key.KeyLen())
if not config.CONFIG["ClientBuilder.fleetspeak_enabled"]:
certificate = config_obj.GetRaw(
"CA.certificate", default=None, context=self.context)
if certificate is None or not certificate.startswith("-----BEGIN CERTIF"):
errors.append("CA certificate missing from config.")
for bad_opt in ["Client.private_key"]:
if config_obj.Get(bad_opt, context=self.context, default=""):
errors.append("Client cert in conf, this should be empty at deployment"
" %s" % bad_opt)
if errors_fatal and errors:
for error in errors:
logging.error("Build Config Error: %s", error)
raise RuntimeError("Bad configuration generated. Terminating.")
else:
return errors
def MakeDeployableBinary(self, template_path, output_path):
"""Use the template to create a customized installer."""
class WindowsClientRepacker(ClientRepacker):
"""Repackages windows installers."""
def ValidateEndConfig(self, config_obj, errors_fatal=True):
"""Windows specific config validations."""
errors = super(WindowsClientRepacker, self).ValidateEndConfig(
config_obj, errors_fatal=errors_fatal)
install_dir = config_obj["Client.install_path"]
for path in config_obj["Client.tempdir_roots"]:
if path.startswith("/"):
errors.append(
"Client.tempdir_root %s starts with /, probably has Unix path." %
path)
if not path.startswith(install_dir):
errors.append(
"Client.tempdir_root %s is not inside the install_dir %s, this is "
"a security risk" % ((path, install_dir)))
if config_obj.Get("Logging.path").startswith("/"):
errors.append("Logging.path starts with /, probably has Unix path. %s" %
config_obj["Logging.path"])
if "Windows\\" in config_obj.GetRaw("Logging.path"):
errors.append("Windows in Logging.path, you probably want "
"%(WINDIR|env) instead")
if not config_obj["Client.binary_name"].endswith(".exe"):
errors.append("Missing .exe extension on binary_name %s" %
config_obj["Client.binary_name"])
if not config_obj["Nanny.binary"].endswith(".exe"):
errors.append("Missing .exe extension on nanny_binary")
if errors_fatal and errors:
for error in errors:
logging.error("Build Config Error: %s", error)
raise RuntimeError("Bad configuration generated. Terminating.")
else:
return errors
def MakeDeployableBinary(self, template_path, output_path):
"""Repackage the template zip with the installer."""
context = self.context + ["Client Context"]
zip_data = io.BytesIO()
output_zip = zipfile.ZipFile(
zip_data, mode="w", compression=zipfile.ZIP_DEFLATED)
z_template = zipfile.ZipFile(open(template_path, "rb"))
# Track which files we've copied already.
completed_files = [
"grr-client.exe", "GRRservice.exe", "dbg_grr-client.exe",
"dbg_GRRservice.exe"
]
# Change the name of the main binary to the configured name.
client_bin_name = config.CONFIG.Get("Client.binary_name", context=context)
console_build = config.CONFIG.Get("ClientBuilder.console", context=context)
if console_build:
client_filename = "dbg_grr-client.exe"
service_filename = "dbg_GRRservice.exe"
else:
client_filename = "grr-client.exe"
service_filename = "GRRservice.exe"
bin_name = z_template.getinfo(client_filename)
output_zip.writestr(client_bin_name, z_template.read(bin_name))
CopyFileInZip(z_template, "grr-client.exe.manifest", output_zip,
"%s.manifest" % client_bin_name)
completed_files.append("grr-client.exe.manifest")
# Change the name of the service binary to the configured name.
service_template = z_template.getinfo(service_filename)
service_bin_name = config.CONFIG.Get(
"Nanny.service_binary_name", context=context)
output_zip.writestr(service_bin_name, z_template.read(service_template))
if config.CONFIG["ClientBuilder.fleetspeak_enabled"]:
self._GenerateFleetspeakServiceConfig(output_zip)
if self.signed_template:
# If the template libs were already signed we can skip signing
CreateNewZipWithSignedLibs(
z_template, output_zip, ignore_files=completed_files)
else:
CreateNewZipWithSignedLibs(
z_template,
output_zip,
ignore_files=completed_files,
signer=self.signer)
output_zip.close()
return self.MakeSelfExtractingZip(zip_data.getvalue(), output_path)
def _GenerateFleetspeakServiceConfig(self, zip_file):
orig_fs_config_path = config.CONFIG["ClientBuilder.fleetspeak_config_path"]
final_fs_config_fname = config.CONFIG[
"Client.fleetspeak_unsigned_config_fname"]
if orig_fs_config_path.endswith(".in"):
logging.info("Interpolating %s", orig_fs_config_path)
logging.warning("Backslashes will be naively re-escaped after "
"interpolation. If this is not desired, use a Fleetspeak "
"config file without the '.in' extension.")
with utils.TempDirectory() as temp_dir:
temp_fs_config_path = os.path.join(temp_dir, final_fs_config_fname)
with io.open(orig_fs_config_path, "r") as source:
with io.open(temp_fs_config_path, "w") as dest:
interpolated = config.CONFIG.InterpolateValue(
source.read(), context=self.context)
dest.write(re.sub(r"\\", r"\\\\", interpolated))
self._ValidateFleetspeakServiceConfig(temp_fs_config_path)
zip_file.write(temp_fs_config_path, final_fs_config_fname)
else:
self._ValidateFleetspeakServiceConfig(orig_fs_config_path)
zip_file.write(orig_fs_config_path, final_fs_config_fname)
def _ValidateFleetspeakServiceConfig(self, config_path):
"""Validates a Fleetspeak service config.
Checks that the given file is a valid TextFormat representation of
a Fleetspeak service config proto.
Args:
config_path: Path to the config file.
Raises:
BuildError: If the config is not valid.
"""
with open(config_path, "rb") as f:
pool = descriptor_pool.DescriptorPool()
pool.AddDescriptor(fs_config_pb2.Config.DESCRIPTOR)
parsed_config = text_format.Parse(
f.read(), fs_system_pb2.ClientServiceConfig(), descriptor_pool=pool)
if parsed_config.factory != "Daemon":
raise BuildError(
"Fleetspeak config does not have the expected factory type.")
daemon_cfg = fs_config_pb2.Config()
parsed_config.config.Unpack(daemon_cfg)
if not daemon_cfg.argv:
raise BuildError(
"Fleetspeak daemon service config does not specify command line "
"args.")
def MakeSelfExtractingZip(self, payload_data, output_path):
"""Repack the installer into the payload.
Args:
payload_data: data payload for zip file
output_path: filename for the zip output
Raises:
RuntimeError: if the ClientBuilder.unzipsfx_stub doesn't require admin.
Returns:
output_path: filename string of zip output file
"""
context = self.context + ["Client Context"]
src_zip = zipfile.ZipFile(io.BytesIO(payload_data), mode="r")
zip_data = io.BytesIO()
output_zip = zipfile.ZipFile(
zip_data, mode="w", compression=zipfile.ZIP_DEFLATED)
config_file_name = config.CONFIG.Get(
"ClientBuilder.config_filename", context=context)
# Copy the rest of the files from the package to the new zip.
for template_file in src_zip.namelist():
if template_file != config_file_name:
# Avoid writing the config file twice if we're repacking a binary that
# has already been run through deployment. We write it in the next step,
# so no need to copy over from the original here.
CopyFileInZip(src_zip, template_file, output_zip)
client_config_content = self.GetClientConfig(context)
output_zip.writestr(
config_file_name,
client_config_content.encode("utf-8"),
compress_type=zipfile.ZIP_STORED)
# The zip file comment is used by the self extractor to run the installation
# script. The comment has to be a `bytes` object because the `zipfile` module
# is not smart enough to properly handle `unicode` objects. We use the
# `encode` method instead of `SmartStr` because we expect this option to be a
# `unicode` object and, if it is not, we want it to blow up.
output_zip.comment = b"$AUTORUN$>%s" % config.CONFIG.Get(
"ClientBuilder.autorun_command_line", context=context).encode("utf-8")
output_zip.close()
utils.EnsureDirExists(os.path.dirname(output_path))
with open(output_path, "wb") as fd:
# First write the installer stub
stub_data = io.BytesIO()
unzipsfx_stub = config.CONFIG.Get(
"ClientBuilder.unzipsfx_stub", context=context)
stub_raw = open(unzipsfx_stub, "rb").read()
# Check stub has been compiled with the requireAdministrator manifest.
if b"level=\"requireAdministrator" not in stub_raw:
raise RuntimeError("Bad unzip binary in use. Not compiled with the"
"requireAdministrator manifest option.")
stub_data.write(stub_raw)
# If in verbose mode, modify the unzip binary's PE header to run in console
# mode for easier debugging.
SetPeSubsystem(
stub_data,
console=config.CONFIG.Get("ClientBuilder.console", context=context))
# Now patch up the .rsrc section to contain the payload.
end_of_file = zip_data.tell() + stub_data.tell()
# This is the IMAGE_SECTION_HEADER.Name which is also the start of
# IMAGE_SECTION_HEADER.
offset_to_rsrc = stub_data.getvalue().find(b".rsrc")
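# Relevant IMAGE_SECTION_HEADER layout (offsets from the start of the header):
#   0x00  Name (8 bytes, ".rsrc" here)
#   0x08  VirtualSize
#   0x0C  VirtualAddress
#   0x10  SizeOfRawData      <- patched below to span to the end of the file
#   0x14  PointerToRawData   <- read below as the section's file offset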
# IMAGE_SECTION_HEADER.PointerToRawData is a 32 bit int.
stub_data.seek(offset_to_rsrc + 20)
start_of_rsrc_section = struct.unpack("<I", stub_data.read(4))[0]
# Adjust IMAGE_SECTION_HEADER.SizeOfRawData to span from the old start to
# the end of file.
stub_data.seek(offset_to_rsrc + 16)
stub_data.write(struct.pack("<I", end_of_file - start_of_rsrc_section))
# Concatenate stub and zip file.
out_data = io.BytesIO()
out_data.write(stub_data.getvalue())
out_data.write(zip_data.getvalue())
# Then write the actual output file.
fd.write(out_data.getvalue())
if self.signer:
self.signer.SignFile(output_path)
logging.info("Deployable binary generated at %s", output_path)
return output_path
class DarwinClientRepacker(ClientRepacker):
"""Repackage OSX clients."""
def MakeDeployableBinary(self, template_path, output_path):
"""This will add the config to the client template."""
context = self.context + ["Client Context"]
utils.EnsureDirExists(os.path.dirname(output_path))
client_config_data = self.GetClientConfig(context)
shutil.copyfile(template_path, output_path)
zip_file = zipfile.ZipFile(output_path, mode="a")
zip_info = zipfile.ZipInfo(filename="config.yaml")
zip_file.writestr(zip_info, client_config_data)
zip_file.close()
return output_path
class LinuxClientRepacker(ClientRepacker):
"""Repackage Linux templates."""
# TODO(user):pytype: incorrect shutil.move() definition in typeshed.
# pytype: disable=wrong-arg-types
def GenerateDPKGFiles(self, template_path):
"""Generates the files needed by dpkg-buildpackage."""
# Rename the generated binaries to the correct name.
template_binary_dir = os.path.join(template_path, "dist/debian/grr-client")
package_name = config.CONFIG.Get(
"ClientBuilder.package_name", context=self.context)
target_binary_dir = os.path.join(
template_path, "dist/debian/%s%s" %
(package_name,
config.CONFIG.Get("ClientBuilder.target_dir", context=self.context)))
if package_name == "grr-client":
# Need to rename the template path or the move will fail.
shutil.move(template_binary_dir, "%s-template" % template_binary_dir)
template_binary_dir = "%s-template" % template_binary_dir
utils.EnsureDirExists(os.path.dirname(target_binary_dir))
shutil.move(template_binary_dir, target_binary_dir)
shutil.move(
os.path.join(target_binary_dir, "grr-client"),
os.path.join(
target_binary_dir,
config.CONFIG.Get("Client.binary_name", context=self.context)))
deb_in_dir = os.path.join(template_path, "dist/debian/debian.in/")
self.GenerateDirectory(deb_in_dir, os.path.join(
template_path, "dist/debian"), [("grr-client", package_name)])
# Generate directories for the /usr/sbin link.
utils.EnsureDirExists(
os.path.join(template_path, "dist/debian/%s/usr/sbin" % package_name))
# Generate the nanny template. This only exists from client version 3.1.2.5
# onwards.
if config.CONFIG["Template.version_numeric"] >= 3125:
self.GenerateFile(
os.path.join(target_binary_dir, "nanny.sh.in"),
os.path.join(target_binary_dir, "nanny.sh"))
# Generate the upstart template.
self.GenerateFile(
os.path.join(template_path, "dist/debian/upstart.in/grr-client.conf"),
os.path.join(template_path, "dist/debian/%s.upstart" % package_name))
# Generate the initd template. The init will not run if it detects upstart
# is present.
self.GenerateFile(
os.path.join(template_path, "dist/debian/initd.in/grr-client"),
os.path.join(template_path, "dist/debian/%s.init" % package_name))
# Generate the systemd unit file.
self.GenerateFile(
os.path.join(template_path,
"dist/debian/systemd.in/grr-client.service"),
os.path.join(template_path, "dist/debian/%s.service" % package_name))
# Clean up the template dirs.
shutil.rmtree(deb_in_dir)
shutil.rmtree(os.path.join(template_path, "dist/debian/upstart.in"))
shutil.rmtree(os.path.join(template_path, "dist/debian/initd.in"))
shutil.rmtree(os.path.join(template_path, "dist/debian/systemd.in"))
# pytype: enable=wrong-arg-types
def MakeDeployableBinary(self, template_path, output_path):
"""This will add the config to the client template and create a .deb."""
buildpackage_binary = "/usr/bin/dpkg-buildpackage"
if not os.path.exists(buildpackage_binary):
logging.error("dpkg-buildpackage not found, unable to repack client.")
return
with utils.TempDirectory() as tmp_dir:
template_dir = os.path.join(tmp_dir, "dist")
utils.EnsureDirExists(template_dir)
zf = zipfile.ZipFile(template_path)
for name in zf.namelist():
dirname = os.path.dirname(name)
utils.EnsureDirExists(os.path.join(template_dir, dirname))
with open(os.path.join(template_dir, name), "wb") as fd:
fd.write(zf.read(name))
# Generate the dpkg files.
self.GenerateDPKGFiles(tmp_dir)
# Create a client config.
client_context = ["Client Context"] + self.context
client_config_content = self.GetClientConfig(client_context)
# We need to strip leading /'s or .join will ignore everything that comes
# before it.
target_dir = config.CONFIG.Get(
"ClientBuilder.target_dir", context=self.context).lstrip("/")
agent_dir = os.path.join(
template_dir, "debian",
config.CONFIG.Get("ClientBuilder.package_name", context=self.context),
target_dir)
with open(
os.path.join(
agent_dir,
config.CONFIG.Get(
"ClientBuilder.config_filename", context=self.context)),
"wb") as fd:
fd.write(client_config_content)
# Set the daemon to executable.
os.chmod(
os.path.join(
agent_dir,
config.CONFIG.Get("Client.binary_name", context=self.context)),
0o755)
arch = config.CONFIG.Get("Template.arch", context=self.context)
try:
old_working_dir = os.getcwd()
except OSError:
old_working_dir = os.environ.get("HOME", "/tmp")
try:
os.chdir(template_dir)
command = [buildpackage_binary, "-uc", "-d", "-b", "-a%s" % arch]
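# dpkg-buildpackage flags: -uc skips signing the .changes file, -d skips the
# build-dependency check, -b builds a binary-only package, and -a sets the
# target Debian architecture.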
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if "Failed to sign" not in e.output:
logging.error("Error calling %s.", command)
logging.error(e.output)
raise
filename_base = config.CONFIG.Get(
"ClientBuilder.debian_package_base", context=self.context)
output_base = config.CONFIG.Get(
"ClientRepacker.output_basename", context=self.context)
finally:
try:
os.chdir(old_working_dir)
except OSError:
pass
utils.EnsureDirExists(os.path.dirname(output_path))
for extension in [
".changes",
config.CONFIG.Get(
"ClientBuilder.output_extension", context=self.context)
]:
input_name = "%s%s" % (filename_base, extension)
output_name = "%s%s" % (output_base, extension)
# TODO(user):pytype: incorrect move() definition in typeshed.
# pytype: disable=wrong-arg-types
shutil.move(
os.path.join(tmp_dir, input_name),
os.path.join(os.path.dirname(output_path), output_name))
# pytype: enable=wrong-arg-types
logging.info("Created package %s", output_path)
return output_path
class CentosClientRepacker(LinuxClientRepacker):
"""Repackages Linux RPM templates."""
def Sign(self, rpm_filename):
if self.signer:
return self.signer.AddSignatureToRPMs([rpm_filename])
def MakeDeployableBinary(self, template_path, output_path):
"""This will add the config to the client template and create a .rpm."""
rpmbuild_binary = "/usr/bin/rpmbuild"
if not os.path.exists(rpmbuild_binary):
logging.error("rpmbuild not found, unable to repack client.")
return
with utils.TempDirectory() as tmp_dir:
template_dir = os.path.join(tmp_dir, "dist")
utils.EnsureDirExists(template_dir)
zf = zipfile.ZipFile(template_path)
for name in zf.namelist():
dirname = os.path.dirname(name)
utils.EnsureDirExists(os.path.join(template_dir, dirname))
with open(os.path.join(template_dir, name), "wb") as fd:
fd.write(zf.read(name))
# Set up a RPM building environment.
rpm_root_dir = os.path.join(tmp_dir, "rpmbuild")
rpm_build_dir = os.path.join(rpm_root_dir, "BUILD")
utils.EnsureDirExists(rpm_build_dir)
rpm_buildroot_dir = os.path.join(rpm_root_dir, "BUILDROOT")
utils.EnsureDirExists(rpm_buildroot_dir)
rpm_rpms_dir = os.path.join(rpm_root_dir, "RPMS")
utils.EnsureDirExists(rpm_rpms_dir)
rpm_specs_dir = os.path.join(rpm_root_dir, "SPECS")
utils.EnsureDirExists(rpm_specs_dir)
template_binary_dir = os.path.join(tmp_dir, "dist/rpmbuild/grr-client")
target_binary_dir = "%s%s" % (
rpm_build_dir,
config.CONFIG.Get("ClientBuilder.target_dir", context=self.context))
utils.EnsureDirExists(os.path.dirname(target_binary_dir))
try:
shutil.rmtree(target_binary_dir)
except OSError:
pass
# TODO(user):pytype: incorrect move() definition in typeshed.
# pytype: disable=wrong-arg-types
shutil.move(template_binary_dir, target_binary_dir)
# pytype: enable=wrong-arg-types
client_name = config.CONFIG.Get("Client.name", context=self.context)
client_binary_name = config.CONFIG.Get(
"Client.binary_name", context=self.context)
if client_binary_name != "grr-client":
# TODO(user):pytype: incorrect move() definition in typeshed.
# pytype: disable=wrong-arg-types
shutil.move(
os.path.join(target_binary_dir, "grr-client"),
os.path.join(target_binary_dir, client_binary_name))
# pytype: enable=wrong-arg-types
# Generate spec
spec_filename = os.path.join(rpm_specs_dir, "%s.spec" % client_name)
self.GenerateFile(
os.path.join(tmp_dir, "dist/rpmbuild/grr.spec.in"), spec_filename)
initd_target_filename = os.path.join(rpm_build_dir, "etc/init.d",
client_name)
# Generate init.d
utils.EnsureDirExists(os.path.dirname(initd_target_filename))
self.GenerateFile(
os.path.join(tmp_dir, "dist/rpmbuild/grr-client.initd.in"),
initd_target_filename)
# Generate systemd unit
if config.CONFIG["Template.version_numeric"] >= 3125:
systemd_target_filename = os.path.join(rpm_build_dir,
"usr/lib/systemd/system/",
"%s.service" % client_name)
utils.EnsureDirExists(os.path.dirname(systemd_target_filename))
self.GenerateFile(
os.path.join(tmp_dir, "dist/rpmbuild/grr-client.service.in"),
systemd_target_filename)
# Generate prelinking blacklist file
prelink_target_filename = os.path.join(
rpm_build_dir, "etc/prelink.conf.d", "%s.conf" % client_name)
utils.EnsureDirExists(os.path.dirname(prelink_target_filename))
self.GenerateFile(
os.path.join(tmp_dir, "dist/rpmbuild/prelink_blacklist.conf.in"),
prelink_target_filename)
# Create a client config.
client_context = ["Client Context"] + self.context
client_config_content = self.GetClientConfig(client_context)
with open(
os.path.join(
target_binary_dir,
config.CONFIG.Get(
"ClientBuilder.config_filename", context=self.context)),
"wb") as fd:
fd.write(client_config_content)
# Set the daemon to executable.
os.chmod(os.path.join(target_binary_dir, client_binary_name), 0o755)
client_arch = config.CONFIG.Get("Template.arch", context=self.context)
if client_arch == "amd64":
client_arch = "x86_64"
command = [
rpmbuild_binary, "--define", "_topdir " + rpm_root_dir, "--target",
client_arch, "--buildroot", rpm_buildroot_dir, "-bb", spec_filename
]
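# rpmbuild flags: "--define _topdir" points rpmbuild at the temporary build
# tree created above, "--target" sets the package architecture, "--buildroot"
# sets the install staging directory, and "-bb" builds only the binary RPM
# from the spec file.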
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logging.error("Error calling %s.", command)
logging.error(e.output)
raise
client_version = config.CONFIG.Get(
"Template.version_string", context=self.context)
rpm_filename = os.path.join(
rpm_rpms_dir, client_arch,
"%s-%s-1.%s.rpm" % (client_name, client_version, client_arch))
utils.EnsureDirExists(os.path.dirname(output_path))
shutil.move(rpm_filename, output_path)
logging.info("Created package %s", output_path)
self.Sign(output_path)
return output_path
def CopyFileInZip(from_zip, from_name, to_zip, to_name=None, signer=None):
"""Read a file from a ZipFile and write it to a new ZipFile."""
data = from_zip.read(from_name)
if to_name is None:
to_name = from_name
if signer:
logging.debug("Signing %s", from_name)
data = signer.SignBuffer(data)
to_zip.writestr(to_name, data)
def CreateNewZipWithSignedLibs(z_in,
z_out,
ignore_files=None,
signer=None,
skip_signing_files=None):
"""Copies files from one zip to another, signing all qualifying files."""
ignore_files = ignore_files or []
skip_signing_files = skip_signing_files or []
extensions_to_sign = [".sys", ".exe", ".dll", ".pyd"]
to_sign = []
for template_file in z_in.namelist():
if template_file not in ignore_files:
extension = os.path.splitext(template_file)[1].lower()
if (signer and template_file not in skip_signing_files and
extension in extensions_to_sign):
to_sign.append(template_file)
else:
CopyFileInZip(z_in, template_file, z_out)
temp_files = {}
for filename in to_sign:
fd, path = tempfile.mkstemp()
with os.fdopen(fd, "wb") as temp_fd:
temp_fd.write(z_in.read(filename))
temp_files[filename] = path
try:
signer.SignFiles(itervalues(temp_files))
except AttributeError:
for f in itervalues(temp_files):
signer.SignFile(f)
for filename, tempfile_path in iteritems(temp_files):
z_out.writestr(filename, open(tempfile_path, "rb").read())
def SetPeSubsystem(fd, console=True):
"""Takes file like obj and returns (offset, value) for the PE subsystem."""
current_pos = fd.tell()
fd.seek(0x3c) # _IMAGE_DOS_HEADER.e_lfanew
header_offset = struct.unpack("<I", fd.read(4))[0]
# _IMAGE_NT_HEADERS.OptionalHeader.Subsystem ( 0x18 + 0x44)
subsystem_offset = header_offset + 0x5c
fd.seek(subsystem_offset)
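# Subsystem values per the PE specification: 2 = IMAGE_SUBSYSTEM_WINDOWS_GUI,
# 3 = IMAGE_SUBSYSTEM_WINDOWS_CUI (console).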
if console:
fd.write(b"\x03")
else:
fd.write(b"\x02")
fd.seek(current_pos)
|
[] |
[] |
["HOME"] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
tools/demo.py
|
from dataset.tf_data_handler import tf_data_handler
import tensorflow as tf
from config import Config
import time
import os
from model.model import vgg_crnn
from tools.utils import ctc_decode
from tools.utils import map_to_text
import cv2
# tf_config = tf.ConfigProto()
# tf_config.gpu_options.per_process_gpu_memory_fraction = 0.9 # cap the per-process GPU memory fraction
# tf_config.gpu_options.allow_growth = True # grow GPU memory allocation on demand
# session = tf.Session(config=tf_config)
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
model_file = "/home/ethony/github_work/crnn_ctc_tf2/checkpoint/epoch_20_model"
model = vgg_crnn()
model.load_weights(model_file)
def demo(img_path):
img = tf.io.read_file(img_path)
img = tf.io.decode_jpeg(img, channels=1)
img_shape = tf.shape(img)
scale_factor = Config.des_img_shape[0] / img_shape[0]
img_width = scale_factor * tf.cast(img_shape[1], tf.float64)
img_width = tf.cast(img_width, tf.int32)
img = tf.image.resize(img, (Config.des_img_shape[0], img_width)) / 255.0
img = tf.expand_dims(img, axis=0)
pred = model(img)
pre_index = ctc_decode(pred)
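# ctc_decode (from tools.utils) is expected to collapse repeated labels and
# drop the CTC blank token, returning one index sequence per input image.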
text = map_to_text(pre_index[0])
print(text)
if __name__ == "__main__":
test_path = "/home/ethony/github_work/crnn_ctc_tf2/temp/ture_test_imgs"
for item in os.listdir(test_path)[:100]:
if item.endswith("jpg"):
img_path = os.path.join(test_path, item)
item_img = cv2.imread(img_path)
cv2.imshow("item_img", item_img)
# start_time = time.time()
# print(img_path)
demo(img_path)
cv2.waitKey(0)
# print(time.time() - start_time)
|
[] |
[] |
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
tests/test_catalina_10_15_7.py
|
""" Basic tests for Photos 5 on MacOS 10.15.7 """
import datetime
import os
import os.path
import pathlib
import sqlite3
import tempfile
import time
from collections import Counter, namedtuple
import pytest
import osxphotos
from osxphotos._constants import _UNKNOWN_PERSON
from osxphotos.utils import _get_os_version
OS_VERSION = _get_os_version()
SKIP_TEST = "OSXPHOTOS_TEST_EXPORT" not in os.environ or OS_VERSION[1] != "15"
PHOTOS_DB_LOCAL = os.path.expanduser("~/Pictures/Photos Library.photoslibrary")
PHOTOS_DB = "tests/Test-10.15.7.photoslibrary/database/photos.db"
PHOTOS_DB_PATH = "/Test-10.15.7.photoslibrary/database/photos.db"
PHOTOS_LIBRARY_PATH = "/Test-10.15.7.photoslibrary"
PHOTOS_DB_LEN = 25
PHOTOS_NOT_IN_TRASH_LEN = 23
PHOTOS_IN_TRASH_LEN = 2
PHOTOS_DB_IMPORT_SESSIONS = 17
KEYWORDS = [
"Kids",
"wedding",
"flowers",
"England",
"London",
"London 2018",
"St. James's Park",
"UK",
"United Kingdom",
"foo/bar",
"Travel",
"Maria",
"Drink",
"Val d'Isère",
"Wine",
"Wine Bottle",
"Food",
"Furniture",
"Pizza",
"Table",
"Cloudy",
"Cord",
"Outdoor",
"Sky",
"Sunset Sunrise",
]
# Photos 5 includes blank person for detected face
PERSONS = ["Katie", "Suzy", "Maria", _UNKNOWN_PERSON]
ALBUMS = [
"2018-10 - Sponsion, Museum, Frühstück, Römermuseum",
"2019-10/11 Paris Clermont",
"AlbumInFolder",
"EmptyAlbum",
"I have a deleted twin", # there's an empty album with same name that has been deleted
"Multi Keyword",
"Pumpkin Farm",
"Raw",
"Sorted Manual",
"Sorted Newest First",
"Sorted Oldest First",
"Sorted Title",
"Test Album", # there are 2 albums named "Test Album" for testing duplicate album names
]
KEYWORDS_DICT = {
"Drink": 2,
"England": 1,
"Kids": 4,
"London 2018": 1,
"London": 1,
"Maria": 1,
"St. James's Park": 1,
"Travel": 2,
"UK": 1,
"United Kingdom": 1,
"Val d'Isère": 2,
"Wine Bottle": 2,
"Wine": 2,
"flowers": 1,
"foo/bar": 1,
"wedding": 3,
"Food": 2,
"Furniture": 2,
"Pizza": 2,
"Table": 2,
"Cloudy": 2,
"Cord": 2,
"Outdoor": 2,
"Sky": 2,
"Sunset Sunrise": 2,
}
PERSONS_DICT = {"Katie": 3, "Suzy": 2, "Maria": 2, _UNKNOWN_PERSON: 1}
ALBUM_DICT = {
"2018-10 - Sponsion, Museum, Frühstück, Römermuseum": 1,
"2019-10/11 Paris Clermont": 1,
"AlbumInFolder": 2,
"EmptyAlbum": 0,
"I have a deleted twin": 1,
"Multi Keyword": 2,
"Pumpkin Farm": 3,
"Raw": 4,
"Sorted Manual": 3,
"Sorted Newest First": 3,
"Sorted Oldest First": 3,
"Sorted Title": 3,
"Test Album": 2,
} # Note: there are 2 albums named "Test Album" for testing duplicate album names
UUID_DICT = {
"missing": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
"favorite": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
"not_favorite": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
"hidden": "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C",
"not_hidden": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
"has_adjustments": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
"adjustments_info": "7783E8E6-9CAC-40F3-BE22-81FB7051C266",
"no_adjustments": "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068",
"location": "DC99FBDD-7A52-4100-A5BB-344131646C30",
"no_location": "6191423D-8DB8-4D4C-92BE-9BBBA308AAC4",
"external_edit": "DC99FBDD-7A52-4100-A5BB-344131646C30",
"no_external_edit": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
"export": "D79B8D77-BFFC-460B-9312-034F2877D35B", # "Pumkins2.jpg"
"export_tif": "8846E3E6-8AC8-4857-8448-E3D025784410",
"in_album": "D79B8D77-BFFC-460B-9312-034F2877D35B", # "Pumkins2.jpg"
"date_invalid": "8846E3E6-8AC8-4857-8448-E3D025784410",
"intrash": "71E3E212-00EB-430D-8A63-5E294B268554",
"not_intrash": "DC99FBDD-7A52-4100-A5BB-344131646C30",
"intrash_person_keywords": "6FD38366-3BF2-407D-81FE-7153EB6125B6",
"import_session": "8846E3E6-8AC8-4857-8448-E3D025784410",
"movie": "D1359D09-1373-4F3B-B0E3-1A4DE573E4A3",
"description_newlines": "7F74DD34-5920-4DA3-B284-479887A34F66",
"no_duplicates": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
"multi_query_1": "D79B8D77-BFFC-460B-9312-034F2877D35B",
"multi_query_2": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
}
UUID_DICT_LOCAL = {
"not_visible": "4A836160-51B2-4E32-907D-ECDDB2CEC657", # IMG_9815.JPG
"burst": "9A5B4CE6-6A9F-4917-95D4-1C98D14FCE4F", # IMG_9812.JPG
"burst_key": "9A5B4CE6-6A9F-4917-95D4-1C98D14FCE4F", # IMG_9812.JPG
"burst_not_key": "4A836160-51B2-4E32-907D-ECDDB2CEC657", # IMG_9815.JPG
"burst_selected": "75154738-83AA-4DCD-A913-632D5D1C0FEE", # IMG_9814.JPG
"burst_not_selected": "89E235DD-B9AC-4E8D-BDA2-986981CA7582", # IMG_9813.JPG
"burst_default": "F5E6BD24-B493-44E9-BDA2-7AD9D2CC8C9D", # IMG_9816.JPG
"burst_not_default": "75154738-83AA-4DCD-A913-632D5D1C0FEE", # IMG_9814.JPG
"live_edited": "54A01B04-16D7-4FDE-8860-19F2A641E433", # IMG_3203.HEIC
"live": "8EC216A2-0032-4934-BD3F-04C6259B3304", # IMG_3259.HEIC
}
UUID_PUMPKIN_FARM = [
"F12384F6-CD17-4151-ACBA-AE0E3688539E",
"D79B8D77-BFFC-460B-9312-034F2877D35B",
"1EB2B765-0765-43BA-A90C-0D0580E6172C",
]
ALBUM_SORT_ORDER = [
"1EB2B765-0765-43BA-A90C-0D0580E6172C",
"F12384F6-CD17-4151-ACBA-AE0E3688539E",
"D79B8D77-BFFC-460B-9312-034F2877D35B",
]
ALBUM_KEY_PHOTO = "D79B8D77-BFFC-460B-9312-034F2877D35B"
UTI_DICT = {
"8846E3E6-8AC8-4857-8448-E3D025784410": "public.tiff",
"7783E8E6-9CAC-40F3-BE22-81FB7051C266": "public.jpeg",
"1EB2B765-0765-43BA-A90C-0D0580E6172C": "public.jpeg",
}
UTI_ORIGINAL_DICT = {
"8846E3E6-8AC8-4857-8448-E3D025784410": "public.tiff",
"7783E8E6-9CAC-40F3-BE22-81FB7051C266": "public.heic",
"1EB2B765-0765-43BA-A90C-0D0580E6172C": "public.jpeg",
}
RawInfo = namedtuple(
"RawInfo",
[
"comment",
"original_filename",
"has_raw",
"israw",
"raw_original",
"uti",
"uti_original",
"uti_raw",
],
)
RAW_DICT = {
"D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068": RawInfo(
"raw image, no jpeg pair",
"DSC03584.dng",
False,
True,
False,
"com.adobe.raw-image",
"com.adobe.raw-image",
None,
),
"A92D9C26-3A50-4197-9388-CB5F7DB9FA91": RawInfo(
"raw+jpeg, jpeg original",
"IMG_1994.JPG",
True,
False,
False,
"public.jpeg",
"public.jpeg",
"com.canon.cr2-raw-image",
),
"4D521201-92AC-43E5-8F7C-59BC41C37A96": RawInfo(
"raw+jpeg, raw original",
"IMG_1997.JPG",
True,
False,
True,
"public.jpeg",
"public.jpeg",
"com.canon.cr2-raw-image",
),
"E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51": RawInfo(
"jpeg, no raw",
"wedding.jpg",
False,
False,
False,
"public.jpeg",
"public.jpeg",
None,
),
}
ORIGINAL_FILENAME_DICT = {
"uuid": "D79B8D77-BFFC-460B-9312-034F2877D35B",
"filename": "D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg",
"original_filename": "Pumkins2.jpg",
}
UUID_IS_REFERENCE = "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C"
UUID_NOT_REFERENCE = "F12384F6-CD17-4151-ACBA-AE0E3688539E"
UUID_DUPLICATE = ""
UUID_DETECTED_TEXT = {
"E2078879-A29C-4D6F-BACB-E3BBE6C3EB91": "osxphotos",
"A92D9C26-3A50-4197-9388-CB5F7DB9FA91": None,
}
@pytest.fixture(scope="module")
def photosdb():
return osxphotos.PhotosDB(dbfile=PHOTOS_DB)
@pytest.fixture(scope="module")
def photosdb_local():
return osxphotos.PhotosDB(dbfile=PHOTOS_DB_LOCAL)
def test_init1():
# test named argument
photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
assert isinstance(photosdb, osxphotos.PhotosDB)
def test_init2():
# test positional argument
photosdb = osxphotos.PhotosDB(PHOTOS_DB)
assert isinstance(photosdb, osxphotos.PhotosDB)
def test_init3():
# test positional and named argument (raises exception)
with pytest.raises(Exception):
assert osxphotos.PhotosDB(PHOTOS_DB, dbfile=PHOTOS_DB)
def test_init4():
# test invalid db
(bad_db, bad_db_name) = tempfile.mkstemp(suffix=".db", prefix="osxphotos-")
os.close(bad_db)
with pytest.raises(Exception):
assert osxphotos.PhotosDB(bad_db_name)
with pytest.raises(Exception):
assert osxphotos.PhotosDB(dbfile=bad_db_name)
try:
os.remove(bad_db_name)
except:
pass
def test_init5(mocker):
# test failed get_last_library_path
def bad_library():
return None
# get_last_library_path actually lives in utils, but it needs to be patched in photosdb
# because it's imported into photosdb's namespace; given the layout of photosdb/ it has to be patched this way
mocker.patch("osxphotos.photosdb.photosdb.get_last_library_path", new=bad_library)
with pytest.raises(Exception):
assert osxphotos.PhotosDB()
def test_db_len(photosdb):
# assert photosdb.db_version in osxphotos._TESTED_DB_VERSIONS
assert len(photosdb) == PHOTOS_DB_LEN
def test_db_version(photosdb):
# assert photosdb.db_version in osxphotos._TESTED_DB_VERSIONS
assert photosdb.db_version == "6000"
def test_persons(photosdb):
assert "Katie" in photosdb.persons
assert Counter(PERSONS) == Counter(photosdb.persons)
def test_keywords(photosdb):
assert "wedding" in photosdb.keywords
assert Counter(KEYWORDS) == Counter(photosdb.keywords)
def test_album_names(photosdb):
assert "Pumpkin Farm" in photosdb.albums
assert Counter(ALBUMS) == Counter(photosdb.albums)
def test_keywords_dict(photosdb):
keywords = photosdb.keywords_as_dict
assert keywords["wedding"] == 3
assert keywords == KEYWORDS_DICT
def test_persons_as_dict(photosdb):
persons = photosdb.persons_as_dict
assert persons["Maria"] == 2
assert persons == PERSONS_DICT
def test_albums_as_dict(photosdb):
albums = photosdb.albums_as_dict
assert albums["Pumpkin Farm"] == 3
assert albums == ALBUM_DICT
def test_album_sort_order(photosdb):
album = [a for a in photosdb.album_info if a.title == "Pumpkin Farm"][0]
photos = album.photos
uuids = [p.uuid for p in photos]
assert uuids == ALBUM_SORT_ORDER
def test_album_empty_album(photosdb):
album = [a for a in photosdb.album_info if a.title == "EmptyAlbum"][0]
photos = album.photos
assert photos == []
def test_attributes(photosdb):
photos = photosdb.photos(uuid=["D79B8D77-BFFC-460B-9312-034F2877D35B"])
assert len(photos) == 1
p = photos[0]
assert p.keywords == ["Kids"]
assert p.original_filename == "Pumkins2.jpg"
assert p.filename == "D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg"
assert p.date == datetime.datetime(
2018, 9, 28, 16, 7, 7, 0, datetime.timezone(datetime.timedelta(seconds=-14400))
)
assert p.date_added == datetime.datetime(
2019,
7,
27,
9,
16,
49,
778432,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
)
assert p.description == "Girl holding pumpkin"
assert p.title == "I found one!"
assert sorted(p.albums) == ["Multi Keyword", "Pumpkin Farm", "Test Album"]
assert p.persons == ["Katie"]
assert p.path.endswith(
"tests/Test-10.15.7.photoslibrary/originals/D/D79B8D77-BFFC-460B-9312-034F2877D35B.jpeg"
)
assert p.ismissing == False
def test_attributes_2(photosdb):
"""Test attributes including height, width, etc"""
photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
assert len(photos) == 1
p = photos[0]
assert sorted(p.keywords) == ["Maria", "wedding"]
assert p.original_filename == "wedding.jpg"
assert p.filename == "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51.jpeg"
assert p.date == datetime.datetime(
2019,
4,
15,
14,
40,
24,
86000,
datetime.timezone(datetime.timedelta(seconds=-14400)),
)
assert p.description == "Bride Wedding day"
assert p.title is None
assert sorted(p.albums) == [
"AlbumInFolder",
"I have a deleted twin",
"Multi Keyword",
]
assert p.persons == ["Maria"]
assert p.path.endswith(
"tests/Test-10.15.7.photoslibrary/originals/E/E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51.jpeg"
)
assert not p.ismissing
assert p.hasadjustments
assert p.height == 1325
assert p.width == 1526
assert p.original_height == 1367
assert p.original_width == 2048
assert p.orientation == 1
assert p.original_orientation == 1
assert p.original_filesize == 460483
def test_missing(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["missing"]])
assert len(photos) == 1
p = photos[0]
assert p.path is None
assert p.ismissing == True
def test_favorite(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["favorite"]])
assert len(photos) == 1
p = photos[0]
assert p.favorite == True
def test_not_favorite(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["not_favorite"]])
assert len(photos) == 1
p = photos[0]
assert p.favorite == False
def test_hidden(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["hidden"]])
assert len(photos) == 1
p = photos[0]
assert p.hidden == True
def test_not_hidden(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
assert len(photos) == 1
p = photos[0]
assert p.hidden == False
def test_visible(photosdb):
"""test visible"""
photos = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
assert len(photos) == 1
p = photos[0]
assert p.visible
def test_not_burst(photosdb):
"""test not burst"""
photos = photosdb.photos(uuid=[UUID_DICT["not_hidden"]])
assert len(photos) == 1
p = photos[0]
assert not p.burst
def test_location_1(photosdb):
# test photo with lat/lon info
photos = photosdb.photos(uuid=[UUID_DICT["location"]])
assert len(photos) == 1
p = photos[0]
lat, lon = p.location
assert lat == pytest.approx(51.50357167)
assert lon == pytest.approx(-0.1318055)
def test_location_2(photosdb):
# test photo with no location info
photos = photosdb.photos(uuid=[UUID_DICT["no_location"]])
assert len(photos) == 1
p = photos[0]
lat, lon = p.location
assert lat is None
assert lon is None
def test_hasadjustments1(photosdb):
# test hasadjustments == True
photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
assert len(photos) == 1
p = photos[0]
assert p.hasadjustments == True
def test_hasadjustments2(photosdb):
# test hasadjustments == False
photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
assert len(photos) == 1
p = photos[0]
assert p.hasadjustments == False
def test_external_edit1(photosdb):
# test image has been edited in external editor
photos = photosdb.photos(uuid=[UUID_DICT["external_edit"]])
assert len(photos) == 1
p = photos[0]
assert p.external_edit == True
def test_external_edit2(photosdb):
# test image has not been edited in external editor
photos = photosdb.photos(uuid=[UUID_DICT["no_external_edit"]])
assert len(photos) == 1
p = photos[0]
assert p.external_edit == False
def test_path_edited1(photosdb):
# test a valid edited path
photos = photosdb.photos(uuid=["E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51"])
assert len(photos) == 1
p = photos[0]
path = p.path_edited
assert path.endswith(
"resources/renders/E/E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51_1_201_a.jpeg"
)
assert os.path.exists(path)
def test_path_edited2(photosdb):
# test an invalid edited path
photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
assert len(photos) == 1
p = photos[0]
path = p.path_edited
assert path is None
def test_path_derivatives(photosdb):
# test path_derivatives
photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
assert len(photos) == 1
p = photos[0]
path = p.path_derivatives
derivs = [
"D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068_1_100_o.jpeg",
"D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068_1_105_c.jpeg",
]
for i, p in enumerate(path):
assert p.endswith(derivs[i])
def test_ismovie(photosdb):
# test ismovie == True
photos = photosdb.photos(uuid=[UUID_DICT["movie"]])
p = photos[0]
assert p.ismovie
def test_not_ismovie(photosdb):
# test ismovie == False
photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
p = photos[0]
assert not p.ismovie
def test_count(photosdb):
photos = photosdb.photos()
assert len(photos) == PHOTOS_NOT_IN_TRASH_LEN
def test_photos_intrash_1(photosdb):
"""test PhotosDB.photos(intrash=True)"""
photos = photosdb.photos(intrash=True)
assert len(photos) == PHOTOS_IN_TRASH_LEN
def test_photos_intrash_2(photosdb):
"""test PhotosDB.photos(intrash=True)"""
photos = photosdb.photos(intrash=True)
for p in photos:
assert p.intrash
def test_photos_intrash_3(photosdb):
"""test PhotosDB.photos(intrash=False)"""
photos = photosdb.photos(intrash=False)
for p in photos:
assert not p.intrash
def test_photoinfo_intrash_1(photosdb):
"""Test PhotoInfo.intrash"""
p = photosdb.photos(uuid=[UUID_DICT["intrash"]], intrash=True)[0]
assert p.intrash
assert p.date_trashed.isoformat() == "2120-06-10T11:24:47.685857-05:00"
def test_photoinfo_intrash_2(photosdb):
"""Test PhotoInfo.intrash and intrash=default"""
p = photosdb.photos(uuid=[UUID_DICT["intrash"]])
assert not p
def test_photoinfo_intrash_3(photosdb):
"""Test PhotoInfo.intrash and photo has keyword and person"""
p = photosdb.photos(uuid=[UUID_DICT["intrash_person_keywords"]], intrash=True)[0]
assert p.intrash
assert "Maria" in p.persons
assert "wedding" in p.keywords
def test_photoinfo_intrash_4(photosdb):
"""Test PhotoInfo.intrash and photo has keyword and person"""
p = photosdb.photos(persons=["Maria"], intrash=True)[0]
assert p.intrash
assert "Maria" in p.persons
assert "wedding" in p.keywords
def test_photoinfo_intrash_5(photosdb):
"""Test PhotoInfo.intrash and photo has keyword and person"""
p = photosdb.photos(keywords=["wedding"], intrash=True)[0]
assert p.intrash
assert "Maria" in p.persons
assert "wedding" in p.keywords
def test_photoinfo_not_intrash(photosdb):
"""Test PhotoInfo.intrash"""
p = photosdb.photos(uuid=[UUID_DICT["not_intrash"]])[0]
assert not p.intrash
assert p.date_trashed is None
def test_keyword_2(photosdb):
photos = photosdb.photos(keywords=["wedding"])
assert len(photos) == 2 # won't show the one in the trash
def test_keyword_not_in_album(photosdb):
# find all photos with keyword "Kids" not in the album "Pumpkin Farm"
photos1 = photosdb.photos(albums=["Pumpkin Farm"])
photos2 = photosdb.photos(keywords=["Kids"])
photos3 = [p for p in photos2 if p not in photos1]
assert len(photos3) == 1
assert photos3[0].uuid == "A1DD1F98-2ECD-431F-9AC9-5AFEFE2D3A5C"
def test_album_folder_name(photosdb):
"""Test query with album name same as a folder name"""
photos = photosdb.photos(albums=["Pumpkin Farm"])
assert sorted(p.uuid for p in photos) == sorted(UUID_PUMPKIN_FARM)
def test_multi_person(photosdb):
photos = photosdb.photos(persons=["Katie", "Suzy"])
assert len(photos) == 3
def test_get_db_path(photosdb):
db_path = photosdb.db_path
assert db_path.endswith(PHOTOS_DB_PATH)
def test_get_library_path(photosdb):
lib_path = photosdb.library_path
assert lib_path.endswith(PHOTOS_LIBRARY_PATH)
def test_get_db_connection(photosdb):
"""Test PhotosDB.get_db_connection"""
conn, cursor = photosdb.get_db_connection()
assert isinstance(conn, sqlite3.Connection)
assert isinstance(cursor, sqlite3.Cursor)
results = conn.execute(
"SELECT ZUUID FROM ZGENERICASSET WHERE ZFAVORITE = 1;"
).fetchall()
assert len(results) == 1
assert results[0][0] == "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51" # uuid
conn.close()
def test_export_1(photosdb):
# test basic export
# get an unedited image and export it using default filename
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].original_filename
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest)[0]
assert got_dest == expected_dest
assert os.path.isfile(got_dest)
def test_export_2(photosdb):
# test export with user provided filename
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
timestamp = time.time()
filename = f"osxphotos-export-2-test-{timestamp}.jpg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename)[0]
assert got_dest == expected_dest
assert os.path.isfile(got_dest)
def test_export_3(photosdb):
# test file already exists and test increment=True (default)
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].original_filename
filename2 = pathlib.Path(filename)
filename2 = f"{filename2.stem} (1){filename2.suffix}"
expected_dest_2 = os.path.join(dest, filename2)
got_dest = photos[0].export(dest)[0]
got_dest_2 = photos[0].export(dest)[0]
assert got_dest_2 == expected_dest_2
assert os.path.isfile(got_dest_2)
def test_export_4(photosdb):
# test user supplied file already exists and test increment=True (default)
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
timestamp = time.time()
filename = f"osxphotos-export-2-test-{timestamp}.jpg"
filename2 = f"osxphotos-export-2-test-{timestamp} (1).jpg"
expected_dest_2 = os.path.join(dest, filename2)
got_dest = photos[0].export(dest, filename)[0]
got_dest_2 = photos[0].export(dest, filename)[0]
assert got_dest_2 == expected_dest_2
assert os.path.isfile(got_dest_2)
def test_export_5(photosdb):
# test file already exists and test increment=True (default)
# and overwrite = True
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].original_filename
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest)[0]
got_dest_2 = photos[0].export(dest, overwrite=True)[0]
assert got_dest_2 == got_dest
assert got_dest_2 == expected_dest
assert os.path.isfile(got_dest_2)
def test_export_6(photosdb):
# test user supplied file already exists and test increment=True (default)
# and overwrite = True
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
timestamp = time.time()
filename = f"osxphotos-export-test-{timestamp}.jpg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename)[0]
got_dest_2 = photos[0].export(dest, filename, overwrite=True)[0]
assert got_dest_2 == got_dest
assert got_dest_2 == expected_dest
assert os.path.isfile(got_dest_2)
def test_export_7(photosdb):
# test file already exists and test increment=False (not default), overwrite=False (default)
# should raise exception
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].filename
got_dest = photos[0].export(dest)[0]
with pytest.raises(Exception) as e:
# try to export again with increment = False
assert photos[0].export(dest, increment=False)
assert e.type == type(FileExistsError())
def test_export_8(photosdb):
# try to export missing file
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["missing"]])
assert photos[0].export(dest) == []
def test_export_9(photosdb):
# try to export edited file that's not edited
# should raise exception
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
filename = photos[0].filename
with pytest.raises(Exception) as e:
assert photos[0].export(dest, edited=True)
assert e.type == ValueError
def test_export_10(photosdb):
# try to export edited file that's not edited and name provided
# should raise exception
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["no_adjustments"]])
timestamp = time.time()
filename = f"osxphotos-export-test-{timestamp}.jpg"
with pytest.raises(Exception) as e:
assert photos[0].export(dest, filename, edited=True)
assert e.type == ValueError
def test_export_11(photosdb):
# export edited file with name provided
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
timestamp = time.time()
filename = f"osxphotos-export-test-{timestamp}.jpg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename, edited=True)[0]
assert got_dest == expected_dest
def test_export_12(photosdb):
# export edited file with default name
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["has_adjustments"]])
edited_name = pathlib.Path(photos[0].path_edited).name
edited_suffix = pathlib.Path(edited_name).suffix
filename = (
pathlib.Path(photos[0].original_filename).stem + "_edited" + edited_suffix
)
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, edited=True)[0]
assert got_dest == expected_dest
def test_export_13(photosdb):
# export to invalid destination
# should raise exception
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
# create a folder that doesn't exist
i = 0
while os.path.isdir(dest):
dest = os.path.join(dest, str(i))
i += 1
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
filename = photos[0].filename
with pytest.raises(Exception) as e:
assert photos[0].export(dest)
assert e.type == type(FileNotFoundError())
def test_export_14(photosdb, caplog):
# test export with user provided filename with different (but valid) extension than source
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export_tif"]])
timestamp = time.time()
filename = f"osxphotos-export-2-test-{timestamp}.tif"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest, filename)[0]
assert got_dest == expected_dest
assert os.path.isfile(got_dest)
assert "Invalid destination suffix" not in caplog.text
def test_export_no_original_filename(photosdb):
# test export OK if original filename is null
# issue #267
tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
dest = tempdir.name
photos = photosdb.photos(uuid=[UUID_DICT["export"]])
# monkey patch original_filename for testing
original_filename = photos[0]._info["originalFilename"]
photos[0]._info["originalFilename"] = None
filename = f"{photos[0].uuid}.jpeg"
expected_dest = os.path.join(dest, filename)
got_dest = photos[0].export(dest)[0]
assert got_dest == expected_dest
assert os.path.isfile(got_dest)
photos[0]._info["originalFilename"] = original_filename
def test_eq():
"""Test equality of two PhotoInfo objects"""
photosdb1 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photosdb2 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photos1 = photosdb1.photos(uuid=[UUID_DICT["export"]])
photos2 = photosdb2.photos(uuid=[UUID_DICT["export"]])
assert photos1[0] == photos2[0]
def test_eq_2():
"""Test equality of two PhotoInfo objects when one has memoized property"""
photosdb1 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photosdb2 = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photos1 = photosdb1.photos(uuid=[UUID_DICT["in_album"]])
photos2 = photosdb2.photos(uuid=[UUID_DICT["in_album"]])
# memoize a value
albums = photos1[0].albums
assert albums
assert photos1[0] == photos2[0]
def test_not_eq(photosdb):
photos1 = photosdb.photos(uuid=[UUID_DICT["export"]])
photos2 = photosdb.photos(uuid=[UUID_DICT["missing"]])
assert photos1[0] != photos2[0]
def test_photosdb_repr():
photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photosdb2 = eval(repr(photosdb))
ignore_keys = ["_tmp_db", "_tempdir", "_tempdir_name", "_db_connection"]
assert {k: v for k, v in photosdb.__dict__.items() if k not in ignore_keys} == {
k: v for k, v in photosdb2.__dict__.items() if k not in ignore_keys
}
def test_photosinfo_repr(photosdb):
photos = photosdb.photos(uuid=[UUID_DICT["favorite"]])
photo = photos[0]
photo2 = eval(repr(photo))
assert {k: str(v).encode("utf-8") for k, v in photo.__dict__.items()} == {
k: str(v).encode("utf-8") for k, v in photo2.__dict__.items()
}
def test_from_to_date(photosdb):
"""test from_date / to_date"""
os.environ["TZ"] = "US/Pacific"
time.tzset()
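# time.tzset() (Unix-only) re-reads the TZ environment variable set above so
# that the naive datetimes below are interpreted in US/Pacific time.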
photos = photosdb.photos(from_date=datetime.datetime(2018, 10, 28))
assert len(photos) == 16
photos = photosdb.photos(to_date=datetime.datetime(2018, 10, 28))
assert len(photos) == 7
photos = photosdb.photos(
from_date=datetime.datetime(2018, 9, 28), to_date=datetime.datetime(2018, 9, 29)
)
assert len(photos) == 4
def test_from_to_date_tz(photosdb):
"""Test from_date / to_date with and without timezone"""
os.environ["TZ"] = "US/Pacific"
time.tzset()
photos = photosdb.photos(
from_date=datetime.datetime(2018, 9, 28, 13, 7, 0),
to_date=datetime.datetime(2018, 9, 28, 13, 9, 0),
)
assert len(photos) == 1
assert photos[0].uuid == "D79B8D77-BFFC-460B-9312-034F2877D35B"
photos = photosdb.photos(
from_date=datetime.datetime(
2018,
9,
28,
16,
7,
0,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
),
to_date=datetime.datetime(
2018,
9,
28,
16,
9,
0,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=72000)),
),
)
assert len(photos) == 1
assert photos[0].uuid == "D79B8D77-BFFC-460B-9312-034F2877D35B"
def test_date_invalid():
"""Test date is invalid"""
# doesn't run correctly with the module-level fixture
from datetime import datetime, timedelta, timezone
import osxphotos
photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
photos = photosdb.photos(uuid=[UUID_DICT["date_invalid"]])
assert len(photos) == 1
p = photos[0]
delta = timedelta(seconds=p.tzoffset)
tz = timezone(delta)
assert p.date == datetime(1970, 1, 1).astimezone(tz=tz)
def test_date_modified_invalid(photosdb):
"""Test date modified is invalid"""
photos = photosdb.photos(uuid=[UUID_DICT["date_invalid"]])
assert len(photos) == 1
p = photos[0]
assert p.date_modified is None
def test_import_session_count(photosdb):
"""Test PhotosDB.import_session"""
import_sessions = photosdb.import_info
assert len(import_sessions) == PHOTOS_DB_IMPORT_SESSIONS
def test_import_session_photo(photosdb):
"""Test photo.import_session"""
photo = photosdb.get_photo(UUID_DICT["import_session"])
import_session = photo.import_info
assert import_session.creation_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
729811,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert import_session.start_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
725564,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert import_session.end_date == datetime.datetime(
2020,
6,
6,
7,
15,
24,
725564,
tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200), "PDT"),
)
assert len(import_session.photos) == 1
def test_uti(photosdb):
"""test uti"""
for uuid, uti in UTI_DICT.items():
photo = photosdb.get_photo(uuid)
assert photo.uti == uti
assert photo.uti_original == UTI_ORIGINAL_DICT[uuid]
def test_raw(photosdb):
"""Test various raw properties"""
for uuid, rawinfo in RAW_DICT.items():
photo = photosdb.get_photo(uuid)
assert photo.original_filename == rawinfo.original_filename
assert photo.has_raw == rawinfo.has_raw
assert photo.israw == rawinfo.israw
assert photo.uti == rawinfo.uti
assert photo.uti_original == rawinfo.uti_original
assert photo.uti_raw == rawinfo.uti_raw
def test_verbose(capsys):
"""test verbose output in PhotosDB()"""
photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB, verbose=print)
captured = capsys.readouterr()
assert "Processing database" in captured.out
def test_original_filename(photosdb):
"""test original filename"""
uuid = ORIGINAL_FILENAME_DICT["uuid"]
photo = photosdb.get_photo(uuid)
assert photo.original_filename == ORIGINAL_FILENAME_DICT["original_filename"]
assert photo.filename == ORIGINAL_FILENAME_DICT["filename"]
# monkey patch
original_filename = photo._info["originalFilename"]
photo._info["originalFilename"] = None
assert photo.original_filename == ORIGINAL_FILENAME_DICT["filename"]
photo._info["originalFilename"] = original_filename
# The following tests only run on the author's personal library
# They test things difficult to test in the test libraries
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_not_visible_burst(photosdb_local):
"""test not visible and burst (needs image from local library)"""
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["not_visible"])
assert not photo.visible
assert photo.burst
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_visible_burst(photosdb_local):
"""test not visible and burst (needs image from local library)"""
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst"])
assert photo.visible
assert photo.burst
assert len(photo.burst_photos) == 4
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_key(photosdb_local):
"""test burst_key"""
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_key"])
assert photo.burst_key
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_key"])
assert not photo.burst_key
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_selected(photosdb_local):
"""test burst_selected"""
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_selected"])
assert photo.burst_selected
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_selected"])
assert not photo.burst_selected
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_burst_default_pic(photosdb_local):
"""test burst_default_pick"""
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_default"])
assert photo.burst_default_pick
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["burst_not_default"])
assert not photo.burst_default_pick
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_path_edited_live_photo(photosdb_local):
"""test path_edited_live_photo (needs image from local library)"""
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["live_edited"])
assert photo.path_edited_live_photo is not None
@pytest.mark.skipif(SKIP_TEST, reason="Skip if not running on author's local machine.")
def test_path_edited_live_photo_not_edited(photosdb_local):
"""test path_edited_live_photo for a live photo that's not edited (needs image from local library)"""
photo = photosdb_local.get_photo(UUID_DICT_LOCAL["live"])
assert photo.path_edited_live_photo is None
def test_is_reference(photosdb):
"""test isreference"""
photo = photosdb.get_photo(UUID_IS_REFERENCE)
assert photo.isreference
photo = photosdb.get_photo(UUID_NOT_REFERENCE)
assert not photo.isreference
def test_adjustments(photosdb):
"""test adjustments/AdjustmentsInfo"""
from osxphotos.adjustmentsinfo import AdjustmentsInfo
photo = photosdb.get_photo(UUID_DICT["adjustments_info"])
adjustments = photo.adjustments
assert isinstance(adjustments, AdjustmentsInfo)
assert adjustments.asdict() == {
"data": b"mW[\xb7\xa2:\xb3\xfd/\xbe\xda\xa3\x17((\xf4\x18\xdf\x03H\xc2E\xb9%\\\xc4\xb3\xce\x03\x02\x12.\x82\n\x1at\x8f\xfd\xdf\xbf\xb8\xba\xfb\xec\xdec\x1c\xde\x92\xaa\xcc\x9aU\t\xa9\x99\xbff\x8f\xe26T}gv\xa7~\xf6\xe3\xaf\xd9\xf1^\xb5\xb9s?\x1f\x8b\xdb\xec\xc7\x8c\x97\xf5\xf5r\xf6m\x96^.\xd1O\xbf\xf7\xe4\x8a\xff\xce}\xe7\x17\x1c3\x0c\x19)\xce)*\x1e\xd5O#\xffmvi\xd3\xf1\xd4\xdf\xce\xcc\xd3\xc5\xfb\xd9\xdf\xdff\xe7bL\xf3tL\xdf\xf8\xe7t\x18\x8b[\\\xe5#\x99\xfdXr\x0b\x81-\xa8.E[u\xc5?\x11\xd8\xba\xef\x02C\xff\xe9l\x14UI\xc6\xd9\x0f\x81[.\xbe\xcd\xfa[Utc:\xfe\x0c\xc7\xd0\xdf\xb1\xd2\xf1\xff\x163\x06i^\xdf\x87\xf1\xcc\xdc\x86\xd9\x8f\xff\xf9\xeb\xff\xf1(\xba\xf4\xd8\x16\xf9\xec\xc7x\xbb\x17,\x8bb\x1c\xab\xae\x1c\xde\x04\xfb\xd3\x89\rw}\x96\xb6\xbb\x9fq\xb9o\xbf&\r6n\xdfs\xc3\xd7d\xd5]\xee\xe3o\x9f\xefKn\xbd\x14\xc4\xe5\x8a\x93\x16\xc2ZX,\xe4\xdf\xab\xc0t\xe9\x87\xfb\xad\xf8\x03Hm\xd3\xac\xf1\xfa\xaa\xfb\x13]\xbd\xbd\xa1\xbab\xf8\x89>\xbcs\x1c\xc6*\xfbbu\xe1\x16\xef \x1c\xb7\x96\x84%\xbf\\/DA\xe6xy\xc5\xadY\xfdD\xee\xcb&K\xdcR^\xf0\xe2JZ-\xd6\x82\xc8I\xac\x12\xf7\xb1\x8f\xd2\xf6\xfe\x0e\xfe}!\x89+\xee\x8f\x8f\x15\xf3\xf8'\x11\x86\xbe\xe4\xe5\xf5J\xe4Y\xa5EYZ\xf0k\xf1\xdbl\xec\xbb\xb4EiW\x16\xbf\x82\x08\xe2j\xcd\t\xb2\xb4\\\x8bk\xf1\xbd}\x0b\xf1\xcb\xb2\x14\x17\xb2\xc0\xf3\xeb\x95\xb0\xe6DIZ,\x99I\x96\xde&Q\xfe\xf7\xc7\x88}\x95\xd1N/l\xb3at\xd9\xe6\xdc\xe5\x88\xa3\xc6\x8f\x15q\x8f\xf8\xc6\x89U'\x860\xb9\xda\x1b\xf7b\xc1\xf2\x18\xab\xe7;\xe4\x13Ro\x82\xb5%\x83\xaa\xe1\x0e\xc4\x8c-\xd8\xf2\x9e\x19\xe9m\x9c\xf2\xf9\x18\xc7r\x9a\xb5\xfcb\xbfl\xb5\xcf\x0fbQ\xad\r\xbd\xa8\xc9\x13\x0bf^\x84\x94\t\xaa\x073\x06$\xd1#\x07\xc4\xaa\xb5\x07m\x92\xc4\x1b\xdd\xb4\xd2\xd6I\xa6G\t\x97Jy\x0co4\xcc\xc5\x88\x8f\x0eC\xb4\xe0\x0fG\xfe2\xed\x8d\xe8T\xa8gM\xc3\x8d\x13Q1fD\xa2H\x831\xe2s#\xe2\xc8\x1e\xc3\x9c\xe1\xb6\x0c\xb7\t\xe2\xe6fz\xe9\xf0\xf8\xfc\x08\xd7\xa2\xc6\x0f\xdeAEcx>\x84)\x8c\xae\xd1\x83\x1b\x86Mm\xc5\xa7)k[Q\x80Op\xc0\xaa\xca\x80\x92c\xa46\x19\x08\x84\xd0\x00\xf9\x1eG\xc4b\x80\x07\xdc\xb6\xdb\x98\x1b\xb3\x00\xf2\xf6\xbe\x8aJt\x02\xce\xa6\x94[\xb7C\xf8\x14\xa1>\xd2/Q\xf3,??\xb6\\\x98!\xd2p\xa1\xd7\xbb\xa6j\x9d\xd0\x9c1\xa3\x9c\xa3\xbd\xec\xd4P\xe5\x04\xc3\xdf\x80\x97m\xdc\x8c\xc7/\xc0F,\x83\x05\xf4\x92\x92\xd3\xb5\xd8\xe7\x1fZ\xf4\xf9\x11\x19\xf6\xa2\xdc\xc0!\x12\xac\r?\xc5%L\xa5\x90\x12\x13C\xd5\x0c\xa3\t\xed\xdd\xb8\xc7\x11\xaa\xb6x\xab\x9aI\xf3\x8ba\xc3\xf6\x8e\x9f\x18 \x7f\xfa\x02$\xacV~\xe8\xc4\xad\xb5rt;\xcc\x91\xca;\xb2\xb2\xa7\x93\xdb\x81\xa7\x1f\x00b#\xad\xc9\xf6\x08e!\x8c\xca\x18?\xbd\xc2J\xb3\xea\x10^\xaa/\x82\xdc\x9b 
\xc3\x0b\x7f\xe1\xb5\xb0\xd1\xe2\xc4QK\xf1\x1ey\x02r\xc9\xd6\x02HA\x00\x99\x18t~\x98\xf3\xa2\x94$!\x8a&'\x82\x93\xbf\xe7P\xbe\x87\xe7\xb2\xfd\xfch\x96\x9f\x1f\xf8!\xff\xc30\xe4\x8b\xdf\x88\xe1\xdevsU\x1c\xbdk\xc96\x8b\xce\xe5mB\xaf=l\xb9\xb8s\x8e7^\\\xb2cD\xae\xefc\xd9\xf6\xfb\x18E7k\xa4\x97X\x9b\x9f\xf0]Y\xed\xc1\xa5\xfb\xaa!\xf7\xab\x86<l\xbde\xdf\x1fp\x1e\x9a\xb1\x99\x14jG\xf4s\x9f\x132\xef\x8d.\xa9m\x1c\x1fL\xbd\xd9?T\xb0\xc3\x9f\x1f\xd6\x96\x01\x1c\xf5\xa6\x8coj\xb1E)\xb1W\xcd\xeb\x10\xe4\xb2\xcbq\x9f\x1fy0w|\x9e7\x82p'\x04\xe5\xa4\x10\xedI\x91\x8b@\x0c\xe2\x81\xac'\xbf5_\xc3\x0b\x05H\xb79\xfb\xee\xa1q\x05\xfa\x88\xa56\x15\x10R\x0f(\x92\xab\xbd|\x84\xc8\x0e\x82\x81\xe2;\xd9J\xc6\xc5?f\x13}\xc0'\xf5\xfcR8i1\x87_\xca<\xd5(\xf5\x81\x1a>\xb5)\xb9x5\xef\xfaP\x91\x02\xed\x00\x1c\xa7\xbf6\xe1\x93B\xc8!\x8d2<\x02|\x80\x8c\x1e\xc4\nN\xc8Xou\xfb\xe2W\xc9\xc2|\xf9\xc7\xb4\x94oo\x1c\x9d\nX#\xbd\xa3Q\x0eCl\x16\xce\xb3a\xd9\xc8\x9b0\x18\xed\xddR\xb4\x1f\xaf+\x82j\x883\x04\xcf\xf0\x98\xc5t\xf2}\xfd\xe4xm\xab\xd6a\x1c\xde\x0e\xf8\xd0\x99\xe7KtT\xa31\xea\x14'\xf3\xb9\x9d\x86\xedt\x8b\xc1`\xe2\xbe\xb6kE\xb2_bV@Q4\xba\xa6|Vk\xdf\x16{O#\xd3\x11l\xa8g\xa2tm\xb8M\xb8\xa6\x82\xa9\xf9\x99WD\x8el\xb8y\x9c\xc1v\x02\x9d\xe2\xea>54\xc4\x9d\xed']\xee\xb4\xecfW\r\xb55n(\xf4\x8d\x9d\xec\xe9\xe3\xa4\xae6\xd66\xaa\x16j\x04\xe1\xa8`\xaa|~\x9c\xb4K\xef\x18>\x97\xb3\x04=\xb1\\\x9c4?q6H\xe6\xad\x8b\xe9\xe5\x94_j\x88\x01\xe3Ar\xb8\x90\xf3kG\xd9\xd5\xc3\xdd\xc5D\xda\xdf\x9d\xbal\nEOh\xd9U\xaf\xb3\xc1\x9b\x87\x0b\xe9pp:\xf7s\xfa\xf9!k~co\xc9\xee\xbc=\xd9\xaeD\x17\x08t\t\xceU\x93U\x88\xc3\xa6B\x91\xa5\r\x12\xae\xc7\xad\x0b\x92\x97\xaf\xeb\xca\xc1TV\xb5\x9en\"\xc1\xce\xab\xca\x9ao\xe5vs\xf3\xe5\xd1\x08\xedC\x80^km\x0e\x1c\x80\xfc\x00\x9at\x7fUwW\xb0\xf5#\x1d5\xa5\xb1\xf1s\x0bq\x9d\x86\x04g\xfbl\xc16,/h\xe3K\x9a\x00\xcf\x04^\xdd\x83\xec\xd4\x15\xfb[\xf5CHe\xd8yZ*\xf9W\xb5s\\;C\x13\xa2\x9d^\xdby\x82\xe8IG}\xa8W`\xb0j\xe5\xe6\xe0\x86\xb74\xff\xb4+\xb9-$\xb4\xddm\x86\xa7\xf6R<XJN\xd8\xb7\xe7J\xbf\xdb\xbb\x8bTw\x9bMnm\xedC\xab\x82\x01\xa8\x12\xf6\xc8\xba6p\xc6\x9aj\xf2\xb04\xb3\xde=\xc1k\xfb\xa2/\xa49\xd0\x0e\xfd\t\xa9\xe0\xc5\xae\x86\xbdNh\xb7\x05\x19\x06\x08\xc8 
\xc8p\xcd\xeb^jEq3U\xae\xd1\xd3\xa2\x9f\x9a\x0b\xab\x93\xab\x95,\xaf\xa7];XX\xdb5\xf7\xf4jen\x06!\xf1\x83\x8b\xebE@\xc4\x94\xdf\x00\x9f\xdb\x9b\x1b\xfbaa\xe1\x9a\x92\xc8\xb1Z*\xe4H>oa\xd6\x1c\x9e\x88\xd7\x0f\\\xe0=]b\xc0\xc4\x06T:\x00\xd5\xce-l\x9e\x8d\xba'^\xe5(\xb6&\r\xdef\xe0vA\xd38%w\xd4\xd4\xcc\x86\xa8<\x1b\xb8\x19\xdc\xe7+\xb7l\xa5H7\x9f\x1f\x9e)\x84\xdd\x15G\x9e\xb1\x14B\xa2:\x1bm\x11z\x16\x95\xaf`\x1a\x12\xf3iwf\x15\x12\x0b\xfbw\xebE\x9f\xbe\x16iv\xc0\xdd]FL#\x99m\x12?d'\xa9\xf3\x02K\xd8\tM\xfd\xa8\xf2\x87\xed\xf4\xf7\xb6zB\xeb<\x90+\x19\x1f\xe0U\x1e\xdb\xa9-\xad\x8e\xbb\xd4\x15\xb8\x9aUYoqx\xb3\x96\xc3<\xa8y\xc7i\xc2\x97_\x8d\x0b\xad51+\x8c\x03\xf7\x8a\xbd\xa1R\xae\x83\xe1\xd4\xd4\x05\xeb\x10FY\x9dqT\xeen\xef\x8bw\x15\x80[\xe6e\xd3\xb8\x84:%5Y,\xe1\xb6\xef\xec*\xa7\x10daG\xa5\x07\xd8J\xfe\x86\xa8\x9e\x9e\xf5\x8e:\xd9Xk@\x98*B\xc8\xda\\\xecM25Rp~ME\x0ey\xe5\x18\xa1\xf6\xa2\x9f\x95\xb4F\xb06\xac&\xca\xa6'6;.\xa8H\xfe\x04\xad\x8dw\xea\x1e[n\x92\xac\x91\x12\x03\x7f@\x83\xcf\x19\x10%\xaeG\xec\x03\x14\xc2C\xa9\xa6\x8a\xde\xd2r\xc2\x81\x06\xd3&&\x9b\xb8\x85\x87d\x9f\x93C\xa3\t\xa6\xb3\xf7\xe5J[\x8c\xf9\x92\x8a\xaca\xf6N\xe4\x7f~\xa0\x9d\x9c\xe1\xfbt2!l\xfcM)\xed\xd9\x11\x0fu\x94\xabz$\x9c\x86\x89\xdca\x96\x8cu\xa5%\x86I\x8f\x15\xa9\x00\x10}tDQ\x0b\r\x13\x87>\x1f\x00Xz\xa9\xb2\xc84A\xc1\x13\x95\x1b\xd8\xd3KG\x9e;C\xe7\xc8\xb1\x94\x13\x8d\x96\xac\xd7r\x9e\x1e\xf5\xa4\xc4\xee\x1a\x8a\xc2\xbe$\x0f\x15\xf6\xe1\xfeL\x12Y7)k\xe3\x0e\x01K\xc1\xb3\xd1\x96\x80\xa2q'*\xde\xb5'\x13\t\x04\xae\xa04\xdc\xb8MLv\x17\x9f\xff\xfcx\xee\xe6\xc6\xb5t7\ngh\xe1p\x1d\xab\xfb\xd3b=kD\x16\x81\xfb>H'\xa7\xd78\x01\x17\xaa\xab\x02\xd1\x0e\x11\x02s\x80\x05\x8f\xdd\xa6;v\xabF\x90\xca>\xb8\x98~J\x9e\x0bm! \x7f\x82\x0b\xe0\x0c~\xad\x08\xecW\x0c]\xaf2\xac\xad\xe9G)\x95\xae\xe0\x9c\xb0}\x96(\xe8B/\xa4\xbc\x08\xf6\xe10 H@\x04\xfc\x145Gv\xd7\xd8\x9a2?\x82\xbd\x106\xc8\xe2uI\xc9\xee\xbe|\xd2T!H\xe9<c\xb7\xa7\xa3\"G\xd5G;{a\xd70\x85$\x08\x118\x81\xa8\xd97\xea$\x81\xde\x0f:\xe4\xdc\xb5\xaew\xacR\xa0\xa0\x1d\x9c\x04\xc55\x90l\x9c<\xbd (\xa0uW\x16\xa5\xa6\x84N\xed\xcfc\xed98*\xe5,\xa3m\x10xv\x08\xae\x92\x82\xado\xc0A\xf1v\xbe\xbc\xd5\xf7\xc0c\xdd\x12k\xcb\xd2;\x95\\\xa9-\xfb\xff0\xe9\xdf\xbe\x05\xb8\xf2\xa7|]\xfeK\xbcr\x1c\x93\x9e\x94Tc\xf1K\xbe\xf2o\xf9\xfa\x87\xfc}\xbfD\xf8\x9f\xc2\xf8\x1fI\xfcK\"\x7f\x9b\x11\xa6?\xb7\xc5\xf3m\x96\xb8\xd5R`\xb2\x9d\xe9vQ^I\xd2\xfa\xef\xdf\x8a|\xd3w\xe3\x8d=A\xfe\x10\xe9\x98\xa4yO\xdf\n\x9dyU9{bT\xa7\xea\xeb\xa9\x84\xcf\xe9m\x0c\xfa\xae\x98\xfd\xfd\xbf\x7f\xff\x17",
"editor": "com.apple.Photos",
"format_id": "com.apple.photo",
"base_version": 0,
"format_version": "1.5",
"adjustments": [
{
"formatVersion": 1,
"enabled": True,
"settings": {
"offsetLocalLight": 0,
"offsetHighlights": 0,
"inputLight": 0.3073453608247423,
"offsetExposure": 0,
"offsetBlackPoint": 0,
"offsetBrightness": 0,
"statistics": {
"p02": 0.00784313725490196,
"p50": 0.09803921568627451,
"autoValue": 0.2856,
"blackPoint": 0.0031976514035982175,
"tonalRange": 0.09845670498375754,
"p25": 0.03529411764705882,
"p98": 0.6,
"lightMap": "FVpKd0pbSVkQWA5XR1kNWBNWFFYqMCOpJFgbWBmuF1YhjCT7J9Eik0ZhIWJFl1PIVGlWa1dtWW9acl12X3lD/hJwDlUPVkdYJFcPVRAxFBZIWEhYGVNEWBJXElYYWCGIJalNYxvgF3AgbUrwUd5V1VZsV21Zb1pxXHVfeBmDDSkNVw5WF1YVVDFWR1dHV0hXSFdIWElYGVkTWkrIPasv/U75D1sPZBRtUmZUaFVqVv0ssCjJWfxcll54FyEZSBBWR1YbVBkcET4UHEdXSVhJWElZSllKW0tcTF1MXiVgRfENCg9lOnRSfVRoVGpVkyg/K0UcRhk0UPoOIBJfR+dHVw0NDzMaHB9YSFhJWElZSlpKWktbTF1MXk5gT2FPYg0GDWQ1vDV/VHM2gCFsV4JC1xWgFa8UwhISFBIUVxRXOWoSVRiKSKBIYklZSllKWkpbS1xMXk1fT2FPYhBmDQUNWlJ6NGMUdRB1N9AXwxOnEyQTEhMRDkcXRRcUFVgWSyPeJaciZUpiSlpKW0tbTFxMXU1fT2FPYlFkDWYNBg5uVP4ROhKJERARERISEnQUd158YYURVxNVFxQX0RdXFmgl/k3/Sv9KWkpbS1xMXU1eT2FPYlFkUXMOdB5tPqgv/w+9KYwqoFl0WnNbr153X3lhq0pbSloXWRVrJtwpWD+fSuA6XEpnTF1MX05gT2FPY1FlP3ooZSdUIWIYeBnhGmodhh+oHnYjMSWZIGkXvBELS/JKXEpbGkgWrBeKRahM6kzZTd9O00/dT+NQ11HTUL4TgxAhDywROREWEWsh7xQlIzszRTRGM0MuPRt6EoVMXUxeFFwPEA8ODzQRRhLFEswSuhK8HpQbcxwvFywPQg4fDW0SzA+aDwwQEBUyDxYpPj1OQFA8TDZENNoqkUywFF0RDw8ODhEQERHoEWASYhtjKGMpQiY2IzQbag9rDwwQGw4SDhoNDw0SFSIeNyk9O09CUTtML35MvzqRFBUScRFmFbcWwxQQGfNPllBjUWUrZSZnImpVbBVtVnANcQ0LDSMaKSEsISojMjA8Mz5ceF55Hnkgyi7QM5oPDhxbECwPIRa7HOkU7w4IDQcPeVN9HOdWcFlxEnAOGQwHDR0mMyw3LDcrMikwMD0seGCMYXwvfB6CJKVi2BVFFtASwA/fDpoNHQ0dDwwP5g2fDQYNCR91JpIPdw13DRAOGSs8N0U0QjNALjsuOSh8XuNjgkeAGYwgnizmH1IZphnSTfmo+w/9DQkMKhLmKfMO8w2REnYSdBIRFB0SIAwRJDs8SjtKOEYzQGGAZIA6jGaCV4MdiiJ+K9lCrQ9tHUMZTRz7D+ENERQTFIwXqBLqEKQVGRkgHCQdJR0nDR4NKylEKUgpRCQ8D4FmhFqOZ9NjiBmDGZUw9FnPDa8QqBnNOMcRxRwnGjMdYRwfGRoUGiEsJjArNSk1JDQfLg0KFhwlLCsyDzAPFg8NUolmiGuMLp8jnCCdJKMZlBEsEB8SPh7jHSclLiYvJDIjLyEzKzwzRDNFMUQxRBEzEhMXGhwnKEcSERE9ETcSj1GPaJVWkxiOHoweoxkpFB0ODg0nDyMjNS47Mj0yPjA+ITUhQTpOPVE5Sw1CEQ0XICMvJS4qahVNJlw4dR9mKFckZyR1GZ0TPyOhHFYMEw8ZEBMdJSImHjohPiNAMD8sPCs0LTkkNg0bDBcMFRgmHSksOyzdJMAeaC/PI4UnqSVPH34UhBNCD2UPJw9qExsYIyMnIiUhJSQuJzwyQDVDMT0uOCMvDhcMIhQUDRAnPTJ4L4kjvidvMNouliyFJmshqhtvEzgblxlgJn0pjiEqIigjKSUrJ3s+Tj1NNkUzQit2DlISDg0NFXAMCw8dGEsfkje/KHgimSVgLrcXRR6TErcPcxt3FGwhjh23FKonMidwFEcUnw8vEK8QChBPGcoNBxMSDkEUaA4UElYWPx9wHaEmzxedF1AbVRlpGmAajRFjHJkVcxySIn0TihdyElMSLBXSJOYY7RAWEQsRsQ0HFRYOPhMZF4UZgBaAGlwgxSTDFakWhCWlFZYXdhZkD4INXQ9iD2td3w5yEZoNVQ/RL9cSuxfIFFkQCg8XDR4UGRdBGV4fsxhuFcYtjiDYHIwbihiEE5QRbRVlFHISUQ1TEFgPaA2cD4ASxw9kFowpnhyLHG0hbg9YDwgNCg0PGVohgSO7F54XghvBFoUXmhY9GIwWfxNhE34PMRKhEekOxw5uDykNVhF6F8sr0CWhLpQ1/yL+HqgOCA0HDUsqtiuyJYYUtRJhFXoTaxNoD04SeBOBE5MURRE+ES4PDw0LDhoVFw9QEpIQahy2D24RQxF2ENsQjA4JDQUOPiHJKIQVaw8qEmYSVg8wEnUPUw15EXUssRFhEVEQaRkbEnYMDA+bEX4UkRJ1G8AcuQ9fDB4Taw+cDQcNBRNBGtMczSOHI4YTUREfEVkXkBx8EoQTnRNuDnoNJg4wElsNYRWjE8MSYyPTTeFJuA2gDAUNjQ+WDysNBw0JHlkREynRF6YenRNkEZAPLQ9KGXEPnhGSD3gPfg0gD3o=",
"localAutoValue": 0.36000000000000004,
"whitePoint": 1.003921568627451,
"p10": 0.01568627450980392,
"highKey": 0.8063460882459689,
},
"offsetContrast": 0,
"offsetShadows": 0,
},
"identifier": "SmartTone",
}
],
"metadata": {
"masterWidth": 3024,
"pipelineVersion": "OSX.4",
"masterHeight": 4032,
"orientation": 1,
},
"orientation": 1,
"adjustment_format_version": 1,
"version_info": {
"buildNumber": "19G73",
"appVersion": "161.0.120",
"schemaRevision": 1,
"platform": "OSX",
},
"timestamp": "2020-10-03T22:54:20+00:00",
}
def test_no_adjustments(photosdb):
"""test adjustments when photo has no adjusments"""
photo = photosdb.get_photo(UUID_DICT["no_adjustments"])
assert photo.adjustments is None
def test_exiftool_newlines_in_description(photosdb):
"""Test that exiftool handles newlines embedded in description, issue #393"""
photo = photosdb.get_photo(UUID_DICT["description_newlines"])
exif = photo._exiftool_dict()
assert photo.description.find("\n") > 0
assert exif["EXIF:ImageDescription"].find("\n") > 0
@pytest.mark.skip(SKIP_TEST, reason="Not yet implemented")
def test_duplicates_1(photosdb):
# test photo has duplicates
photo = photosdb.get_photo(uuid=UUID_DICT["duplicates"])
assert len(photo.duplicates) == 1
assert photo.duplicates[0].uuid == UUID_DUPLICATE
def test_duplicates_2(photosdb):
# test photo does not have duplicates
photo = photosdb.get_photo(uuid=UUID_DICT["no_duplicates"])
assert not photo.duplicates
def test_compound_query(photosdb):
"""test photos() with multiple query terms"""
photos = photosdb.photos(persons=["Katie", "Maria"], albums=["Multi Keyword"])
assert len(photos) == 2
assert UUID_DICT["multi_query_1"] in [p.uuid for p in photos]
assert UUID_DICT["multi_query_2"] in [p.uuid for p in photos]
def test_multi_keyword(photosdb):
"""test photos() with multiple keywords"""
photos = photosdb.photos(keywords=["Kids", "wedding"])
assert len(photos) == 6
def test_multi_album(photosdb):
"""test photos() with multiple albums"""
photos = photosdb.photos(albums=["Pumpkin Farm", "Test Album"])
assert len(photos) == 3
def test_multi_uuid(photosdb):
"""test photos() with multiple uuids"""
photos = photosdb.photos(uuid=[UUID_DICT["favorite"], UUID_DICT["not_favorite"]])
assert len(photos) == 2
def test_detected_text(photosdb):
"""test PhotoInfo.detected_text"""
for uuid, expected_text in UUID_DETECTED_TEXT.items():
photo = photosdb.get_photo(uuid=uuid)
detected_text = " ".join(text for text, conf in photo.detected_text())
if expected_text is not None:
assert expected_text in detected_text
else:
assert not detected_text
|
[] |
[] |
[
"TZ"
] |
[]
|
["TZ"]
|
python
| 1 | 0 | |
pkg/controller/alamedaservice/alamedaservice_controller.go
|
package alamedaservice
import (
"context"
"fmt"
"os"
"time"
autoscaling_v1alpha1 "github.com/containers-ai/alameda/operator/pkg/apis/autoscaling/v1alpha1"
federatoraiv1alpha1 "github.com/containers-ai/federatorai-operator/pkg/apis/federatorai/v1alpha1"
"github.com/containers-ai/federatorai-operator/pkg/component"
"github.com/containers-ai/federatorai-operator/pkg/lib/resourceapply"
"github.com/containers-ai/federatorai-operator/pkg/processcrdspec"
"github.com/containers-ai/federatorai-operator/pkg/processcrdspec/alamedaserviceparamter"
"github.com/containers-ai/federatorai-operator/pkg/updateresource"
"github.com/containers-ai/federatorai-operator/pkg/util"
routev1 "github.com/openshift/api/route/v1"
securityv1 "github.com/openshift/api/security/v1"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
ingressv1beta1 "k8s.io/api/extensions/v1beta1"
v1beta1 "k8s.io/api/extensions/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
apiextension "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
alamedaServiceLockName = "alamedaservice-lock"
)
var (
_ reconcile.Reconciler = &ReconcileAlamedaService{}
log = logf.Log.WithName("controller_alamedaservice")
componentConfig *component.ComponentConfig
)
// Add creates a new AlamedaService Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
kubeClient, _ := kubernetes.NewForConfig(mgr.GetConfig())
return &ReconcileAlamedaService{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
apiextclient: apiextension.NewForConfigOrDie(mgr.GetConfig()),
kubeClient: kubeClient,
}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
// Create a new controller
c, err := controller.New("alamedaservice-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
// Watch for changes to primary resource AlamedaService
err = c.Watch(&source.Kind{Type: &federatoraiv1alpha1.AlamedaService{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
util.Disable_operand_resource_protection = os.Getenv("DISABLE_OPERAND_RESOURCE_PROTECTION")
if util.Disable_operand_resource_protection != "true" {
err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &federatoraiv1alpha1.AlamedaService{},
})
if err != nil {
return err
}
err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &federatoraiv1alpha1.AlamedaService{},
})
if err != nil {
return err
}
err = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &federatoraiv1alpha1.AlamedaService{},
})
if err != nil {
return err
}
}
return nil
}
// ReconcileAlamedaService reconciles an AlamedaService object
type ReconcileAlamedaService struct {
// This client, initialized using mgr.Client() above, is a split client
// that reads objects from the cache and writes to the apiserver
client client.Client
scheme *runtime.Scheme
apiextclient apiextension.Interface
kubeClient *kubernetes.Clientset
}
// Reconcile reads the state of the cluster for an AlamedaService object and makes changes based on the state read
// and what is in the AlamedaService.Spec.
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileAlamedaService) Reconcile(request reconcile.Request) (reconcile.Result, error) {
reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
reqLogger.Info("Reconciling AlamedaService")
// Fetch the AlamedaService instance
instance := &federatoraiv1alpha1.AlamedaService{}
err := r.client.Get(context.TODO(), request.NamespacedName, instance)
if err != nil {
if k8sErrors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
// uninstallResource := alamedaserviceparamter.GetUnInstallResource()
// r.UninstallDeployment(instance,uninstallResource)
// r.UninstallService(instance,uninstallResource)
// r.UninstallConfigMap(instance,uninstallResource)
// r.uninstallCustomResourceDefinition(uninstallResource)
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
log.V(-1).Info("get AlamedaService failed, retry reconciling", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, err
}
r.InitAlamedaService(instance)
	// Check if this AlamedaService needs to be reconciled; currently only one AlamedaService is reconciled per cluster
needToReconcile, err := r.needToReconcile(instance)
if err != nil {
log.V(-1).Info("check if AlamedaService needs to reconcile failed, retry reconciling", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if !needToReconcile {
log.Info("AlamedaService doe not need to reconcile", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name)
err := r.updateAlamedaServiceActivation(instance, false)
if err != nil {
log.V(-1).Info("reconcile AlamedaService failed", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
return reconcile.Result{}, nil
} else {
if err := r.updateAlamedaServiceActivation(instance, true); err != nil {
log.V(-1).Info("reconcile AlamedaService failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
}
if flag, _ := r.checkAlamedaServiceSpecIsChange(instance, request.NamespacedName); !flag && util.Disable_operand_resource_protection == "true" {
log.Info("AlamedaService spec is not changed, skip reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name)
return reconcile.Result{}, nil
}
asp := alamedaserviceparamter.NewAlamedaServiceParamter(instance)
ns, err := r.getNamespace(request.Namespace)
if err != nil {
log.V(-1).Info("get namespace failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
componentConfig = r.newComponentConfig(ns, *instance)
installResource := asp.GetInstallResource()
if err = r.syncCustomResourceDefinition(instance, asp, installResource); err != nil {
log.Error(err, "create crd failed")
}
if err := r.syncPodSecurityPolicy(instance, asp, installResource); err != nil {
log.V(-1).Info("sync podSecurityPolicy failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.syncSecurityContextConstraints(instance, asp, installResource); err != nil {
log.V(-1).Info("sync securityContextConstraint failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
}
if err := r.syncClusterRole(instance, asp, installResource); err != nil {
log.V(-1).Info("sync clusterRole failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.syncServiceAccount(instance, asp, installResource); err != nil {
log.V(-1).Info("sync serviceAccount failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.syncClusterRoleBinding(instance, asp, installResource); err != nil {
log.V(-1).Info("sync clusterRoleBinding failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.syncRole(instance, asp, installResource); err != nil {
log.V(-1).Info("sync Role failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.syncRoleBinding(instance, asp, installResource); err != nil {
log.V(-1).Info("sync RoleBinding failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.createSecret(instance, asp, installResource); err != nil {
log.V(-1).Info("create secret failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.createPersistentVolumeClaim(instance, asp, installResource); err != nil {
log.V(-1).Info("create PersistentVolumeClaim failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.syncConfigMap(instance, asp, installResource); err != nil {
log.V(-1).Info("sync configMap failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.syncService(instance, asp, installResource); err != nil {
log.V(-1).Info("sync service failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.createMutatingWebhookConfiguration(instance, asp, installResource); err != nil {
log.V(-1).Info("create MutatingWebhookConfiguration failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.createValidatingWebhookConfiguration(instance, asp, installResource); err != nil {
log.V(-1).Info("create ValidatingWebhookConfiguration failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.syncDeployment(instance, asp, installResource); err != nil {
log.V(-1).Info("sync deployment failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.syncStatefulSet(instance, asp, installResource); err != nil {
log.V(-1).Info("sync statefulset failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.syncIngress(instance, asp, installResource); err != nil {
log.V(-1).Info("sync Ingress failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.syncRoute(instance, asp, installResource); err != nil {
log.V(-1).Info("sync route failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
}
if err := r.syncDaemonSet(instance, asp, installResource); err != nil {
log.V(-1).Info("sync DaemonSet failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if err := r.createAlamedaNotificationChannels(instance, installResource); err != nil {
log.V(-1).Info("create AlamedaNotificationChannels failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}, nil
}
if err := r.createAlamedaNotificationTopics(instance, installResource); err != nil {
log.V(-1).Info("create AlamedaNotificationTopic failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}, nil
}
	// If EnableExecution or EnableGUI has been changed to false, uninstall the corresponding components.
//Uninstall Execution Component
if !asp.EnableExecution {
log.Info("EnableExecution has been changed to false")
excutionResource := alamedaserviceparamter.GetExcutionResource()
if err := r.uninstallExecutionComponent(instance, excutionResource); err != nil {
log.V(-1).Info("retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
}
//Uninstall GUI Component
if !asp.EnableGUI {
log.Info("EnableGUI has been changed to false")
guiResource := alamedaserviceparamter.GetGUIResource()
if err := r.uninstallGUIComponent(instance, guiResource); err != nil {
log.V(-1).Info("retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
}
//Uninstall dispatcher Component
if !asp.EnableDispatcher {
log.Info("EnableDispatcher has been changed to false")
dispatcherResource := alamedaserviceparamter.GetDispatcherResource()
if err := r.uninstallExecutionComponent(instance, dispatcherResource); err != nil {
log.V(-1).Info("retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
}
//Uninstall weavescope components
if !asp.EnableWeavescope {
weavescopeResource := alamedaserviceparamter.GetWeavescopeResource()
if err := r.uninstallResource(weavescopeResource); err != nil {
log.V(-1).Info("retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
}
//Uninstall PersistentVolumeClaim Source
pvcResource := asp.GetUninstallPersistentVolumeClaimSource()
if err := r.uninstallPersistentVolumeClaim(instance, pvcResource); err != nil {
log.V(-1).Info("retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
if !asp.SelfDriving {
log.Info("selfDriving has been changed to false")
selfDrivingResource := alamedaserviceparamter.GetSelfDrivingRsource()
if err := r.uninstallScalerforAlameda(instance, selfDrivingResource); err != nil {
log.V(-1).Info("retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
} else { //install Alameda Scaler
if err := r.createScalerforAlameda(instance, asp, installResource); err != nil {
log.V(-1).Info("create scaler for alameda failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
}
if err = r.updateAlamedaService(instance, request.NamespacedName, asp); err != nil {
log.Error(err, "Update AlamedaService failed, retry reconciling AlamedaService", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name, "msg", err.Error())
return reconcile.Result{Requeue: true, RequeueAfter: 1 * time.Second}, nil
}
log.Info("Reconcile done.", "AlamedaService.Namespace", instance.Namespace, "AlamedaService.Name", instance.Name)
return reconcile.Result{}, nil
}
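// InitAlamedaService fills in defaults that are not set on the AlamedaService spec;
// currently it enables the dispatcher when Spec.EnableDispatcher is nil.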
func (r *ReconcileAlamedaService) InitAlamedaService(alamedaService *federatoraiv1alpha1.AlamedaService) {
if alamedaService.Spec.EnableDispatcher == nil {
enableTrue := true
alamedaService.Spec.EnableDispatcher = &enableTrue
}
}
func (r *ReconcileAlamedaService) getNamespace(namespaceName string) (corev1.Namespace, error) {
namespace := corev1.Namespace{}
if err := r.client.Get(context.TODO(), client.ObjectKey{Name: namespaceName}, &namespace); err != nil {
return namespace, errors.Errorf("get namespace %s failed: %s", namespaceName, err.Error())
}
return namespace, nil
}
func (r *ReconcileAlamedaService) newComponentConfig(namespace corev1.Namespace, alamedaService federatoraiv1alpha1.AlamedaService) *component.ComponentConfig {
podTemplateConfig := component.NewDefaultPodTemplateConfig(namespace)
componentConfg := component.NewComponentConfig(namespace.Name, podTemplateConfig, alamedaService)
return componentConfg
}
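// createScalerforAlameda creates the AlamedaScaler custom resources listed in the install resource
// if they do not exist yet; existing scalers are left untouched.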
func (r *ReconcileAlamedaService) createScalerforAlameda(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.AlamedaScalerList {
resourceScaler := componentConfig.NewAlamedaScaler(fileString)
if err := controllerutil.SetControllerReference(instance, resourceScaler, r.scheme); err != nil {
return errors.Errorf("Fail resourceScaler SetControllerReference: %s", err.Error())
}
foundScaler := &autoscaling_v1alpha1.AlamedaScaler{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceScaler.Name, Namespace: resourceScaler.Namespace}, foundScaler)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource Scaler... ", "resourceScaler.Name", resourceScaler.Name)
err = r.client.Create(context.TODO(), resourceScaler)
if err != nil {
return errors.Errorf("create Scaler %s/%s failed: %s", resourceScaler.Namespace, resourceScaler.Name, err.Error())
}
log.Info("Successfully Creating Resource Scaler", "resourceScaler.Name", resourceScaler.Name)
} else if err != nil {
return errors.Errorf("get Scaler %s/%s failed: %s", resourceScaler.Namespace, resourceScaler.Name, err.Error())
}
}
return nil
}
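// syncCustomResourceDefinition applies the CustomResourceDefinitions required by Alameda
// through the apiextensions client.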
func (r *ReconcileAlamedaService) syncCustomResourceDefinition(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.CustomResourceDefinitionList {
crd := componentConfig.RegistryCustomResourceDefinition(fileString)
/*if err := controllerutil.SetControllerReference(instance, crd, r.scheme); err != nil {
return errors.Errorf("Fail resourceCRB SetControllerReference: %s", err.Error())
}*/
_, err := resourceapply.ApplyCustomResourceDefinition(r.apiextclient.ApiextensionsV1beta1(), crd, asp)
if err != nil {
return errors.Wrapf(err, "syncCustomResourceDefinition faild: CustomResourceDefinition.Name: %s", crd.Name)
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallCustomResourceDefinition(resource *alamedaserviceparamter.Resource) {
for _, fileString := range resource.CustomResourceDefinitionList {
crd := componentConfig.RegistryCustomResourceDefinition(fileString)
_, _, _ = resourceapply.DeleteCustomResourceDefinition(r.apiextclient.ApiextensionsV1beta1(), crd)
}
}
func (r *ReconcileAlamedaService) syncClusterRoleBinding(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.ClusterRoleBindingList {
resourceCRB := componentConfig.NewClusterRoleBinding(FileStr)
if err := controllerutil.SetControllerReference(instance, resourceCRB, r.scheme); err != nil {
return errors.Errorf("Fail resourceCRB SetControllerReference: %s", err.Error())
}
foundCRB := &rbacv1.ClusterRoleBinding{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceCRB.Name}, foundCRB)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource ClusterRoleBinding... ", "resourceCRB.Name", resourceCRB.Name)
err = r.client.Create(context.TODO(), resourceCRB)
if err != nil {
return errors.Errorf("create clusterRoleBinding %s/%s failed: %s", resourceCRB.Namespace, resourceCRB.Name, err.Error())
}
log.Info("Successfully Creating Resource ClusterRoleBinding", "resourceCRB.Name", resourceCRB.Name)
} else if err != nil {
return errors.Errorf("get clusterRoleBinding %s/%s failed: %s", resourceCRB.Namespace, resourceCRB.Name, err.Error())
} else {
err = r.client.Update(context.TODO(), resourceCRB)
if err != nil {
return errors.Errorf("Update clusterRoleBinding %s/%s failed: %s", resourceCRB.Namespace, resourceCRB.Name, err.Error())
}
}
}
return nil
}
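// createAlamedaNotificationChannels creates the default AlamedaNotificationChannel resources,
// ignoring AlreadyExists errors so existing channels are preserved.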
func (r *ReconcileAlamedaService) createAlamedaNotificationChannels(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, file := range resource.AlamedaNotificationChannelList {
src, err := componentConfig.NewAlamedaNotificationChannel(file)
if err != nil {
return errors.Errorf("get AlamedaNotificationChannel failed: file: %s, error: %s", file, err.Error())
}
if err := controllerutil.SetControllerReference(instance, src, r.scheme); err != nil {
return errors.Errorf("Fail AlamedaNotificationChannel SetControllerReference: %s", err.Error())
}
err = r.client.Create(context.TODO(), src)
if err != nil && !k8sErrors.IsAlreadyExists(err) {
return errors.Errorf("create AlamedaNotificationChannel %s failed: %s", src.GetName(), err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) createAlamedaNotificationTopics(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, file := range resource.AlamedaNotificationTopic {
src, err := componentConfig.NewAlamedaNotificationTopic(file)
if err != nil {
return errors.Errorf("get AlamedaNotificationTopic failed: file: %s, error: %s", file, err.Error())
}
if err := controllerutil.SetControllerReference(instance, src, r.scheme); err != nil {
return errors.Errorf("Fail AlamedaNotificationTopic SetControllerReference: %s", err.Error())
}
err = r.client.Create(context.TODO(), src)
if err != nil && !k8sErrors.IsAlreadyExists(err) {
return errors.Errorf("create AlamedaNotificationTopic %s failed: %s", src.GetName(), err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) syncPodSecurityPolicy(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.PodSecurityPolicyList {
resourcePSP := componentConfig.NewPodSecurityPolicy(FileStr)
if err := controllerutil.SetControllerReference(instance, resourcePSP, r.scheme); err != nil {
return errors.Errorf("Fail resourcePSP SetControllerReference: %s", err.Error())
}
foundPSP := &v1beta1.PodSecurityPolicy{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourcePSP.Name}, foundPSP)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource PodSecurityPolicy... ", "resourcePSP.Name", resourcePSP.Name)
err = r.client.Create(context.TODO(), resourcePSP)
if err != nil {
return errors.Errorf("create PodSecurityPolicy %s/%s failed: %s", resourcePSP.Namespace, resourcePSP.Name, err.Error())
}
log.Info("Successfully Creating Resource PodSecurityPolicy", "resourcePSP.Name", resourcePSP.Name)
} else if err != nil {
return errors.Errorf("get PodSecurityPolicy %s/%s failed: %s", resourcePSP.Namespace, resourcePSP.Name, err.Error())
} else {
err = r.client.Update(context.TODO(), resourcePSP)
if err != nil {
return errors.Errorf("Update PodSecurityPolicy %s/%s failed: %s", resourcePSP.Namespace, resourcePSP.Name, err.Error())
}
}
}
return nil
}
func (r *ReconcileAlamedaService) syncSecurityContextConstraints(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.SecurityContextConstraintsList {
resourceSCC := componentConfig.NewSecurityContextConstraints(FileStr)
if err := controllerutil.SetControllerReference(instance, resourceSCC, r.scheme); err != nil {
return errors.Errorf("Fail resourceSCC SetControllerReference: %s", err.Error())
}
//process resource SecurityContextConstraints according to AlamedaService CR
resourceSCC = processcrdspec.ParamterToSecurityContextConstraints(resourceSCC, asp)
foundSCC := &securityv1.SecurityContextConstraints{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceSCC.Name}, foundSCC)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource SecurityContextConstraints... ", "resourceSCC.Name", resourceSCC.Name)
err = r.client.Create(context.TODO(), resourceSCC)
if err != nil {
return errors.Errorf("create SecurityContextConstraints %s/%s failed: %s", resourceSCC.Namespace, resourceSCC.Name, err.Error())
}
log.Info("Successfully Creating Resource SecurityContextConstraints", "resourceSCC.Name", resourceSCC.Name)
} else if err != nil {
return errors.Errorf("get SecurityContextConstraints %s/%s failed: %s", resourceSCC.Namespace, resourceSCC.Name, err.Error())
} else {
err = r.client.Update(context.TODO(), resourceSCC)
if err != nil {
return errors.Errorf("Update SecurityContextConstraints %s/%s failed: %s", resourceSCC.Namespace, resourceSCC.Name, err.Error())
}
}
}
return nil
}
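// syncDaemonSet creates the DaemonSets listed in the install resource and re-creates a DaemonSet
// when the desired spec no longer matches the existing one.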
func (r *ReconcileAlamedaService) syncDaemonSet(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.DaemonSetList {
resourceDS := componentConfig.NewDaemonSet(FileStr)
if err := controllerutil.SetControllerReference(instance, resourceDS, r.scheme); err != nil {
return errors.Errorf("Fail resourceDS SetControllerReference: %s", err.Error())
}
//process resource DaemonSet according to AlamedaService CR
resourceDS = processcrdspec.ParamterToDaemonSet(resourceDS, asp)
if err := r.patchConfigMapResourceVersionIntoPodTemplateSpecLabel(resourceDS.Namespace, &resourceDS.Spec.Template); err != nil {
return errors.Wrap(err, "patch resourceVersion of mounted configMaps into PodTemplateSpec failed")
}
foundDS := &appsv1.DaemonSet{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceDS.Name, Namespace: resourceDS.Namespace}, foundDS)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource DaemonSet... ", "resourceDS.Namespace", resourceDS.Namespace, "resourceDS.Name", resourceDS.Name)
err = r.client.Create(context.TODO(), resourceDS)
if err != nil {
return errors.Errorf("create DaemonSet %s/%s failed: %s", resourceDS.Namespace, resourceDS.Name, err.Error())
}
log.Info("Successfully Creating Resource DaemonSet", "resourceDS.Namespace", resourceDS.Namespace, "resourceDS.Name", resourceDS.Name)
} else if err != nil {
return errors.Errorf("get DaemonSet %s/%s failed: %s", resourceDS.Namespace, resourceDS.Name, err.Error())
} else {
if updateresource.MisMatchResourceDaemonSet(foundDS, resourceDS) {
log.Info("Update Resource DaemonSet:", "foundDS.Name", foundDS.Name)
err = r.client.Delete(context.TODO(), foundDS)
if err != nil {
return errors.Errorf("delete DaemonSet %s/%s failed: %s", foundDS.Namespace, foundDS.Name, err.Error())
}
err = r.client.Create(context.TODO(), resourceDS)
if err != nil {
return errors.Errorf("create DaemonSet %s/%s failed: %s", foundDS.Namespace, foundDS.Name, err.Error())
}
log.Info("Successfully Update Resource DaemonSet", "resourceDS.Namespace", resourceDS.Namespace, "resourceDS.Name", resourceDS.Name)
}
}
}
return nil
}
func (r *ReconcileAlamedaService) syncClusterRole(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.ClusterRoleList {
resourceCR := componentConfig.NewClusterRole(FileStr)
if err := controllerutil.SetControllerReference(instance, resourceCR, r.scheme); err != nil {
return errors.Errorf("Fail resourceCR SetControllerReference: %s", err.Error())
}
foundCR := &rbacv1.ClusterRole{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceCR.Name}, foundCR)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource ClusterRole... ", "resourceCR.Name", resourceCR.Name)
err = r.client.Create(context.TODO(), resourceCR)
if err != nil {
return errors.Errorf("create clusterRole %s/%s failed: %s", resourceCR.Namespace, resourceCR.Name, err.Error())
}
log.Info("Successfully Creating Resource ClusterRole", "resourceCR.Name", resourceCR.Name)
} else if err != nil {
return errors.Errorf("get clusterRole %s/%s failed: %s", resourceCR.Namespace, resourceCR.Name, err.Error())
} else {
err = r.client.Update(context.TODO(), resourceCR)
if err != nil {
return errors.Errorf("Update clusterRole %s/%s failed: %s", resourceCR.Namespace, resourceCR.Name, err.Error())
}
}
}
return nil
}
func (r *ReconcileAlamedaService) syncServiceAccount(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.ServiceAccountList {
resourceSA := componentConfig.NewServiceAccount(FileStr)
if err := controllerutil.SetControllerReference(instance, resourceSA, r.scheme); err != nil {
return errors.Errorf("Fail resourceSA SetControllerReference: %s", err.Error())
}
foundSA := &corev1.ServiceAccount{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceSA.Name, Namespace: resourceSA.Namespace}, foundSA)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource ServiceAccount... ", "resourceSA.Name", resourceSA.Name)
err = r.client.Create(context.TODO(), resourceSA)
if err != nil {
return errors.Errorf("create serviceAccount %s/%s failed: %s", resourceSA.Namespace, resourceSA.Name, err.Error())
}
log.Info("Successfully Creating Resource ServiceAccount", "resourceSA.Name", resourceSA.Name)
} else if err != nil {
return errors.Errorf("get serviceAccount %s/%s failed: %s", resourceSA.Namespace, resourceSA.Name, err.Error())
} else {
err = r.client.Update(context.TODO(), resourceSA)
if err != nil {
return errors.Errorf("Update serviceAccount %s/%s failed: %s", resourceSA.Namespace, resourceSA.Name, err.Error())
}
}
}
return nil
}
func (r *ReconcileAlamedaService) syncRole(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.RoleList {
resourceCR := componentConfig.NewRole(FileStr)
if err := controllerutil.SetControllerReference(instance, resourceCR, r.scheme); err != nil {
return errors.Errorf("Fail resourceCR SetControllerReference: %s", err.Error())
}
foundCR := &rbacv1.Role{}
err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: resourceCR.Namespace, Name: resourceCR.Name}, foundCR)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource Role... ", "resourceCR.Namespace", resourceCR.Namespace, "resourceCR.Name", resourceCR.Name)
err = r.client.Create(context.TODO(), resourceCR)
if err != nil {
return errors.Errorf("create Role %s/%s failed: %s", resourceCR.Namespace, resourceCR.Name, err.Error())
}
log.Info("Successfully Creating Resource Role", "resourceCR.Namespace", resourceCR.Namespace, "resourceCR.Name", resourceCR.Name)
} else if err != nil {
return errors.Errorf("get Role %s/%s failed: %s", resourceCR.Namespace, resourceCR.Name, err.Error())
} else {
err = r.client.Update(context.TODO(), resourceCR)
if err != nil {
return errors.Errorf("Update Role %s/%s failed: %s", resourceCR.Namespace, resourceCR.Name, err.Error())
}
}
}
return nil
}
func (r *ReconcileAlamedaService) syncRoleBinding(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.RoleBindingList {
resourceCR := componentConfig.NewRoleBinding(FileStr)
if err := controllerutil.SetControllerReference(instance, resourceCR, r.scheme); err != nil {
return errors.Errorf("Fail resourceCR SetControllerReference: %s", err.Error())
}
foundCR := &rbacv1.RoleBinding{}
err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: resourceCR.Namespace, Name: resourceCR.Name}, foundCR)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource RoleBinding... ", "resourceCR.Namespace", resourceCR.Namespace, "resourceCR.Name", resourceCR.Name)
err = r.client.Create(context.TODO(), resourceCR)
if err != nil {
return errors.Errorf("create RoleBinding %s/%s failed: %s", resourceCR.Namespace, resourceCR.Name, err.Error())
}
log.Info("Successfully Creating Resource RoleBinding", "resourceCR.Namespace", resourceCR.Namespace, "resourceCR.Name", resourceCR.Name)
} else if err != nil {
return errors.Errorf("get RoleBinding %s/%s failed: %s", resourceCR.Namespace, resourceCR.Name, err.Error())
} else {
err = r.client.Update(context.TODO(), resourceCR)
if err != nil {
return errors.Errorf("Update RoleBinding %s/%s failed: %s", resourceCR.Namespace, resourceCR.Name, err.Error())
}
}
}
return nil
}
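// createPersistentVolumeClaim creates the PersistentVolumeClaims listed in the install resource
// if they do not exist yet; existing claims are not updated.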
func (r *ReconcileAlamedaService) createPersistentVolumeClaim(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.PersistentVolumeClaimList {
resourcePVC := componentConfig.NewPersistentVolumeClaim(FileStr)
		//process resource PersistentVolumeClaim into desired PersistentVolumeClaim according to AlamedaService CR
resourcePVC = processcrdspec.ParamterToPersistentVolumeClaim(resourcePVC, asp)
if err := controllerutil.SetControllerReference(instance, resourcePVC, r.scheme); err != nil {
return errors.Errorf("Fail resourcePVC SetControllerReference: %s", err.Error())
}
foundPVC := &corev1.PersistentVolumeClaim{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourcePVC.Name, Namespace: resourcePVC.Namespace}, foundPVC)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource PersistentVolumeClaim... ", "resourcePVC.Name", resourcePVC.Name)
err = r.client.Create(context.TODO(), resourcePVC)
if err != nil {
return errors.Errorf("create PersistentVolumeClaim %s/%s failed: %s", resourcePVC.Namespace, resourcePVC.Name, err.Error())
}
log.Info("Successfully Creating Resource PersistentVolumeClaim", "resourcePVC.Name", resourcePVC.Name)
} else if err != nil {
return errors.Errorf("get PersistentVolumeClaim %s/%s failed: %s", resourcePVC.Namespace, resourcePVC.Name, err.Error())
}
}
return nil
}
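// createSecret creates the admission-controller, InfluxDB, Fedemeter and notifier webhook TLS secrets,
// treating AlreadyExists errors as success so existing secrets are kept.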
func (r *ReconcileAlamedaService) createSecret(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
secret, err := componentConfig.NewAdmissionControllerSecret()
if err != nil {
return errors.Errorf("build AdmissionController secret failed: %s", err.Error())
}
if err := controllerutil.SetControllerReference(instance, secret, r.scheme); err != nil {
return errors.Errorf("set controller reference to secret %s/%s failed: %s", secret.Namespace, secret.Name, err.Error())
}
err = r.client.Create(context.TODO(), secret)
if err != nil && k8sErrors.IsAlreadyExists(err) {
log.Info("create secret failed: secret is already exists", "secret.Namespace", secret.Namespace, "secret.Name", secret.Name)
} else if err != nil {
return errors.Errorf("get secret %s/%s failed: %s", secret.Namespace, secret.Name, err.Error())
}
secret, err = componentConfig.NewInfluxDBSecret()
if err != nil {
return errors.Errorf("build InfluxDB secret failed: %s", err.Error())
}
if err := controllerutil.SetControllerReference(instance, secret, r.scheme); err != nil {
return errors.Errorf("set controller reference to secret %s/%s failed: %s", secret.Namespace, secret.Name, err.Error())
}
err = r.client.Create(context.TODO(), secret)
if err != nil && k8sErrors.IsAlreadyExists(err) {
log.Info("create secret failed: secret is already exists", "secret.Namespace", secret.Namespace, "secret.Name", secret.Name)
} else if err != nil {
return errors.Errorf("get secret %s/%s failed: %s", secret.Namespace, secret.Name, err.Error())
}
secret, err = componentConfig.NewfedemeterSecret()
if err != nil {
return errors.Errorf("build Fedemeter secret failed: %s", err.Error())
}
if err := controllerutil.SetControllerReference(instance, secret, r.scheme); err != nil {
return errors.Errorf("set controller reference to secret %s/%s failed: %s", secret.Namespace, secret.Name, err.Error())
}
err = r.client.Create(context.TODO(), secret)
if err != nil && k8sErrors.IsAlreadyExists(err) {
log.Info("create secret failed: secret is already exists", "secret.Namespace", secret.Namespace, "secret.Name", secret.Name)
} else if err != nil {
return errors.Errorf("get secret %s/%s failed: %s", secret.Namespace, secret.Name, err.Error())
}
notifierWebhookServiceAsset := alamedaserviceparamter.GetAlamedaNotifierWebhookService()
notifierWebhookService := componentConfig.NewService(notifierWebhookServiceAsset)
notifierWebhookServiceAddress := util.GetServiceDNS(notifierWebhookService)
notifierWebhookServiceCertSecretAsset := alamedaserviceparamter.GetAlamedaNotifierWebhookServerCertSecret()
notifierWebhookServiceSecret, err := componentConfig.NewTLSSecret(notifierWebhookServiceCertSecretAsset, notifierWebhookServiceAddress)
if err != nil {
return errors.Errorf("build secret failed: %s", err.Error())
}
	if err := controllerutil.SetControllerReference(instance, notifierWebhookServiceSecret, r.scheme); err != nil {
		return errors.Errorf("set controller reference to secret %s/%s failed: %s", notifierWebhookServiceSecret.Namespace, notifierWebhookServiceSecret.Name, err.Error())
	}
	err = r.client.Create(context.TODO(), notifierWebhookServiceSecret)
	if err != nil && k8sErrors.IsAlreadyExists(err) {
		log.Info("create secret failed: secret already exists", "secret.Namespace", notifierWebhookServiceSecret.Namespace, "secret.Name", notifierWebhookServiceSecret.Name)
	} else if err != nil {
		return errors.Errorf("create secret %s/%s failed: %s", notifierWebhookServiceSecret.Namespace, notifierWebhookServiceSecret.Name, err.Error())
}
return nil
}
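// syncConfigMap creates or updates the ConfigMaps listed in the install resource after applying
// the AlamedaService parameters to them.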
func (r *ReconcileAlamedaService) syncConfigMap(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.ConfigMapList {
resourceCM := componentConfig.NewConfigMap(fileString)
if err := controllerutil.SetControllerReference(instance, resourceCM, r.scheme); err != nil {
return errors.Errorf("Fail resourceCM SetControllerReference: %s", err.Error())
}
		//process resource ConfigMap into desired ConfigMap according to AlamedaService CR
resourceCM = processcrdspec.ParamterToConfigMap(resourceCM, asp)
foundCM := &corev1.ConfigMap{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceCM.Name, Namespace: resourceCM.Namespace}, foundCM)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource ConfigMap... ", "resourceCM.Name", resourceCM.Name)
err = r.client.Create(context.TODO(), resourceCM)
if err != nil {
return errors.Errorf("create configMap %s/%s failed: %s", resourceCM.Namespace, resourceCM.Name, err.Error())
}
log.Info("Successfully Creating Resource ConfigMap", "resourceCM.Name", resourceCM.Name)
} else if err != nil {
return errors.Errorf("get configMap %s/%s failed: %s", resourceCM.Namespace, resourceCM.Name, err.Error())
} else {
if updateresource.MisMatchResourceConfigMap(foundCM, resourceCM) {
log.Info("Update Resource Service:", "foundCM.Name", foundCM.Name)
err = r.client.Update(context.TODO(), foundCM)
if err != nil {
return errors.Errorf("update configMap %s/%s failed: %s", foundCM.Namespace, foundCM.Name, err.Error())
} else {
if foundCM.Name == util.GrafanaDatasourcesName { //if modify grafana-datasource then delete Deployment(Temporary strategy)
grafanaDep := componentConfig.NewDeployment(util.GrafanaYaml)
err = r.deleteDeploymentWhenModifyConfigMapOrService(grafanaDep)
if err != nil {
errors.Errorf("delete Deployment when modify ConfigMap %s/%s failed: %s", grafanaDep.Namespace, grafanaDep.Name, err.Error())
}
}
}
log.Info("Successfully Update Resource CinfigMap", "resourceCM.Name", foundCM.Name)
}
}
}
return nil
}
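// syncService creates the Services listed in the install resource and re-creates a Service
// when the desired spec no longer matches the existing one.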
func (r *ReconcileAlamedaService) syncService(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.ServiceList {
resourceSV := componentConfig.NewService(fileString)
if err := controllerutil.SetControllerReference(instance, resourceSV, r.scheme); err != nil {
return errors.Errorf("Fail resourceSV SetControllerReference: %s", err.Error())
}
if err := processcrdspec.ParamterToService(resourceSV, asp); err != nil {
return errors.Wrapf(err, "process service (%s/%s) failed", resourceSV.Namespace, resourceSV.Name)
}
foundSV := &corev1.Service{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceSV.Name, Namespace: resourceSV.Namespace}, foundSV)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource Service... ", "resourceSV.Name", resourceSV.Name)
err = r.client.Create(context.TODO(), resourceSV)
if err != nil {
return errors.Errorf("create service %s/%s failed: %s", resourceSV.Namespace, resourceSV.Name, err.Error())
}
log.Info("Successfully Creating Resource Service", "resourceSV.Name", resourceSV.Name)
} else if err != nil {
return errors.Errorf("get service %s/%s failed: %s", resourceSV.Namespace, resourceSV.Name, err.Error())
} else {
if updateresource.MisMatchResourceService(foundSV, resourceSV) {
log.Info("Update Resource Service:", "foundSV.Name", foundSV.Name)
err = r.client.Delete(context.TODO(), foundSV)
if err != nil {
return errors.Errorf("delete service %s/%s failed: %s", foundSV.Namespace, foundSV.Name, err.Error())
}
err = r.client.Create(context.TODO(), resourceSV)
if err != nil {
return errors.Errorf("create service %s/%s failed: %s", foundSV.Namespace, foundSV.Name, err.Error())
}
log.Info("Successfully Update Resource Service", "resourceSV.Name", foundSV.Name)
}
}
}
return nil
}
func (r *ReconcileAlamedaService) getSecret(namespace, name string) (corev1.Secret, error) {
secret := corev1.Secret{}
err := r.client.Get(context.TODO(), client.ObjectKey{Namespace: namespace, Name: name}, &secret)
if err != nil {
return secret, errors.Errorf("get secret (%s/%s) failed", namespace, name)
}
return secret, nil
}
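// createMutatingWebhookConfiguration creates the MutatingWebhookConfigurations listed in the install
// resource, injecting the CA bundle from the secret referenced by the "secret.name" annotation.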
func (r *ReconcileAlamedaService) createMutatingWebhookConfiguration(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.MutatingWebhookConfigurationList {
mutatingWebhookConfiguration, err := componentConfig.NewMutatingWebhookConfiguration(fileString)
if err != nil {
return errors.Wrap(err, "new MutatingWebhookConfiguration failed")
}
if err := controllerutil.SetControllerReference(instance, mutatingWebhookConfiguration, r.scheme); err != nil {
return errors.Errorf("Fail MutatingWebhookConfiguration SetControllerReference: %s", err.Error())
}
secretName := mutatingWebhookConfiguration.ObjectMeta.Annotations["secret.name"]
secret, err := r.getSecret(instance.Namespace, secretName)
if err != nil {
return errors.Errorf("get secret failed: %s", err.Error())
}
caCert := secret.Data["ca.crt"]
for i := range mutatingWebhookConfiguration.Webhooks {
mutatingWebhookConfiguration.Webhooks[i].ClientConfig.CABundle = caCert
}
log.Info("Creating a new Resource MutatingWebhookConfiguration... ", "name", mutatingWebhookConfiguration.Name)
err = r.client.Create(context.TODO(), mutatingWebhookConfiguration)
if err != nil && !k8sErrors.IsAlreadyExists(err) {
return errors.Errorf("create MutatingWebhookConfiguration %s failed: %s", mutatingWebhookConfiguration.Name, err.Error())
}
log.Info("Successfully Creating Resource MutatingWebhookConfiguration", "name", mutatingWebhookConfiguration.Name)
}
return nil
}
func (r *ReconcileAlamedaService) createValidatingWebhookConfiguration(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.ValidatingWebhookConfigurationList {
validatingWebhookConfiguration, err := componentConfig.NewValidatingWebhookConfiguration(fileString)
if err != nil {
return errors.Wrap(err, "new ValidatingWebhookConfigurationList failed")
}
if err := controllerutil.SetControllerReference(instance, validatingWebhookConfiguration, r.scheme); err != nil {
return errors.Errorf("Fail ValidatingWebhookConfiguration SetControllerReference: %s", err.Error())
}
secretName := validatingWebhookConfiguration.ObjectMeta.Annotations["secret.name"]
secret, err := r.getSecret(instance.Namespace, secretName)
if err != nil {
return errors.Errorf("get secret failed: %s", err.Error())
}
caCert := secret.Data["ca.crt"]
for i := range validatingWebhookConfiguration.Webhooks {
validatingWebhookConfiguration.Webhooks[i].ClientConfig.CABundle = caCert
}
log.Info("Creating a new Resource ValidatingWebhookConfiguration... ", "name", validatingWebhookConfiguration.Name)
err = r.client.Create(context.TODO(), validatingWebhookConfiguration)
if err != nil && !k8sErrors.IsAlreadyExists(err) {
return errors.Errorf("create ValidatingWebhookConfiguration %s failed: %s", validatingWebhookConfiguration.Name, err.Error())
}
}
return nil
}
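// syncDeployment creates or updates the Deployments listed in the install resource after applying
// the AlamedaService parameters and the mounted ConfigMap resourceVersions to the pod template.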
func (r *ReconcileAlamedaService) syncDeployment(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.DeploymentList {
resourceDep := componentConfig.NewDeployment(fileString)
if err := controllerutil.SetControllerReference(instance, resourceDep, r.scheme); err != nil {
return errors.Errorf("Fail resourceDep SetControllerReference: %s", err.Error())
}
		//process resource deployment into desired deployment according to AlamedaService CR
resourceDep = processcrdspec.ParamterToDeployment(resourceDep, asp)
if err := r.patchConfigMapResourceVersionIntoPodTemplateSpecLabel(resourceDep.Namespace, &resourceDep.Spec.Template); err != nil {
return errors.Wrap(err, "patch resourceVersion of mounted configMaps into PodTemplateSpec failed")
}
foundDep := &appsv1.Deployment{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceDep.Name, Namespace: resourceDep.Namespace}, foundDep)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource Deployment... ", "resourceDep.Name", resourceDep.Name)
err = r.client.Create(context.TODO(), resourceDep)
if err != nil {
return errors.Errorf("create deployment %s/%s failed: %s", resourceDep.Namespace, resourceDep.Name, err.Error())
}
log.Info("Successfully Creating Resource Deployment", "resourceDep.Name", resourceDep.Name)
continue
} else if err != nil {
return errors.Errorf("get deployment %s/%s failed: %s", resourceDep.Namespace, resourceDep.Name, err.Error())
} else {
if updateresource.MisMatchResourceDeployment(foundDep, resourceDep) {
log.Info("Update Resource Deployment:", "resourceDep.Name", foundDep.Name)
err = r.client.Update(context.TODO(), foundDep)
if err != nil {
return errors.Errorf("update deployment %s/%s failed: %s", foundDep.Namespace, foundDep.Name, err.Error())
}
log.Info("Successfully Update Resource Deployment", "resourceDep.Name", foundDep.Name)
}
}
}
return nil
}
func (r *ReconcileAlamedaService) syncRoute(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.RouteList {
resourceRT := componentConfig.NewRoute(FileStr)
if err := controllerutil.SetControllerReference(instance, resourceRT, r.scheme); err != nil {
return errors.Errorf("Fail resourceRT SetControllerReference: %s", err.Error())
}
foundRT := &routev1.Route{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceRT.Name, Namespace: resourceRT.Namespace}, foundRT)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource Route... ", "resourceRT.Name", resourceRT.Name)
err = r.client.Create(context.TODO(), resourceRT)
if err != nil {
return errors.Errorf("create route %s/%s failed: %s", resourceRT.Namespace, resourceRT.Name, err.Error())
}
log.Info("Successfully Creating Resource route", "resourceRT.Name", resourceRT.Name)
} else if err != nil {
return errors.Errorf("get route %s/%s failed: %s", resourceRT.Namespace, resourceRT.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) syncIngress(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.IngressList {
resourceIG := componentConfig.NewIngress(FileStr)
if err := controllerutil.SetControllerReference(instance, resourceIG, r.scheme); err != nil {
return errors.Errorf("Fail resourceIG SetControllerReference: %s", err.Error())
}
foundIG := &ingressv1beta1.Ingress{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceIG.Name, Namespace: resourceIG.Namespace}, foundIG)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource Route... ", "resourceIG.Name", resourceIG.Name)
err = r.client.Create(context.TODO(), resourceIG)
if err != nil {
return errors.Errorf("create route %s/%s failed: %s", resourceIG.Namespace, resourceIG.Name, err.Error())
}
log.Info("Successfully Creating Resource route", "resourceRT.Name", resourceIG.Name)
} else if err != nil {
return errors.Errorf("get route %s/%s failed: %s", resourceIG.Namespace, resourceIG.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) syncStatefulSet(instance *federatoraiv1alpha1.AlamedaService, asp *alamedaserviceparamter.AlamedaServiceParamter, resource *alamedaserviceparamter.Resource) error {
for _, FileStr := range resource.StatefulSetList {
resourceSS := componentConfig.NewStatefulSet(FileStr)
if err := controllerutil.SetControllerReference(instance, resourceSS, r.scheme); err != nil {
return errors.Errorf("Fail resourceSS SetControllerReference: %s", err.Error())
}
resourceSS = processcrdspec.ParamterToStatefulset(resourceSS, asp)
if err := r.patchConfigMapResourceVersionIntoPodTemplateSpecLabel(resourceSS.Namespace, &resourceSS.Spec.Template); err != nil {
return errors.Wrap(err, "patch resourceVersion of mounted configMaps into PodTemplateSpec failed")
}
foundSS := &appsv1.StatefulSet{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceSS.Name, Namespace: resourceSS.Namespace}, foundSS)
if err != nil && k8sErrors.IsNotFound(err) {
log.Info("Creating a new Resource Route... ", "resourceSS.Name", resourceSS.Name)
err = r.client.Create(context.TODO(), resourceSS)
if err != nil {
return errors.Errorf("create route %s/%s failed: %s", resourceSS.Namespace, resourceSS.Name, err.Error())
}
log.Info("Successfully Creating Resource route", "resourceSS.Name", resourceSS.Name)
} else if err != nil {
return errors.Errorf("get route %s/%s failed: %s", resourceSS.Namespace, resourceSS.Name, err.Error())
} else {
log.Info("Update Resource StatefulSet:", "resourceSS.Name", resourceSS.Name)
if updateresource.MisMatchResourceStatefulSet(foundSS, resourceSS) {
log.Info("Update Resource StatefulSet:", "name", foundSS.Name)
err = r.client.Update(context.TODO(), foundSS)
if err != nil {
return errors.Errorf("update statefulSet %s/%s failed: %s", foundSS.Namespace, foundSS.Name, err.Error())
}
log.Info("Successfully Update Resource StatefulSet", "name", foundSS.Name)
}
log.Info("Updating Resource StatefulSet", "name", resourceSS.Name)
err = r.client.Update(context.TODO(), resourceSS)
if err != nil {
return errors.Errorf("update StatefulSet %s/%s failed: %s", resourceSS.Namespace, resourceSS.Name, err.Error())
}
log.Info("Successfully Update Resource StatefulSet", "resourceSS.Name", resourceSS.Name)
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallStatefulSet(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.StatefulSetList {
resourceSS := componentConfig.NewStatefulSet(fileString)
err := r.client.Delete(context.TODO(), resourceSS)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete statefulset %s/%s failed: %s", resourceSS.Namespace, resourceSS.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallIngress(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.IngressList {
resourceIG := componentConfig.NewIngress(fileString)
err := r.client.Delete(context.TODO(), resourceIG)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete ingress %s/%s failed: %s", resourceIG.Namespace, resourceIG.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallRoute(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.RouteList {
resourceRT := componentConfig.NewRoute(fileString)
err := r.client.Delete(context.TODO(), resourceRT)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete route %s/%s failed: %s", resourceRT.Namespace, resourceRT.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallDeployment(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.DeploymentList {
resourceDep := componentConfig.NewDeployment(fileString)
err := r.client.Delete(context.TODO(), resourceDep)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete deployment %s/%s failed: %s", resourceDep.Namespace, resourceDep.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallService(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.ServiceList {
resourceSVC := componentConfig.NewService(fileString)
err := r.client.Delete(context.TODO(), resourceSVC)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete service %s/%s failed: %s", resourceSVC.Namespace, resourceSVC.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallConfigMap(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.ConfigMapList {
resourceCM := componentConfig.NewConfigMap(fileString)
err := r.client.Delete(context.TODO(), resourceCM)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete comfigMap %s/%s failed: %s", resourceCM.Namespace, resourceCM.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallSecret(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.SecretList {
resourceSec, _ := componentConfig.NewSecret(fileString)
err := r.client.Delete(context.TODO(), resourceSec)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete secret %s/%s failed: %s", resourceSec.Namespace, resourceSec.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallServiceAccount(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.ServiceAccountList {
resourceSA := componentConfig.NewServiceAccount(fileString)
err := r.client.Delete(context.TODO(), resourceSA)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete serviceAccount %s/%s failed: %s", resourceSA.Namespace, resourceSA.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallClusterRole(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.ClusterRoleList {
resourceCR := componentConfig.NewClusterRole(fileString)
err := r.client.Delete(context.TODO(), resourceCR)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete clusterRole %s/%s failed: %s", resourceCR.Namespace, resourceCR.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallClusterRoleBinding(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.ClusterRoleBindingList {
resourceCRB := componentConfig.NewClusterRoleBinding(fileString)
err := r.client.Delete(context.TODO(), resourceCRB)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete clusterRoleBinding %s/%s failed: %s", resourceCRB.Namespace, resourceCRB.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallAlamedaScaler(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.AlamedaScalerList {
resourceScaler := componentConfig.NewAlamedaScaler(fileString)
err := r.client.Delete(context.TODO(), resourceScaler)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete resourceScaler %s/%s failed: %s", resourceScaler.Namespace, resourceScaler.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallMutatingWebhookConfiguration(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.MutatingWebhookConfigurationList {
mutatingWebhookConfiguration, err := componentConfig.NewMutatingWebhookConfiguration(fileString)
if err != nil {
return errors.Wrap(err, "new MutatingWebhookConfiguration failed")
}
err = r.client.Delete(context.TODO(), mutatingWebhookConfiguration)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete MutatingWebhookConfiguratio %s failed: %s", mutatingWebhookConfiguration.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallValidatingWebhookConfiguration(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
	for _, fileString := range resource.ValidatingWebhookConfigurationList {
validatingWebhookConfiguration, err := componentConfig.NewValidatingWebhookConfiguration(fileString)
if err != nil {
return errors.Wrap(err, "new ValidatingWebhookConfiguration failed")
}
err = r.client.Delete(context.TODO(), validatingWebhookConfiguration)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete ValidatingWebhookConfiguration %s failed: %s", validatingWebhookConfiguration.Name, err.Error())
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallScalerforAlameda(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
if err := r.uninstallAlamedaScaler(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall selfDriving scaler failed")
}
return nil
}
func (r *ReconcileAlamedaService) uninstallGUIComponent(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
if err := r.uninstallRoute(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall gui component failed")
}
if err := r.uninstallDeployment(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall gui component failed")
}
if err := r.uninstallService(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall gui component failed")
}
if err := r.uninstallConfigMap(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall gui component failed")
}
if err := r.uninstallServiceAccount(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall gui component failed")
}
if err := r.uninstallClusterRole(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall gui component failed")
}
if err := r.uninstallClusterRoleBinding(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall gui component failed")
}
return nil
}
func (r *ReconcileAlamedaService) uninstallExecutionComponent(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
if err := r.uninstallDeployment(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall execution component failed")
}
if err := r.uninstallService(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall execution component failed")
}
if err := r.uninstallSecret(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall gui component failed")
}
if err := r.uninstallServiceAccount(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall gui component failed")
}
if err := r.uninstallClusterRole(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall gui component failed")
}
if err := r.uninstallClusterRoleBinding(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall gui component failed")
}
return nil
}
func (r *ReconcileAlamedaService) uninstallFedemeterComponent(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
if err := r.uninstallIngress(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall Fedemeter component failed")
}
if err := r.uninstallDeployment(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall Fedemeter component failed")
}
if err := r.uninstallService(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall Fedemeter component failed")
}
if err := r.uninstallSecret(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall Fedemeter component failed")
}
if err := r.uninstallConfigMap(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall Fedemeter component failed")
}
if err := r.uninstallStatefulSet(instance, resource); err != nil {
return errors.Wrapf(err, "uninstall Fedemeter component failed")
}
return nil
}
func (r *ReconcileAlamedaService) uninstallPersistentVolumeClaim(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.PersistentVolumeClaimList {
resourcePVC := componentConfig.NewPersistentVolumeClaim(fileString)
foundPVC := &corev1.PersistentVolumeClaim{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourcePVC.Name, Namespace: resourcePVC.Namespace}, foundPVC)
if err != nil && k8sErrors.IsNotFound(err) {
continue
} else if err != nil {
return errors.Errorf("get PersistentVolumeClaim %s/%s failed: %s", resourcePVC.Namespace, resourcePVC.Name, err.Error())
} else {
err := r.client.Delete(context.TODO(), resourcePVC)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete PersistentVolumeClaim %s/%s failed: %s", resourcePVC.Namespace, resourcePVC.Name, err.Error())
}
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallDaemonSet(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.DaemonSetList {
resourceDaemonSet := componentConfig.NewDaemonSet(fileString)
foundDaemonSet := &appsv1.DaemonSet{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourceDaemonSet.Name, Namespace: resourceDaemonSet.Namespace}, foundDaemonSet)
if err != nil && k8sErrors.IsNotFound(err) {
continue
} else if err != nil {
return errors.Errorf("get DaemonSet %s/%s failed: %s", resourceDaemonSet.Namespace, resourceDaemonSet.Name, err.Error())
} else {
err := r.client.Delete(context.TODO(), resourceDaemonSet)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete DaemonSet %s/%s failed: %s", resourceDaemonSet.Namespace, resourceDaemonSet.Name, err.Error())
}
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallPodSecurityPolicy(instance *federatoraiv1alpha1.AlamedaService, resource *alamedaserviceparamter.Resource) error {
for _, fileString := range resource.PodSecurityPolicyList {
resourcePodSecurityPolicy := componentConfig.NewPodSecurityPolicy(fileString)
foundPodSecurityPolicy := &v1beta1.PodSecurityPolicy{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: resourcePodSecurityPolicy.Name, Namespace: resourcePodSecurityPolicy.Namespace}, foundPodSecurityPolicy)
if err != nil && k8sErrors.IsNotFound(err) {
continue
} else if err != nil {
return errors.Errorf("get PodSecurityPolicy %s/%s failed: %s", resourcePodSecurityPolicy.Namespace, resourcePodSecurityPolicy.Name, err.Error())
} else {
err := r.client.Delete(context.TODO(), resourcePodSecurityPolicy)
if err != nil && k8sErrors.IsNotFound(err) {
return nil
} else if err != nil {
return errors.Errorf("delete PodSecurityPolicy %s/%s failed: %s", resourcePodSecurityPolicy.Namespace, resourcePodSecurityPolicy.Name, err.Error())
}
}
}
return nil
}
func (r *ReconcileAlamedaService) uninstallResource(resource alamedaserviceparamter.Resource) error {
if err := r.uninstallStatefulSet(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall StatefulSet failed")
}
if err := r.uninstallIngress(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall Ingress failed")
}
if err := r.uninstallRoute(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall Route failed")
}
if err := r.uninstallDeployment(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall Deployment failed")
}
if err := r.uninstallService(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall Service failed")
}
if err := r.uninstallConfigMap(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall ConfigMap failed")
}
if err := r.uninstallSecret(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall Secret failed")
}
if err := r.uninstallServiceAccount(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall ServiceAccount failed")
}
if err := r.uninstallClusterRole(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall ClusterRole failed")
}
if err := r.uninstallClusterRoleBinding(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall ClusterRoleBinding failed")
}
if err := r.uninstallAlamedaScaler(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall AlamedaScaler failed")
}
if err := r.uninstallPersistentVolumeClaim(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall PersistentVolumeClaim failed")
}
if err := r.uninstallDaemonSet(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall DaemonSet failed")
}
if err := r.uninstallPodSecurityPolicy(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall PodSecurityPolicy failed")
}
if err := r.uninstallMutatingWebhookConfiguration(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall MutatingWebhookConfiguration failed")
}
if err := r.uninstallValidatingWebhookConfiguration(nil, &resource); err != nil {
return errors.Wrap(err, "uninstall ValidatingWebhookConfiguration failed")
}
return nil
}
func (r *ReconcileAlamedaService) needToReconcile(alamedaService *federatoraiv1alpha1.AlamedaService) (bool, error) {
lock, lockErr := r.getAlamedaServiceLock(alamedaService.Namespace, alamedaServiceLockName)
if lockErr == nil {
if lockIsOwnedByAlamedaService(lock, alamedaService) {
return true, nil
}
} else if k8sErrors.IsNotFound(lockErr) {
err := r.createAlamedaServiceLock(alamedaService)
if err == nil {
return true, nil
} else if !k8sErrors.IsAlreadyExists(err) {
return false, errors.Wrap(err, "check if needs to reconcile failed")
}
} else if lockErr != nil {
return false, errors.Wrap(lockErr, "check if needs to reconcile failed")
}
return false, nil
}
func (r *ReconcileAlamedaService) getAlamedaServiceLock(ns, name string) (rbacv1.ClusterRole, error) {
lock := rbacv1.ClusterRole{}
err := r.client.Get(context.Background(), types.NamespacedName{Name: name}, &lock)
if err != nil {
if k8sErrors.IsNotFound(err) {
return lock, err
}
return lock, errors.Errorf("get AlamedaService lock failed: %s", err.Error())
}
return lock, nil
}
func (r *ReconcileAlamedaService) createAlamedaServiceLock(alamedaService *federatoraiv1alpha1.AlamedaService) error {
lock := rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: alamedaServiceLockName,
},
}
if err := controllerutil.SetControllerReference(alamedaService, &lock, r.scheme); err != nil {
return errors.Errorf("create AlamedaService lock failed: %s", err)
}
err := r.client.Create(context.Background(), &lock)
if err != nil {
if k8sErrors.IsAlreadyExists(err) {
return err
}
return errors.Errorf("create AlamedaService lock failed: %s", err.Error())
}
return nil
}
func (r *ReconcileAlamedaService) updateAlamedaServiceActivation(alamedaService *federatoraiv1alpha1.AlamedaService, active bool) error {
copyAlamedaService := &federatoraiv1alpha1.AlamedaService{}
r.client.Get(context.TODO(), client.ObjectKey{Namespace: alamedaService.Namespace, Name: alamedaService.Name}, copyAlamedaService)
if active {
copyAlamedaService.Status.Conditions = []federatoraiv1alpha1.AlamedaServiceStatusCondition{
federatoraiv1alpha1.AlamedaServiceStatusCondition{
Paused: !active,
},
}
} else {
copyAlamedaService.Status.Conditions = []federatoraiv1alpha1.AlamedaServiceStatusCondition{
federatoraiv1alpha1.AlamedaServiceStatusCondition{
Paused: !active,
Message: "Other AlamedaService is active.",
},
}
}
if err := r.client.Update(context.Background(), copyAlamedaService); err != nil {
return errors.Errorf("update AlamedaService active failed: %s", err.Error())
}
return nil
}
func lockIsOwnedByAlamedaService(lock rbacv1.ClusterRole, alamedaService *federatoraiv1alpha1.AlamedaService) bool {
for _, ownerReference := range lock.OwnerReferences {
if ownerReference.UID == alamedaService.UID {
return true
}
}
return false
}
func (r *ReconcileAlamedaService) updateAlamedaService(alamedaService *federatoraiv1alpha1.AlamedaService, namespaceName client.ObjectKey, asp *alamedaserviceparamter.AlamedaServiceParamter) error {
if err := r.updateAlamedaServiceStatus(alamedaService, namespaceName, asp); err != nil {
return err
}
if err := r.updateAlamedaServiceAnnotations(alamedaService, namespaceName); err != nil {
return err
}
return nil
}
func (r *ReconcileAlamedaService) updateAlamedaServiceStatus(alamedaService *federatoraiv1alpha1.AlamedaService, namespaceName client.ObjectKey, asp *alamedaserviceparamter.AlamedaServiceParamter) error {
copyAlamedaService := alamedaService.DeepCopy()
if err := r.client.Get(context.TODO(), namespaceName, copyAlamedaService); err != nil {
return errors.Errorf("get AlamedaService failed: %s", err.Error())
}
r.InitAlamedaService(copyAlamedaService)
copyAlamedaService.Status.CRDVersion = asp.CurrentCRDVersion
if err := r.client.Update(context.Background(), copyAlamedaService); err != nil {
return errors.Errorf("update AlamedaService Status failed: %s", err.Error())
}
log.Info("Update AlamedaService Status Successfully", "resource.Name", copyAlamedaService.Name)
return nil
}
func (r *ReconcileAlamedaService) updateAlamedaServiceAnnotations(alamedaService *federatoraiv1alpha1.AlamedaService, namespaceName client.ObjectKey) error {
copyAlamedaService := alamedaService.DeepCopy()
if err := r.client.Get(context.TODO(), namespaceName, copyAlamedaService); err != nil {
return errors.Errorf("get AlamedaService failed: %s", err.Error())
}
r.InitAlamedaService(copyAlamedaService)
jsonSpec, err := copyAlamedaService.GetSpecAnnotationWithoutKeycode()
if err != nil {
return errors.Errorf("get AlamedaService spec annotation without keycode failed: %s", err.Error())
}
if copyAlamedaService.Annotations != nil {
copyAlamedaService.Annotations["previousAlamedaServiceSpec"] = jsonSpec
} else {
annotations := make(map[string]string)
annotations["previousAlamedaServiceSpec"] = jsonSpec
copyAlamedaService.Annotations = annotations
}
if err := r.client.Update(context.Background(), copyAlamedaService); err != nil {
return errors.Errorf("update AlamedaService Annotations failed: %s", err.Error())
}
log.Info("Update AlamedaService Annotations Successfully", "resource.Name", copyAlamedaService.Name)
return nil
}
func (r *ReconcileAlamedaService) checkAlamedaServiceSpecIsChange(alamedaService *federatoraiv1alpha1.AlamedaService, namespaceName client.ObjectKey) (bool, error) {
jsonSpec, err := alamedaService.GetSpecAnnotationWithoutKeycode()
if err != nil {
return false, errors.Errorf("get AlamedaService spec annotation without keycode failed: %s", err.Error())
}
currentAlamedaServiceSpec := jsonSpec
previousAlamedaServiceSpec := alamedaService.Annotations["previousAlamedaServiceSpec"]
if currentAlamedaServiceSpec == previousAlamedaServiceSpec {
return false, nil
}
return true, nil
}
func (r *ReconcileAlamedaService) deleteDeploymentWhenModifyConfigMapOrService(dep *appsv1.Deployment) error {
err := r.client.Delete(context.TODO(), dep)
if err != nil {
return err
}
return nil
}
func (r *ReconcileAlamedaService) patchConfigMapResourceVersionIntoPodTemplateSpecLabel(namespace string, podTemplateSpec *corev1.PodTemplateSpec) error {
var (
mountedConfigMapKey = "configmaps.volumes.federator.ai/name-resourceversion"
mountedConfigMapValueFormat = "%s-%s"
)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
for _, volume := range podTemplateSpec.Spec.Volumes {
if volume.ConfigMap != nil {
configMap := corev1.ConfigMap{}
err := r.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: volume.ConfigMap.Name}, &configMap)
if err != nil {
return errors.Errorf("get ConfigMap failed: %s", err.Error())
}
labels := podTemplateSpec.Labels
if labels == nil {
labels = make(map[string]string)
}
key := mountedConfigMapKey
labels[key] = fmt.Sprintf(mountedConfigMapValueFormat, configMap.Name, configMap.ResourceVersion)
podTemplateSpec.Labels = labels
}
}
return nil
}
|
[
"\"DISABLE_OPERAND_RESOURCE_PROTECTION\""
] |
[] |
[
"DISABLE_OPERAND_RESOURCE_PROTECTION"
] |
[]
|
["DISABLE_OPERAND_RESOURCE_PROTECTION"]
|
go
| 1 | 0 | |
cmd/main.go
|
package main
import (
"os"
"os/exec"
"os/signal"
"github.com/Sirupsen/logrus"
"github.com/inoc603/btk"
"github.com/pkg/errors"
)
func exitOnError(msg string, err error) {
if err != nil {
logrus.WithError(errors.Cause(err)).Fatal(msg)
}
}
func userInterrupt() chan os.Signal {
	ch := make(chan os.Signal, 1) // signal.Notify requires a buffered channel
signal.Notify(ch, os.Interrupt)
return ch
}
func main() {
if os.Getenv("DEBUG") == "1" {
logrus.SetLevel(logrus.DebugLevel)
}
kb, err := btk.NewKeyboard()
exitOnError("Failed to create keyboard", err)
hidp, err := btk.NewHidProfile("/red/potch/profile")
exitOnError("Failed to create HID profile", err)
exitOnError("Failed to export profile", hidp.Export())
exitOnError("Failed to register profile", hidp.Register(kb.Desc()))
// make the device discoverable
exitOnError(
"Failed to set to piscan",
exec.Command("hciconfig", "hci0", "piscan").Run(),
)
// set the device class to keyboard
exitOnError(
"Failed to set device class",
exec.Command("hciconfig", "hci0", "class", "02540").Run(),
)
logrus.WithField("desc", kb.Desc()).Infoln("HID profile registered")
go kb.HandleHID()
	// register the interrupt channel once, instead of creating a new one on every loop iteration
	interrupt := userInterrupt()
Loop:
	for {
		select {
		case sig := <-interrupt:
logrus.WithField("signal", sig.String()).
Warnln("Exiting on user interrupt")
kb.Stop()
break Loop
case client := <-hidp.Connection():
if err := kb.Connect(client); err != nil {
client.Sctrl.Close()
client.Sintr.Close()
}
// case client := <-hidp.Disconnection():
// logrus.Warnln("disconnect")
// kb.Disconnect(client)
}
}
// Profile will be automatically unregistered by dbus
hidp.Close()
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
pkg/config/config.go
|
package config
import (
"fmt"
"log"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/BurntSushi/toml"
"github.com/gomods/athens/pkg/download/mode"
"github.com/gomods/athens/pkg/errors"
"github.com/kelseyhightower/envconfig"
"gopkg.in/go-playground/validator.v9"
)
const defaultConfigFile = "athens.toml"
// Config provides configuration values for all components
type Config struct {
TimeoutConf
GoEnv string `validate:"required" envconfig:"GO_ENV"`
GoBinary string `validate:"required" envconfig:"GO_BINARY_PATH"`
GoProxy string `envconfig:"GOPROXY"`
GoBinaryEnvVars EnvList `envconfig:"ATHENS_GO_BINARY_ENV_VARS"`
GoGetWorkers int `validate:"required" envconfig:"ATHENS_GOGET_WORKERS"`
GoGetDir string `envconfig:"ATHENS_GOGOET_DIR"`
ProtocolWorkers int `validate:"required" envconfig:"ATHENS_PROTOCOL_WORKERS"`
LogLevel string `validate:"required" envconfig:"ATHENS_LOG_LEVEL"`
CloudRuntime string `validate:"required" envconfig:"ATHENS_CLOUD_RUNTIME"`
EnablePprof bool `envconfig:"ATHENS_ENABLE_PPROF"`
PprofPort string `envconfig:"ATHENS_PPROF_PORT"`
FilterFile string `envconfig:"ATHENS_FILTER_FILE"`
TraceExporterURL string `envconfig:"ATHENS_TRACE_EXPORTER_URL"`
TraceExporter string `envconfig:"ATHENS_TRACE_EXPORTER"`
StatsExporter string `envconfig:"ATHENS_STATS_EXPORTER"`
StorageType string `validate:"required" envconfig:"ATHENS_STORAGE_TYPE"`
GlobalEndpoint string `envconfig:"ATHENS_GLOBAL_ENDPOINT"` // This feature is not yet implemented
Port string `envconfig:"ATHENS_PORT"`
BasicAuthUser string `envconfig:"BASIC_AUTH_USER"`
BasicAuthPass string `envconfig:"BASIC_AUTH_PASS"`
ForceSSL bool `envconfig:"PROXY_FORCE_SSL"`
ValidatorHook string `envconfig:"ATHENS_PROXY_VALIDATOR"`
PathPrefix string `envconfig:"ATHENS_PATH_PREFIX"`
NETRCPath string `envconfig:"ATHENS_NETRC_PATH"`
GithubToken string `envconfig:"ATHENS_GITHUB_TOKEN"`
HGRCPath string `envconfig:"ATHENS_HGRC_PATH"`
TLSCertFile string `envconfig:"ATHENS_TLSCERT_FILE"`
TLSKeyFile string `envconfig:"ATHENS_TLSKEY_FILE"`
SumDBs []string `envconfig:"ATHENS_SUM_DBS"`
NoSumPatterns []string `envconfig:"ATHENS_GONOSUM_PATTERNS"`
DownloadMode mode.Mode `envconfig:"ATHENS_DOWNLOAD_MODE"`
DownloadURL string `envconfig:"ATHENS_DOWNLOAD_URL"`
NetworkMode string `validate:"oneof=strict offline fallback" envconfig:"ATHENS_NETWORK_MODE"`
SingleFlightType string `envconfig:"ATHENS_SINGLE_FLIGHT_TYPE"`
RobotsFile string `envconfig:"ATHENS_ROBOTS_FILE"`
IndexType string `envconfig:"ATHENS_INDEX_TYPE"`
SingleFlight *SingleFlight
Storage *Storage
Index *Index
}
// EnvList is a list of key-value environment
// variables that are passed to the Go command
type EnvList []string
// HasKey returns whether a key-value entry
// is present by only checking the left of
// key=value
func (el EnvList) HasKey(key string) bool {
for _, env := range el {
if strings.HasPrefix(env, key+"=") {
return true
}
}
return false
}
// GetValue returns the value associated with the given key
// and whether the key is present in the environment list
func (el EnvList) GetValue(key string) (string, bool) {
	prefix := key + "="
	for _, env := range el {
		if strings.HasPrefix(env, prefix) {
			return env[len(prefix):], true
		}
	}
	return "", false
}
// Add adds a key=value entry to the environment
// list
func (el *EnvList) Add(key, value string) {
*el = append(*el, key+"="+value)
}
// Decode implements envconfig.Decoder. Please see the below link for more information on
// that interface:
//
// https://github.com/kelseyhightower/envconfig#custom-decoders
//
// We are doing this to allow for very long lists of assignments to be set inside of
// a single environment variable. For example:
//
// ATHENS_GO_BINARY_ENV_VARS="GOPRIVATE=*.corp.example.com,rsc.io/private; GOPROXY=direct"
//
// See the below link for more information:
// https://github.com/gomods/athens/issues/1404
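//
// For illustration only (this example is not from the original source): decoding
// the value shown above is expected to produce
//
//	EnvList{"GOPRIVATE=*.corp.example.com,rsc.io/private", "GOPROXY=direct"}
//
// since the value is split on ";" and each assignment is trimmed before validation.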
func (el *EnvList) Decode(value string) error {
const op errors.Op = "envList.Decode"
if value == "" {
return nil
}
*el = EnvList{} // env vars must override config file
assignments := strings.Split(value, ";")
for _, assignment := range assignments {
*el = append(*el, strings.TrimSpace(assignment))
}
return el.Validate()
}
// Validate validates that all strings inside the
// list are of the key=value format
func (el EnvList) Validate() error {
const op errors.Op = "EnvList.Validate"
for _, env := range el {
// some strings can have multiple "=", such as GODEBUG=netdns=cgo
if strings.Count(env, "=") < 1 {
return errors.E(op, fmt.Errorf("incorrect env format: %v", env))
}
}
return nil
}
// Load loads the config from a file.
// If file is not present returns default config
func Load(configFile string) (*Config, error) {
// User explicitly specified a config file
if configFile != "" {
return ParseConfigFile(configFile)
}
// There is a config in the current directory
if fi, err := os.Stat(defaultConfigFile); err == nil {
return ParseConfigFile(fi.Name())
}
// Use default values
log.Println("Running dev mode with default settings, consult config when you're ready to run in production")
cfg := defaultConfig()
return cfg, envOverride(cfg)
}
func defaultConfig() *Config {
return &Config{
GoBinary: "go",
GoBinaryEnvVars: EnvList{"GOPROXY=direct"},
GoEnv: "development",
GoProxy: "direct",
GoGetWorkers: 10,
ProtocolWorkers: 30,
LogLevel: "debug",
CloudRuntime: "none",
EnablePprof: false,
PprofPort: ":3001",
StatsExporter: "prometheus",
TimeoutConf: TimeoutConf{Timeout: 300},
StorageType: "memory",
Port: ":3000",
SingleFlightType: "memory",
GlobalEndpoint: "http://localhost:3001",
TraceExporterURL: "http://localhost:14268",
SumDBs: []string{"https://sum.golang.org"},
NoSumPatterns: []string{},
DownloadMode: "sync",
DownloadURL: "",
NetworkMode: "strict",
RobotsFile: "robots.txt",
IndexType: "none",
SingleFlight: &SingleFlight{
Etcd: &Etcd{"localhost:2379,localhost:22379,localhost:32379"},
Redis: &Redis{"127.0.0.1:6379", ""},
RedisSentinel: &RedisSentinel{
Endpoints: []string{"127.0.0.1:26379"},
MasterName: "redis-1",
SentinelPassword: "sekret",
},
},
Index: &Index{
MySQL: &MySQL{
Protocol: "tcp",
Host: "localhost",
Port: 3306,
User: "root",
Password: "",
Database: "athens",
Params: map[string]string{
"parseTime": "true",
"timeout": "30s",
},
},
Postgres: &Postgres{
Host: "localhost",
Port: 5432,
User: "postgres",
Password: "",
Database: "athens",
Params: map[string]string{
"connect_timeout": "30",
"sslmode": "disable",
},
},
},
}
}
// BasicAuth returns BasicAuthUser and BasicAuthPass,
// and ok is true only if both of them are non-empty
func (c *Config) BasicAuth() (user, pass string, ok bool) {
user = c.BasicAuthUser
pass = c.BasicAuthPass
ok = user != "" && pass != ""
return user, pass, ok
}
// TLSCertFiles returns the certificate and key files, or an error if
// either file does not exist or has inappropriate file permissions
func (c *Config) TLSCertFiles() (cert, key string, err error) {
if c.TLSCertFile == "" && c.TLSKeyFile == "" {
return "", "", nil
}
certFile, err := os.Stat(c.TLSCertFile)
if err != nil {
return "", "", fmt.Errorf("Could not access TLSCertFile: %v", err)
}
keyFile, err := os.Stat(c.TLSKeyFile)
if err != nil {
return "", "", fmt.Errorf("Could not access TLSKeyFile: %v", err)
}
if keyFile.Mode()&077 != 0 && runtime.GOOS != "windows" {
return "", "", fmt.Errorf("TLSKeyFile should not be accessible by others")
}
return certFile.Name(), keyFile.Name(), nil
}
// FilterOff returns true if the FilterFile is empty
func (c *Config) FilterOff() bool {
return c.FilterFile == ""
}
// ParseConfigFile parses the given file into an athens config struct
func ParseConfigFile(configFile string) (*Config, error) {
var config Config
// attempt to read the given config file
if _, err := toml.DecodeFile(configFile, &config); err != nil {
return nil, err
}
// override values with environment variables if specified
if err := envOverride(&config); err != nil {
return nil, err
}
// Check file perms from config
if config.GoEnv == "production" {
if err := checkFilePerms(configFile, config.FilterFile); err != nil {
return nil, err
}
}
// validate all required fields have been populated
if err := validateConfig(config); err != nil {
return nil, err
}
return &config, nil
}
// envOverride uses Environment variables to override unspecified properties
func envOverride(config *Config) error {
const defaultPort = ":3000"
err := envconfig.Process("athens", config)
if err != nil {
return err
}
portEnv := os.Getenv("PORT")
// ATHENS_PORT takes precedence over PORT
if portEnv != "" && os.Getenv("ATHENS_PORT") == "" {
config.Port = portEnv
}
if config.Port == "" {
config.Port = defaultPort
}
config.Port = ensurePortFormat(config.Port)
return nil
}
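// Illustrative examples of the precedence implemented above (not from the original
// source): with PORT=8080 and ATHENS_PORT unset, config.Port becomes ":8080"; if
// ATHENS_PORT is set it wins and PORT is ignored; a bare "3000" is normalized to ":3000".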
func ensurePortFormat(s string) string {
if _, err := strconv.Atoi(s); err == nil {
return ":" + s
}
return s
}
func validateConfig(config Config) error {
validate := validator.New()
err := validate.StructExcept(config, "Storage", "Index")
if err != nil {
return err
}
err = validateStorage(validate, config.StorageType, config.Storage)
if err != nil {
return err
}
err = validateIndex(validate, config.IndexType, config.Index)
if err != nil {
return err
}
return nil
}
func validateStorage(validate *validator.Validate, storageType string, config *Storage) error {
switch storageType {
case "memory":
return nil
case "mongo":
return validate.Struct(config.Mongo)
case "disk":
return validate.Struct(config.Disk)
case "minio":
return validate.Struct(config.Minio)
case "gcp":
return validate.Struct(config.GCP)
case "s3":
return validate.Struct(config.S3)
case "azureblob":
return validate.Struct(config.AzureBlob)
case "external":
return validate.Struct(config.External)
default:
return fmt.Errorf("storage type %q is unknown", storageType)
}
}
func validateIndex(validate *validator.Validate, indexType string, config *Index) error {
switch indexType {
case "", "none", "memory":
return nil
case "mysql":
return validate.Struct(config.MySQL)
case "postgres":
return validate.Struct(config.Postgres)
default:
return fmt.Errorf("index type %q is unknown", indexType)
}
}
// GetConf accepts the path to a file, constructs an absolute path to the file,
// and attempts to parse it into a Config struct.
func GetConf(path string) (*Config, error) {
absPath, err := filepath.Abs(path)
if err != nil {
return nil, fmt.Errorf("Unable to construct absolute path to test config file")
}
conf, err := ParseConfigFile(absPath)
if err != nil {
return nil, fmt.Errorf("Unable to parse test config file: %s", err.Error())
}
return conf, nil
}
// checkFilePerms checks that none of the given files is readable, writable or executable by group or others
func checkFilePerms(files ...string) error {
const op = "config.checkFilePerms"
for _, f := range files {
if f == "" {
continue
}
// TODO: Do not ignore errors when a file is not found
// There is a subtle bug in the filter module which ignores the filter file if it does not find it.
// This check can be removed once that has been fixed
fInfo, err := os.Stat(f)
if err != nil {
continue
}
// Assume unix based system (MacOS and Linux)
// the bit mask is calculated using the umask command which tells which permissions
// should not be allowed for a particular user, group or world
if fInfo.Mode()&0077 != 0 && runtime.GOOS != "windows" {
return errors.E(op, f+" should have at most rwx,-, - (bit mask 077) as permission")
}
}
return nil
}
|
[
"\"PORT\"",
"\"ATHENS_PORT\""
] |
[] |
[
"PORT",
"ATHENS_PORT"
] |
[]
|
["PORT", "ATHENS_PORT"]
|
go
| 2 | 0 | |
pkg/controller/operatingsystemconfig/oscommon/app/app.go
|
// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
	"context"
	"os"
	extcontroller "github.com/gardener/gardener-extensions/pkg/controller"
	controllercmd "github.com/gardener/gardener-extensions/pkg/controller/cmd"
	oscommoncmd "github.com/gardener/gardener-extensions/pkg/controller/operatingsystemconfig/oscommon/cmd"
	"github.com/gardener/gardener-extensions/pkg/controller/operatingsystemconfig/oscommon/generator"
	"github.com/spf13/cobra"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)
// NewControllerCommand creates a new command for running an OS controller.
func NewControllerCommand(ctx context.Context, osName string, generator generator.Generator) *cobra.Command {
var (
restOpts = &controllercmd.RESTOptions{}
mgrOpts = &controllercmd.ManagerOptions{
LeaderElection: true,
LeaderElectionID: controllercmd.LeaderElectionNameID(osName),
LeaderElectionNamespace: os.Getenv("LEADER_ELECTION_NAMESPACE"),
}
ctrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
controllerSwitches = oscommoncmd.SwitchOptions(osName, generator)
aggOption = controllercmd.NewOptionAggregator(
restOpts,
mgrOpts,
ctrlOpts,
controllerSwitches,
)
)
cmd := &cobra.Command{
Use: "os-" + osName + "-controller-manager",
Run: func(cmd *cobra.Command, args []string) {
if err := aggOption.Complete(); err != nil {
controllercmd.LogErrAndExit(err, "Error completing options")
}
mgr, err := manager.New(restOpts.Completed().Config, mgrOpts.Completed().Options())
if err != nil {
controllercmd.LogErrAndExit(err, "Could not instantiate manager")
}
if err := extcontroller.AddToScheme(mgr.GetScheme()); err != nil {
controllercmd.LogErrAndExit(err, "Could not update manager scheme")
}
if err := controllerSwitches.Completed().AddToManager(mgr); err != nil {
controllercmd.LogErrAndExit(err, "Could not add controller to manager")
}
if err := mgr.Start(ctx.Done()); err != nil {
controllercmd.LogErrAndExit(err, "Error running manager")
}
},
}
aggOption.AddFlags(cmd.Flags())
return cmd
}
|
[
"\"LEADER_ELECTION_NAMESPACE\""
] |
[] |
[
"LEADER_ELECTION_NAMESPACE"
] |
[]
|
["LEADER_ELECTION_NAMESPACE"]
|
go
| 1 | 0 | |
python/vineyard/deploy/local.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import atexit
import contextlib
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import textwrap
import time
from .._C import connect
from .etcd import start_etcd
from .utils import check_socket
from .utils import find_vineyardd_path
logger = logging.getLogger('vineyard')
@contextlib.contextmanager
def start_vineyardd(
etcd_endpoints=None,
etcd_prefix=None,
vineyardd_path=None,
size='256M',
socket=None,
rpc=True,
rpc_socket_port=9600,
debug=False,
):
'''Launch a local vineyard cluster.
Parameters:
        etcd_endpoints: str
Launching vineyard using specified etcd endpoints. If not specified,
vineyard will launch its own etcd instance.
etcd_prefix: str
Specify a common prefix to establish a local vineyard cluster.
vineyardd_path: str
Location of vineyard server program. If not specified, vineyard will
use its own bundled vineyardd binary.
size: int
The memory size limit for vineyard's shared memory. The memory size
can be a plain integer or as a fixed-point number using one of these
suffixes:
.. code::
E, P, T, G, M, K.
You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki.
For example, the following represent roughly the same value:
.. code::
128974848, 129k, 129M, 123Mi, 1G, 10Gi, ...
socket: str
            The UNIX domain socket path that vineyard server will listen on.
Default is None.
When the socket parameter is None, a random path under temporary directory
will be generated and used.
rpc_socket_port: int
            The port that vineyard will use to provide RPC service.
debug: bool
Whether print debug logs.
Returns:
        (proc, socket, etcd_endpoints):
            Yields a tuple with the subprocess, the UNIX-domain IPC socket and
            the etcd endpoints used by the launched vineyardd instance.
'''
if not vineyardd_path:
vineyardd_path = find_vineyardd_path()
if not vineyardd_path:
raise RuntimeError('Unable to find the "vineyardd" executable')
if not socket:
socketfp = tempfile.NamedTemporaryFile(
delete=True, prefix='vineyard-', suffix='.sock'
)
socket = socketfp.name
socketfp.close()
if etcd_endpoints is None:
etcd_ctx = start_etcd()
etcd_proc, etcd_endpoints = etcd_ctx.__enter__() # pylint: disable=no-member
else:
etcd_ctx = None
env = os.environ.copy()
if debug:
        env['GLOG_v'] = '11'  # environment variable values must be strings
command = [
vineyardd_path,
'--deployment',
'local',
'--size',
str(size),
'--socket',
socket,
'--rpc' if rpc else '--norpc',
'--rpc_socket_port',
str(rpc_socket_port),
'--etcd_endpoint',
etcd_endpoints,
]
if etcd_prefix is not None:
command.extend(('--etcd_prefix', etcd_prefix))
proc = None
try:
proc = subprocess.Popen(
command,
env=env,
stdout=subprocess.PIPE,
stderr=sys.__stderr__,
universal_newlines=True,
encoding='utf-8',
)
# wait for vineyardd ready: check the rpc port and ipc sockets
rc = proc.poll()
while rc is None:
if check_socket(socket) and (
(not rpc) or check_socket(('0.0.0.0', rpc_socket_port))
):
break
time.sleep(1)
rc = proc.poll()
if rc is not None:
err = textwrap.indent(proc.stdout.read(), ' ' * 4)
raise RuntimeError(
'vineyardd exited unexpectedly '
'with code %d, error is:\n%s' % (rc, err)
)
logger.debug('vineyardd is ready.............')
yield proc, socket, etcd_endpoints
finally:
logger.debug('Local vineyardd being killed')
if proc is not None and proc.poll() is None:
proc.terminate()
proc.wait()
        try:
            # the IPC socket is a file, not a directory
            os.remove(socket)
        except Exception:
            pass
if etcd_ctx is not None:
etcd_ctx.__exit__(None, None, None) # pylint: disable=no-member
__default_instance_contexts = {}
def init(num_instances=1, **kw):
'''
Launching a local vineyardd instance and get a client as easy as possible
In a clean environment, simply use:
.. code:: python
vineyard.init()
It will launch a local vineyardd and return a connected client to the
vineyardd.
It will also setup the environment variable :code:`VINEYARD_IPC_SOCKET`.
For the case to establish a local vineyard cluster consists of multiple
vineyardd instances, using the :code:`num_instances` parameter:
.. code:: python
client1, client2, client3 = vineyard.init(num_instances=3)
In this case, three vineyardd instances will be launched.
The init method can only be called once in a process, to get the established
sockets or clients later in the process, use :code:`get_current_socket` or
:code:`get_current_client` respectively.
'''
assert __default_instance_contexts == {}
if 'VINEYARD_IPC_SOCKET' in os.environ:
raise ValueError(
"VINEYARD_IPC_SOCKET has already been set: %s, which "
"means there might be a vineyard daemon already running "
"locally" % os.environ['VINEYARD_IPC_SOCKET']
)
etcd_endpoints = None
etcd_prefix = f'vineyard_init_at_{time.time()}'
for idx in range(num_instances):
ctx = start_vineyardd(
etcd_endpoints=etcd_endpoints, etcd_prefix=etcd_prefix, rpc=False, **kw
)
_, ipc_socket, etcd_endpoints = ctx.__enter__()
client = connect(ipc_socket)
__default_instance_contexts[ipc_socket] = (ctx, client)
if idx == 0:
os.environ['VINEYARD_IPC_SOCKET'] = ipc_socket
return get_current_client()
def get_current_client():
'''
Get current vineyard IPC clients established by :code:`vineyard.init()`.
Raises:
ValueError if vineyard is not initialized.
'''
if not __default_instance_contexts:
raise ValueError(
'Vineyard has not been initialized, '
'use vineyard.init() to launch vineyard instances'
)
clients = [__default_instance_contexts[k][1] for k in __default_instance_contexts]
return clients if len(clients) > 1 else clients[0]
def get_current_socket():
'''
Get current vineyard UNIX-domain socket established by :code:`vineyard.init()`.
Raises:
ValueError if vineyard is not initialized.
'''
if not __default_instance_contexts:
raise ValueError(
'Vineyard has not been initialized, '
'use vineyard.init() to launch vineyard instances'
)
    sockets = list(__default_instance_contexts.keys())
    return sockets if len(sockets) > 1 else sockets[0]
def shutdown():
'''
Shutdown the vineyardd instances launched by previous :code:`vineyard.init()`.
'''
global __default_instance_contexts
if __default_instance_contexts:
for ipc_socket in reversed(__default_instance_contexts):
__default_instance_contexts[ipc_socket][0].__exit__(None, None, None)
# NB. don't pop pre-existing env if we not launch
os.environ.pop('VINEYARD_IPC_SOCKET', None)
__default_instance_contexts = {}
@atexit.register
def __shutdown_handler():
try:
shutdown()
except Exception: # pylint: disable=broad-except
pass
__all__ = ['start_vineyardd']
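# Illustrative usage sketch (not part of the original module). The client methods
# used below (client.put / client.get) are assumed from vineyard's public Python
# API and may differ between versions:
#
#   import vineyard
#   client = vineyard.init()           # launch a local vineyardd and connect
#   object_id = client.put([1, 2, 3])  # share a Python object with the daemon
#   print(client.get(object_id))       # retrieve it back
#   vineyard.shutdown()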
|
[] |
[] |
[
"VINEYARD_IPC_SOCKET"
] |
[]
|
["VINEYARD_IPC_SOCKET"]
|
python
| 1 | 0 | |
lattes_qualis/_Classes/PyscopusModified.py
|
import pandas as pd
import numpy as np
import requests
import json
import os
class ScopusModified(object):
def __init__(self, apikey=None):
self.apikey = apikey
def _parse_author(self, entry):
#print(entry)
author_id = entry['dc:identifier'].split(':')[-1]
lastname = entry['preferred-name']['surname']
firstname = entry['preferred-name']['given-name']
doc_count = int(entry['document-count'])
# affiliations
if 'affiliation-current' in entry:
affil = entry['affiliation-current']
try:
institution_name = affil['affiliation-name']
except:
institution_name = None
try:
institution_id = affil['affiliation-id']
except:
institution_id = None
else:
institution_name = None
institution_id = None
#city = affil.find('affiliation-city').text
#country = affil.find('affiliation-country').text
#affiliation = institution + ', ' + city + ', ' + country
return pd.Series({'author_id': author_id, 'name': firstname + ' ' + lastname, 'document_count': doc_count,\
'affiliation': institution_name, 'affiliation_id': institution_id})
def _parse_article(self, entry):
try:
scopus_id = entry['dc:identifier'].split(':')[-1]
except:
scopus_id = None
try:
title = entry['dc:title']
except:
title = None
try:
publicationname = entry['prism:publicationName']
except:
publicationname = None
try:
issn = entry['prism:issn']
except:
issn = None
try:
isbn = entry['prism:isbn']
except:
isbn = None
try:
eissn = entry['prism:eIssn']
except:
eissn = None
try:
volume = entry['prism:volume']
except:
volume = None
try:
pagerange = entry['prism:pageRange']
except:
pagerange = None
try:
coverdate = entry['prism:coverDate']
except:
coverdate = None
try:
doi = entry['prism:doi']
except:
doi = None
try:
citationcount = int(entry['citedby-count'])
except:
citationcount = None
try:
affiliation = _parse_affiliation(entry['affiliation'])
except:
affiliation = None
try:
aggregationtype = entry['prism:aggregationType']
except:
aggregationtype = None
try:
sub_dc = entry['subtypeDescription']
except:
sub_dc = None
try:
author_entry = entry['author']
author_id_list = [auth_entry['authid'] for auth_entry in author_entry]
except:
author_id_list = list()
try:
link_list = entry['link']
full_text_link = None
for link in link_list:
if link['@ref'] == 'full-text':
full_text_link = link['@href']
except:
full_text_link = None
return pd.Series({'scopus_id': scopus_id, 'title': title, 'publication_name':publicationname,\
'issn': issn, 'isbn': isbn, 'eissn': eissn, 'volume': volume, 'page_range': pagerange,\
'cover_date': coverdate, 'doi': doi,'citation_count': citationcount, 'affiliation': affiliation,\
'aggregation_type': aggregationtype, 'subtype_description': sub_dc, 'authors': author_id_list,\
'full_text': full_text_link})
def _parse_entry(self, entry, type_):
if type_ == 1 or type_ == 'article':
return self._parse_article(entry)
else:
return self._parse_author(entry)
def _search_scopus(self, key, query, type_, view, index=0):
par = {'query': query, 'start': index,
'httpAccept': 'application/json', 'view': view}
# insttoken = os.environ.get('INSTTOKEN')
# headers = {'X-ELS-Insttoken': insttoken, 'X-ELS-APIKey': key}
headers = {'X-ELS-APIKey': key}
if type_ == 'article' or type_ == 1:
r = requests.get("https://api.elsevier.com/content/search/scopus", params=par, headers=headers)
else:
par['view'] = 'STANDARD'
r = requests.get("https://api.elsevier.com/content/search/author", params=par, headers=headers)
js = r.json()
#print(r.url)
total_count = int(js['search-results']['opensearch:totalResults'])
entries = js['search-results']['entry']
result_df = pd.DataFrame([self._parse_entry(entry, type_) for entry in entries])
if index == 0:
return(result_df, total_count)
else:
return(result_df)
def search(self, query, count=100, type_=1, view='COMPLETE'):
if type(count) is not int:
raise ValueError("%s is not a valid input for the number of entries to return." %number)
result_df, total_count = self._search_scopus(self.apikey, query, type_, view)
if total_count <= count:
count = total_count
if count <= 25:
# if less than 25, just one page of response is enough
return result_df[:count]
# if larger than, go to next few pages until enough
i = 1
while True:
index = 25*i
result_df = result_df.append(self._search_scopus(self.apikey, query, type_, view=view, index=index),
ignore_index=True)
if result_df.shape[0] >= count:
return result_df[:count]
i += 1
def parse_citation(self, js_citation, year_range):
resp = js_citation['abstract-citations-response']
cite_info_list = resp['citeInfoMatrix']['citeInfoMatrixXML']['citationMatrix']['citeInfo']
year_range = (year_range[0], year_range[1]+1)
columns = ['scopus_id', 'previous_citation'] + [str(yr) for yr in range(*year_range)] + ['later_citation', 'total_citation', 'range_citation']
citation_df = pd.DataFrame(columns=columns)
year_arr = np.arange(year_range[0], year_range[1]+1)
for cite_info in cite_info_list:
cite_dict = {}
# dc:identifier: scopus id
cite_dict['scopus_id'] = cite_info['dc:identifier'].split(':')[-1]
# pcc: previous citation counts
try:
cite_dict['previous_citation'] = cite_info['pcc']
except:
                cite_dict['previous_citation'] = np.nan
# cc: citation counts during year range
try:
cc = cite_info['cc']
except:
return pd.DataFrame()
for index in range(len(cc)):
year = str(year_arr[index])
cite_dict[year] = cc[index]['$']
# lcc: later citation counts
try:
cite_dict['later_citation'] = cite_info['lcc']
except:
                cite_dict['later_citation'] = np.nan
# rowTotal: total citation counts
try:
cite_dict['total_citation'] = cite_info['rowTotal']
except:
                cite_dict['total_citation'] = np.nan
try:
cite_dict['range_citation'] = cite_info['rangeCount']
except:
                cite_dict['range_citation'] = np.nan
citation_df = citation_df.append(cite_dict, ignore_index=True)
return citation_df[columns]
def retrieve_citation(self, scopus_id_array, year_range):
date = '%i-%i' %(year_range[0], year_range[1])
par = {'scopus_id': ','.join(scopus_id_array), \
'httpAccept':'application/json', 'date': date}
# insttoken = os.environ.get('INSTTOKEN')
# headers = {'X-ELS-Insttoken': insttoken, 'X-ELS-APIKey': self.apikey}
headers = {'X-ELS-APIKey': self.apikey}
r = requests.get("https://api.elsevier.com/content/abstract/citations", params=par, headers=headers)
print(r)
js = r.json()
return self.parse_citation(js, year_range)
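# Illustrative usage (not part of the original class); the API key and the query
# string below are placeholders:
#
#   scopus = ScopusModified(apikey='your-api-key')
#   papers = scopus.search("KEY(machine learning)", count=50)
#   citations = scopus.retrieve_citation(papers['scopus_id'][:25], (2015, 2020))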
|
[] |
[] |
[
"INSTTOKEN"
] |
[]
|
["INSTTOKEN"]
|
python
| 1 | 0 | |
pygaggle/data/__init__.py
|
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
from .kaggle import *
from .relevance import *
from .msmarco import *
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
kenbot/linechat.py
|
import sys
import os
import inspect
import json
import logging
from asyncio import Queue, CancelledError
from sanic import Sanic, Blueprint, response
from sanic.request import Request
from typing import Text, List, Dict, Any, Optional, Callable, Iterable, Awaitable
from rasa.core.channels.channel import UserMessage, InputChannel, CollectingOutputChannel
try:
from urlparse import urljoin # pytype: disable=import-error
except ImportError:
from urllib.parse import urljoin
from linebot.exceptions import (
InvalidSignatureError
)
from linebot import WebhookParser
from linebot.models import MessageEvent, TextMessage
from .channel import LineChatBotOutputChannel
channel_secret = os.getenv('LINE_CHANNEL_SECRET', None)
if channel_secret is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
parser = WebhookParser(channel_secret)
logger = logging.getLogger(__name__)
class LineChatInput(InputChannel):
"""A custom http input channel.
This implementation is the basis for a custom implementation of a chat
frontend. You can customize this to send messages to Rasa Core and
retrieve responses from the agent."""
@classmethod
def name(cls):
return "linechat"
async def _extract_sender(self, req: Request) -> Optional[Text]:
return req.json.get("sender", None)
# noinspection PyMethodMayBeStatic
def _extract_message(self, req: Request) -> Optional[Text]:
return req.json.get("message", None)
def _extract_input_channel(self, req: Request) -> Text:
return req.json.get("input_channel") or self.name()
async def send_message(self, text, on_new_message, reply_token, source):
output_channel = self.get_output_channel(reply_token, source)
user_msg = UserMessage(
text, output_channel, source.sender_id, input_channel=self.name()
)
await on_new_message(user_msg)
def blueprint(self, on_new_message: Callable[[UserMessage], Awaitable[None]]):
custom_webhook = Blueprint(
"custom_webhook_{}".format(type(self).__name__),
inspect.getmodule(self).__name__,
)
# noinspection PyUnusedLocal
@custom_webhook.route("/", methods=["GET"])
async def health(request: Request):
return response.json({"status": "ok", "name": "kenbot"})
@custom_webhook.route("/webhook", methods=["POST"])
async def receive(request: Request):
sender_id = await self._extract_sender(request)
text = self._extract_message(request)
input_channel = self._extract_input_channel(request)
collector = CollectingOutputChannel()
# noinspection PyBroadException
try:
await on_new_message(
UserMessage(
text, collector, sender_id, input_channel=input_channel
)
)
except CancelledError:
logger.error(
"Message handling timed out for "
"user message '{}'.".format(text)
)
except Exception:
logger.exception(
"An exception occured while handling "
"user message '{}'.".format(text)
)
return response.json(collector.messages)
@custom_webhook.route("/callback", methods=['POST'])
async def callback(request: Request):
signature = request.headers['x-line-signature']
body = request.json
logger.info("Request body: {}".format(body))
try:
print(parser)
events = parser.parse(json.dumps(body), signature)
except InvalidSignatureError:
# reject requests with an invalid LINE signature (abort() is not imported in this module)
return response.text("Invalid signature", status=400)
print(events)
for event in events:
if not isinstance(event, MessageEvent):
continue
if not isinstance(event.message, TextMessage):
continue
if event.reply_token == '00000000000000000000000000000000' \
or event.reply_token == 'ffffffffffffffffffffffffffffffff':
continue
text = event.message.text
user_id = event.source.user_id
await self.send_message(text, on_new_message, event.reply_token, event.source)
return response.text("OK")
return custom_webhook
def get_output_channel(self, reply_token, source):
return LineChatBotOutputChannel(reply_token, source)
|
[] |
[] |
[
"LINE_CHANNEL_SECRET"
] |
[]
|
["LINE_CHANNEL_SECRET"]
|
python
| 1 | 0 | |
python/paddle/distributed/passes/ps_trainer_pass.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.compat as cpt
from ..ps.utils.public import *
from paddle.framework import core
from .pass_base import PassBase, register_pass
from paddle.fluid.transpiler.details.program_utils import delete_ops
from paddle.fluid.transpiler.collective import SingleProcessMultiThread
@register_pass("append_send_ops_pass")
class AppendSendOpsPass(PassBase): # this pass is reused by multiple modes
def __init__(self):
super(AppendSendOpsPass, self).__init__()
def _check_self(self):
return True
def _check_conflict(self, other_pass):
return True
def _append_send_op(self, program, union_vars, queue, is_sparse, table_id,
ps_mode):
if queue == STEP_COUNTER:
send_input_vars = []
else:
send_input_vars = [
program.global_block().vars[union_var]
for union_var in union_vars
]
dummy_output = []
if ps_mode in [DistributedMode.SYNC, DistributedMode.HALF_ASYNC]:
dummy_output = program.global_block().create_var(
name=framework.generate_control_dev_var_name())
program.global_block().append_op(
type="send",
inputs={"X": send_input_vars},
outputs={"Out": dummy_output},
attrs={
"send_varnames": [queue],
"is_sparse": is_sparse,
"table_id": table_id,
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
})
return dummy_output
def _append_barrier_op(self, program, dummys, trainer_id):
program.global_block().append_op(
type="send_barrier",
inputs={"X": dummys},
outputs={"Out": []},
attrs={
"trainer_id": trainer_id,
"half_async": True,
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
})
def _apply_single_impl(self, main_program, startup_program, pass_ctx):
attrs = pass_ctx._attrs
ps_mode = attrs['ps_mode']
if ps_mode == DistributedMode.GEO:
send_ctx = get_geo_trainer_send_context(attrs) # GEO mode
else:
send_ctx = get_the_one_send_context(attrs) # async, sync and other modes
dummys = []
for merged_name, send in send_ctx.items():
if send.is_sparse() and ps_mode != DistributedMode.GEO:
continue
is_sparse = 1 if send.is_sparse() else 0
is_sparse = 2 if send.is_distributed() else is_sparse
dummys.append(
self._append_send_op(main_program,
send.origin_varnames(), merged_name,
is_sparse, send.table_id(), ps_mode))
if ps_mode in [DistributedMode.SYNC, DistributedMode.HALF_ASYNC]:
trainer_id = get_role_id(attrs['role_maker'])
self._append_barrier_op(main_program, dummys, trainer_id)
@register_pass("distributed_ops_pass")
class DistributedOpsPass(PassBase):
def __init__(self):
super(DistributedOpsPass, self).__init__()
self.w_2_table_id = {}
self.emb_size = {}
def _check_self(self):
return True
def _check_conflict(self, other_pass):
return True
def _push_sparse_fuse(self, _program, push_sparse_ops, attrs):
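# Fuse the per-parameter sparse gradient ops into a single distributed_push_sparse op, feeding show/click vars (from a ShowClickEntry if configured, otherwise constant fillers).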
if attrs['use_ps_gpu']:
return
if len(push_sparse_ops) == 0:
return
show = None
clk = None
use_entry = False
for param, ops in push_sparse_ops.items():
op_first = ops[0]
break
if op_first.has_attr("entry"):
entry = op_first.attr("entry")
entry = entry.split(':')
if len(entry) == 3 and entry[0] == 'show_click_entry':
show_var_name = entry[1]
click_var_name = entry[2]
if show_var_name in _program.global_block(
).vars and click_var_name in _program.global_block().vars:
show = _program.global_block().vars[show_var_name]
clk = _program.global_block().vars[click_var_name]
use_entry = True
else:
warnings.warn(
'ShowClickEntry configured, but cannot find show/click var, will not use'
)
if not use_entry:
print('ShowClickEntry not configured, will not use')
show = _program.global_block().create_var(
name="show",
dtype=core.VarDesc.VarType.INT64,
persistable=False,
stop_gradient=True)
_program.global_block()._insert_op(
index=0,
type='fill_constant',
inputs={},
outputs={'Out': show},
attrs={
'shape': [1],
'dtype': show.dtype,
'value': 1,
})
clk = _program.global_block().create_var(
name="clk",
dtype=core.VarDesc.VarType.INT64,
persistable=False,
stop_gradient=True)
_program.global_block()._insert_op(
index=0,
type='fill_constant',
inputs={},
outputs={'Out': clk},
attrs={
'shape': [1],
'dtype': clk.dtype,
'value': 0,
})
for param, ops in push_sparse_ops.items():
all_ops = _program.global_block().ops
op_idxs = [all_ops.index(op) for op in ops]
inputs = [
_program.global_block().vars[op.input("Ids")[0]] for op in ops
]
w = _program.global_block().vars[ops[0].output("W@GRAD")[0]]
table_id = self.w_2_table_id[param]
padding_idx = ops[0].attr("padding_idx")
is_distributed = ops[0].attr("is_distributed")
op_type = ops[0].type
outputs = [
_program.global_block().vars[op.input("Out@GRAD")[0]]
for op in ops
]
for idx in op_idxs[::-1]:
_program.global_block()._remove_op(idx)
_program.global_block().append_op(
type="distributed_push_sparse",
inputs={
"Ids": inputs,
'W': w,
"Outputs": outputs,
"Shows": show,
"Clicks": clk
},
outputs={"Outputs": outputs},
attrs={
"is_distributed": is_distributed,
"padding_idx": padding_idx,
"table_id": table_id,
"size": self.emb_size[param]
})
def _pull_sparse_fuse(self, _program, pull_sparse_ops, attrs, send_ctx):
def dag_check_up_and_reorder(program, inputs, outputs):
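# Re-arrange ops so that all producers of the lookup ids (inputs) come before the first consumer of the lookup results (outputs), keeping the DAG valid once the lookups are fused into one op.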
global_block = program.global_block()
min_output_index = len(global_block.ops)
max_input_index = -1
input_indexes = [0] * len(global_block.ops)
output_indexes = [0] * len(global_block.ops)
for idx, op in enumerate(global_block.ops):
for i in range(0, len(op.output_names)):
if input_indexes[idx] == 1:
break
outs = op.output(op.output_names[i])
for in_id, in_var in enumerate(inputs):
if in_var.name in outs:
input_indexes[idx] = 1
max_input_index = max(max_input_index, idx)
break
for i in range(0, len(op.input_names)):
if output_indexes[idx] == 1:
break
ins = op.input(op.input_names[i])
for out_id, out_var in enumerate(outputs):
if out_var.name in ins:
output_indexes[idx] = 1
min_output_index = min(min_output_index, idx)
for i in range(len(global_block.ops)):
if input_indexes[i] == 1 and output_indexes[i] == 1:
warnings.warn(
"unable to re-arrange dags order to combine distributed embedding ops because a op both needs embedding table's output as input and produces ids as the same embedding table's input"
)
return
if min_output_index < max_input_index:
move_ops = []
for i in range(min_output_index + 1, len(input_indexes)):
if input_indexes[i] == 1:
move_ops.append((global_block.ops[i], i))
for i, op in enumerate(move_ops):
queue = list()
visited = set()
queue.append(op[1])
visited.add(op[0])
start = 0
while start < len(queue):
pos = queue[start]
op = global_block.ops[pos]
op_inputs = []
for k in range(0, len(op.input_names)):
ins = op.input(op.input_names[k])
op_inputs.append(ins)
for j in range(pos - 1, min_output_index - 1, -1):
op1 = global_block.ops[j]
if op1 in visited:
continue
found = False
for k in range(0, len(op1.output_names)):
outs = op1.output(op1.output_names[k])
for t in range(len(op_inputs)):
for y in op_inputs[t]:
if y in outs:
found = True
break
if found:
break
if found:
break
if found:
if output_indexes[j] == True:
warnings.warn(
"unable to re-arrange dags order to combine distributed embedding ops"
)
return
queue.append(j)
visited.add(global_block.ops[j])
start = start + 1
queue.sort()
for index in queue:
desc = global_block.desc._insert_op(min_output_index)
desc.copy_from(global_block.ops[index].desc)
global_block.desc._remove_op(index + 1, index + 2)
global_block.ops[index].desc = desc
insert_op = global_block.ops.pop(index)
input_state = input_indexes.pop(index)
output_state = output_indexes.pop(index)
global_block.ops.insert(min_output_index, insert_op)
input_indexes.insert(min_output_index, input_state)
output_indexes.insert(min_output_index, output_state)
min_output_index = min_output_index + 1
assert global_block.desc.op_size() == len(global_block.ops)
for i in range(len(global_block.ops)):
assert global_block.desc.op(i) == global_block.ops[i].desc
for param, ops in pull_sparse_ops.items():
all_ops = _program.global_block().ops
op_device = ""
if attrs['is_heter_ps_mode']:
op_device = ops[0].attr("op_device")
inputs = [
_program.global_block().vars[op.input("Ids")[0]] for op in ops
]
w = _program.global_block().vars[ops[0].input("W")[0]]
self.emb_size[param] = w.shape[1]
grad_name = attrs['param_name_to_grad_name'][w.name]
table_id = -1
for name, ctx in send_ctx.items():
if grad_name in ctx.origin_varnames():
table_id = ctx.table_id()
if table_id == -1:
raise ValueError(
"can not find suitable sparse table, please check")
self.w_2_table_id[param] = table_id
padding_idx = ops[0].attr("padding_idx")
is_distributed = ops[0].attr("is_distributed")
op_type = ops[0].type
outputs = [
_program.global_block().vars[op.output("Out")[0]] for op in ops
]
dag_check_up_and_reorder(_program, inputs, outputs)
op_idxs = [all_ops.index(op) for op in ops]
for idx in op_idxs[::-1]:
_program.global_block()._remove_op(idx)
inputs_idxs = [-1] * len(inputs)
outputs_idxs = [len(_program.global_block().ops) + 1] * len(outputs)
for idx, op in enumerate(_program.global_block().ops):
for i in range(0, len(op.output_names)):
outs = op.output(op.output_names[i])
for in_id, in_var in enumerate(inputs):
if in_var.name in outs:
inputs_idxs[in_id] = max(idx, inputs_idxs[in_id])
for i in range(0, len(op.input_names)):
ins = op.input(op.input_names[i])
for out_id, out_var in enumerate(outputs):
if out_var.name in ins:
outputs_idxs[out_id] = min(idx,
outputs_idxs[out_id])
if min(outputs_idxs) - max(inputs_idxs) >= 1:
if max(inputs_idxs) == -1:
distributed_idx = min(op_idxs)
else:
distributed_idx = max(inputs_idxs) + 1
if attrs['use_ps_gpu']:
_program.global_block()._insert_op(
index=distributed_idx,
type="pull_box_sparse",
inputs={"Ids": inputs,
'W': w},
outputs={"Out": outputs},
attrs={
"size": w.shape[1],
"is_distributed": True,
"is_sparse": True
})
else:
_program.global_block()._insert_op(
index=distributed_idx,
type="distributed_lookup_table",
inputs={"Ids": inputs,
'W': w},
outputs={"Outputs": outputs},
attrs={
"is_distributed": is_distributed,
"padding_idx": padding_idx,
"table_id": table_id,
"lookup_table_version": op_type,
"op_device": op_device
})
else:
for i in range(len(inputs_idxs)):
distributed_idx = op_idxs[i]
_program.global_block()._insert_op(
index=distributed_idx,
type="distributed_lookup_table",
inputs={"Ids": [inputs[i]],
'W': w},
outputs={"Outputs": [outputs[i]]},
attrs={
"is_distributed": is_distributed,
"padding_idx": padding_idx,
"table_id": table_id,
"lookup_table_version": op_type,
"op_device": op_device
})
def _get_pull_sparse_ops(self, _program, attrs):
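# Scan the program for remote-prefetch sparse lookup ops and their matching gradient ops, grouped by embedding parameter name.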
pull_sparse_ops = {}
pull_sparse_ids = {}
push_sparse_ops = {}
ops = {}
for op in _program.global_block().ops:
if op.type in SPARSE_OP_TYPE_DICT.keys() \
and op.attr('remote_prefetch') is True:
param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0]
if attrs['is_heter_ps_mode']:
# trick for matchnet, need to modify
param_name += op.input("Ids")[0][0]
ops = pull_sparse_ops.get(param_name, [])
ops.append(op)
pull_sparse_ops[param_name] = ops
ids = pull_sparse_ids.get(param_name, [])
ids.append(op.input("Ids")[0])
pull_sparse_ids[param_name] = ids
for op in _program.global_block().ops:
if op.type in SPARSE_GRAD_OP_TYPE_DICT.keys():
param_name = op.input(SPARSE_GRAD_OP_TYPE_DICT[op.type])[0]
if param_name in pull_sparse_ids and op.input("Ids")[
0] in pull_sparse_ids[param_name]:
ops = push_sparse_ops.get(param_name, [])
ops.append(op)
push_sparse_ops[param_name] = ops
return pull_sparse_ops, push_sparse_ops
def _apply_single_impl(self, main_program, startup_program, pass_ctx):
attrs = pass_ctx._attrs
pull_sparse_ops, push_sparse_ops = self._get_pull_sparse_ops(
main_program, attrs)
send_ctx = get_the_one_send_context(
attrs, split_dense_table=attrs['is_heter_ps_mode'])
self._pull_sparse_fuse(main_program, pull_sparse_ops, attrs, send_ctx)
self._push_sparse_fuse(main_program, push_sparse_ops, attrs)
@register_pass("delete_optimizer_pass")
class DeleteOptimizesPass(PassBase):
def __init__(self):
super(DeleteOptimizesPass, self).__init__()
def _check_self(self):
return True
def _check_conflict(self, other_pass):
return True
def _delete_optimizer_op_and_vars(self, _program, optimize_ops):
optimize_vars = []
optimize_op_role_vars = []
optimize_need_delete_vars = []
for op in optimize_ops:
optimize_vars.extend(op.input_arg_names)
optimize_op_role_vars.extend(op.attr("op_role_var"))
optimize_vars = list(set(optimize_vars))
optimize_op_role_vars = list(set(optimize_op_role_vars))
for var in optimize_vars:
if var not in optimize_op_role_vars:
optimize_need_delete_vars.append(var)
need_delete_optimize_vars = list(set(optimize_need_delete_vars))
delete_ops(_program.global_block(), optimize_ops)
for var in need_delete_optimize_vars:
if _program.global_block().has_var(var):
_program.global_block()._remove_var(var)
def _add_lr_var(self, main_program, attrs):
# Todo: hard code for pe
lr_var = attrs['origin_main_program'].global_block().vars[
"learning_rate_0"]
main_program.global_block().create_var(
name=lr_var.name,
shape=lr_var.shape,
dtype=lr_var.dtype,
type=lr_var.type,
lod_level=lr_var.lod_level,
persistable=True)
def _apply_single_impl(self, main_program, startup_program, pass_ctx):
attrs = pass_ctx._attrs
optimizer_ops = get_optimize_ops(main_program)
lr_ops = get_lr_ops(main_program)
optimizer_ops.extend(lr_ops)
self._delete_optimizer_op_and_vars(main_program, optimizer_ops)
if hasattr(attrs['origin_main_program'], 'lr_sheduler'):
self._add_lr_var(main_program, attrs)
@register_pass("delete_extra_optimizer_pass")
class DeleteExtraOptimizerPass(PassBase):
def __init__(self):
super(DeleteExtraOptimizerPass, self).__init__()
def _check_self(self):
return True
def _check_conflict(self, other_pass):
return True
def _apply_single_impl(self, main_program, startup_program, pass_ctx):
attrs = pass_ctx._attrs
optimize_vars = []
optimize_op_role_vars = []
optimize_need_delete_vars = []
for op in get_optimize_ops(main_program):
optimize_vars.extend(op.input_arg_names)
optimize_op_role_vars.extend(op.attr("op_role_var"))
optimize_vars = list(set(optimize_vars))
optimize_op_role_vars = list(set(optimize_op_role_vars))
for var in optimize_vars:
if var not in optimize_op_role_vars:
optimize_need_delete_vars.append(var)
need_delete_optimize_vars = list(set(optimize_need_delete_vars))
init_ops = []
for var in need_delete_optimize_vars:
param_init_op = []
for op in startup_program.global_block().ops:
if var in op.output_arg_names:
param_init_op.append(op)
init_ops.extend(param_init_op)
delete_ops(startup_program.global_block(), init_ops)
for var in need_delete_optimize_vars:
if startup_program.global_block().has_var(var):
startup_program.global_block()._remove_var(var)
@register_pass("fake_init_ops_pass")
class FakeInitOpsPass(PassBase):
def __init__(self):
super(FakeInitOpsPass, self).__init__()
def _check_self(self):
return True
def _check_conflict(self, other_pass):
return True
def _get_sparse_table_names(self, attrs):
dist_varnames = get_sparse_tablenames(attrs['origin_main_program'],
True)
sparse_varnames = get_sparse_tablenames(attrs['origin_main_program'],
False)
return list(set(dist_varnames + sparse_varnames))
def _fake_init_sparsetable(self, program, sparse_table_names):
# delete table init op
for table_name in sparse_table_names:
table_var = program.global_block().vars[table_name]
table_param_init_op = []
for op in program.global_block().ops:
if table_name in op.output_arg_names:
table_param_init_op.append(op)
init_op_num = len(table_param_init_op)
if init_op_num != 1:
raise ValueError("table init op num should be 1, now is " + str(
init_op_num))
table_init_op = table_param_init_op[0]
program.global_block().append_op(
type="fake_init",
inputs={},
outputs={"Out": table_var},
attrs={"shape": table_init_op.attr('shape')})
delete_ops(program.global_block(), table_param_init_op)
def _apply_single_impl(self, main_program, startup_program, pass_ctx):
attrs = pass_ctx._attrs
sparse_tables = self._get_sparse_table_names(attrs)
self._fake_init_sparsetable(startup_program, sparse_tables)
@register_pass("ps_gpu_pass")
class PsGpuPass(PassBase):
def __init__(self):
super(PsGpuPass, self).__init__()
def _check_self(self):
return True
def _check_conflict(self, other_pass):
return True
def _add_push_box_sparse_op(self, program):
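# For every pull_box_sparse op, generate its gradient op desc and append it to the block as the corresponding push (backward) op.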
op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
backward = core.op_proto_and_checker_maker.OpRole.Backward
for op in program.global_block().ops:
if op.type != "pull_box_sparse":
continue
grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
op.desc, cpt.to_text(set()), [])
for op_desc in grad_op_desc:
new_op_desc = program.global_block().desc.append_op()
new_op_desc.copy_from(op_desc)
new_op_desc._set_attr(op_role_attr_name, backward)
def _remove_optimizer_var(self, program):
embedding_w = {}
for idx, op in list(enumerate(program.global_block().ops)):
if op.type == "lookup_table_grad":
for name in op.input("W"):
embedding_w[name] = 1
optimize_vars = []
optimize_op_role_vars = []
optimize_need_delete_vars = []
for op in get_optimize_ops(program):
for name in op.input("Param"):
if name in embedding_w:
optimize_op_role_vars.extend(op.attr("op_role_var"))
for key_name in op.input_names:
if key_name == "LearningRate":
continue
for var in op.input(key_name):
optimize_vars.append(var)
optimize_vars = list(set(optimize_vars))
optimize_op_role_vars = list(set(optimize_op_role_vars))
for var in optimize_vars:
if var not in optimize_op_role_vars:
optimize_need_delete_vars.append(var)
need_delete_optimize_vars = list(set(optimize_need_delete_vars))
for name in need_delete_optimize_vars:
if program.global_block().has_var(name):
program.global_block()._remove_var(name)
def _remove_lookup_table_grad_op_and_var(self, program):
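# Remove lookup_table_grad ops, any op that consumes their W@GRAD outputs, and the W@GRAD variables themselves.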
lookup_table_grad_var = {}
remove_op_index = []
remove_var = []
for idx, op in list(enumerate(program.global_block().ops)):
if op.type == "lookup_table_grad":
for name in op.output("W@GRAD"):
lookup_table_grad_var[name] = 1
remove_op_index.append(idx)
remove_var.append(name)
for name in op.input("W"):
lookup_table_grad_var[name] = 1
for idx, op in list(enumerate(program.global_block().ops)):
if op.type == "pull_box_sparse":
continue
for key_name in op.input_names:
for var in op.input(key_name):
if var in lookup_table_grad_var:
remove_op_index.append(idx)
break
remove_op_index = list(set(remove_op_index))
remove_op_index.sort(reverse=True)
for idx in remove_op_index:
program.global_block()._remove_op(idx)
for name in remove_var:
program.global_block()._remove_var(name)
def _apply_single_impl(self, main_program, startup_program, pass_ctx):
attrs = pass_ctx._attrs
self._add_push_box_sparse_op(main_program)
self._remove_optimizer_var(main_program)
self._remove_lookup_table_grad_op_and_var(main_program)
@register_pass("ps_transpile_pass")
class PsTranspilePass(PassBase):
def __init__(self):
super(PsTranspilePass, self).__init__()
def _check_self(self):
return True
def _check_conflict(self, other_pass):
return True
def _apply_single_impl(self, main_program, startup_program, pass_ctx):
attrs = pass_ctx._attrs
t = SingleProcessMultiThread()
env = get_dist_env()
t.transpile(
startup_program=startup_program,
main_program=main_program,
rank=env["trainer_id"],
endpoints=env["trainer_endpoints"],
current_endpoint=env['current_endpoint'],
wait_port=False)
@register_pass("split_heter_worker_ops_pass")
class SplitHeterWorkerOpsPass(PassBase):
def __init__(self):
super(SplitHeterWorkerOpsPass, self).__init__()
def _check_self(self):
return True
def _check_conflict(self, other_pass):
return True
def _create_heter_program(self, program, attrs, heter_program,
program_block_ops_list, heter_ops,
block_var_detail):
# This function mainly includes the following contents:
# 1. For every heter block:
# a) copy heter device op from origin program
# b) create variables which belong to heter op:
# -> if variable is persistable, clone it in global_scope
# -> if variable is temp, create it in heter block
# c) create communicate related op as follow:
# joint_var.0_1 -> slice -> reshape -> origin_var
# origin_var -> origin_program
# reshape -> concat -> joint_var.1_2
# d) copy send op from origin program for var@grad which is located in the current heter block
# e) re-check every op in the current block if its device is not the current heter device
# 2. Create send op for step counter in last heter-block
# 3. Create Listen&Serv OP and Send&Recv OP for distributed training
# 4. update CompileTimeStrategy for heter_program
optimizer_block = []
grad_to_block_id = []
send_grad_var_list = []
pre_block_idx = heter_program.num_blocks - 1
role_maker = attrs['role_maker']
current_device = role_maker._heter_device_type().lower()
stage_id = int(role_maker._get_stage_id())
heter_block_ops_forward = program_block_ops_list[stage_id - 1][
"forward"]
heter_block_ops_backward = program_block_ops_list[stage_id - 1][
"backward"]
heter_block = heter_program._create_block(pre_block_idx)
optimizer_block.append(heter_block)
for _, op in enumerate(heter_block_ops_forward):
block_append_op(heter_program, program, heter_block, op)
entrance_vars = block_var_detail[stage_id - 1]["forward"]["entrance"]
add_vars_by_var_list(entrance_vars, program, heter_program, heter_block)
exit_vars = block_var_detail[stage_id - 1]["forward"]["exit"]
add_vars_by_var_list(exit_vars, program, heter_program, heter_block)
first_op_index_fp = len(heter_block.ops)
if stage_id < len(program_block_ops_list):
heter_block_bp = heter_program._create_block(pre_block_idx)
optimizer_block.append(heter_block_bp)
for _, op in enumerate(heter_block_ops_backward):
block_append_op(heter_program, program, heter_block_bp, op)
bp_entrance_vars = block_var_detail[stage_id - 1]["backward"][
"entrance"]
add_vars_by_var_list(bp_entrance_vars, program, heter_program,
heter_block_bp)
bp_exit_vars = block_var_detail[stage_id - 1]["backward"]["exit"]
add_vars_by_var_list(bp_exit_vars, program, heter_program,
heter_block_bp)
backward_comm_info = get_communicate_var_info(
program, stage_id, bp_entrance_vars, type="backward")
grad_to_block_id.append(backward_comm_info["block_input_var_name"] +
":" + str(heter_block_bp.idx))
else:
for _, op in enumerate(heter_block_ops_backward):
block_append_op(heter_program, program, heter_block, op)
bp_entrance_vars = block_var_detail[stage_id - 1]["backward"][
"entrance"]
add_vars_by_var_list(bp_entrance_vars, program, heter_program,
heter_block)
bp_exit_vars = block_var_detail[stage_id - 1]["backward"]["exit"]
add_vars_by_var_list(bp_exit_vars, program, heter_program,
heter_block)
heter_block_bp = heter_block
forward_comm_info = get_communicate_var_info(
program, stage_id, entrance_vars, type="forward")
grad_to_block_id.append(forward_comm_info["block_input_var_name"] + ":"
+ str(heter_block.idx))
first_op_index_bp = len(heter_block_bp.ops)
if stage_id <= len(block_var_detail) - 1:
static_var = insert_communicate_op(program, role_maker, heter_block,
stage_id, first_op_index_fp,
block_var_detail, current_device)
static_var_bp = insert_communicate_op(
program, role_maker, heter_block_bp, stage_id, first_op_index_bp,
block_var_detail, current_device, False)
# add send op
send_grad_var_list = add_heter_send_op(program, heter_program,
heter_block_bp,
block_var_detail[stage_id - 1])
# add step counter
send_input_vars = []
dummy_output = []
pserver_endpoints = get_ps_endpoints(role_maker)
attrs = {
"message_to_block_id": grad_to_block_id,
"optimize_blocks": optimizer_block,
# runtime attribute
"endpoint": get_heter_worker_endpoint(role_maker),
"fanin": len(get_previous_stage_trainers(role_maker)),
"pserver_id": get_role_id(role_maker),
"distributed_mode": attrs['ps_mode'],
"rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)),
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
}
# append the listen_and_serv op
heter_program.global_block().append_op(
type="heter_listen_and_serv",
inputs={'X': []},
outputs={},
attrs=attrs)
# TODO check heter program
def _apply_single_impl(self, main_program, startup_program, pass_ctx):
"""
split heter worker program from origin-program
1. find heter op (located on different device)
2. find input&output of every heter-block
3. create heter worker program, add listen&serv op
"""
attrs = pass_ctx._attrs
default_device = "cpu"
program, heter_ops, _, program_block_ops = find_heter_ops(
main_program, default_device)
if len(heter_ops) == 0:
warnings.warn(
"Currently running in Heter Parameter Server mode, but no OP running on heterogeneous devices, Please check your code."
)
main_program = program
return
program_block_ops = union_forward_gradient_op(program_block_ops)
block_vars_detail = find_block_joints(program, program_block_ops,
heter_ops)
heter_program = framework.Program()
self._create_heter_program(program, attrs, heter_program,
program_block_ops, heter_ops,
block_vars_detail)
main_program = heter_program
@register_pass("split_trainer_ops_pass")
class SplitTrainerOpsPass(PassBase):
def __init__(self):
super(SplitTrainerOpsPass, self).__init__()
def _check_self(self):
return True
def _check_conflict(self, other_pass):
return True
def _replace_ops_by_communicate_op(self, program, attrs, heter_block_index,
ops_list, block_var_detail):
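# Delete the ops that were moved to the heter worker and, for the first heter stage, insert a send_and_recv op at their position to forward the entrance vars to the next stage.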
all_op = program.global_block().ops
start_op = ops_list[0]
first_op_idx = -1
for op in all_op:
if str(op) == str(start_op):
first_op_idx = all_op.index(op)
break
assert first_op_idx != -1
self._delete_same_ops(program.global_block(), ops_list)
entrance_var = []
role_maker = attrs['role_maker']
if heter_block_index == 1:
next_heter_worker_endpoints = get_next_stage_trainers(role_maker)
entrance_var = block_var_detail[heter_block_index]["forward"][
"entrance"]
comm_info = get_communicate_var_info(program, heter_block_index + 1,
entrance_var)
program.global_block()._insert_op(
index=first_op_idx,
type="send_and_recv",
inputs={"X": program.global_block().vars[entrance_var[0]]},
outputs={"Out": []},
attrs={
"mode": "forward",
"send_var_name": entrance_var + ["microbatch_id"],
"recv_var_name": [],
"message_name": comm_info["block_input_var_name"],
"next_endpoints": next_heter_worker_endpoints,
"previous_endpoints": [],
"trainer_id": get_role_id(role_maker),
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
})
return entrance_var
def _delete_same_ops(self, block, ops):
for op in ops:
try:
for origin_op in block.ops:
if str(origin_op) == str(op):
idx = list(block.ops).index(origin_op)
block._remove_op(idx)
break
except Exception as e:
print(e)
def _remove_var_pair_by_grad(self, var_name, attrs):
for index, pair in enumerate(attrs['merged_variables_pairs']):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del attrs['merged_variables_pairs'][index]
for index, pair in enumerate(attrs['merged_dense_pairs']):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del attrs['merged_dense_pairs'][index]
return
for index, pair in enumerate(attrs['merged_sparse_pairs']):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del attrs['merged_sparse_pairs'][index]
return
def _remove_trainer_send_op(self, program, attrs, heter_block_index,
block_var_detail):
# if the trainer does FF->BP->SEND, it has the following vars: var, var@GRAD
# if the trainer only does SEND, it has one var: var@GRAD
# delete the send op if the trainer doesn't have the paired var (var <-> var@GRAD)
persistables = block_var_detail[heter_block_index]["forward"]["persistables"] + \
block_var_detail[heter_block_index]["backward"]["persistables"]
need_remove_send_op = []
need_remove_grad_var = []
for op in find_send_op(program):
input_list, _ = find_op_input_output(program,
program.global_block(), op)
for var_name in input_list:
origin_var_name = var_name.split("@GRAD")[0]
if origin_var_name in persistables:
need_remove_send_op.append(op)
need_remove_grad_var.append(var_name)
need_remove_send_op = list(set(need_remove_send_op))
delete_ops(program.global_block(), need_remove_send_op)
for grad_var_name in need_remove_grad_var:
self._remove_var_pair_by_grad(grad_var_name, attrs)
def _create_trainer_program(self, program, origin_program, attrs,
program_block_ops_list, block_var_detail):
# This function mainly includes the following contents:
# 1. For every heter block in origin program
# a) delete heter op and related variables
# b) add send&recv op
# c) add communicate ops as follows:
# origin_var -> reshape -> concat -> joint_var.0_1
# send&recv op(send joint_var.0_1; recv joint_var.1_2)
# joint_var.1_2 -> slice -> reshape -> origin_var
# d) remove send ops whose related var@grad is not in the trainer program
# 2. check every op's device
static_var = []
for heter_block_index in range(1, len(program_block_ops_list)):
ops_list = program_block_ops_list[heter_block_index][
"forward"] + program_block_ops_list[heter_block_index][
"backward"]
static_var += self._replace_ops_by_communicate_op(
program, attrs, heter_block_index, ops_list, block_var_detail)
self._remove_trainer_send_op(program, attrs, heter_block_index,
block_var_detail)
optimizer_block = []
grad_to_block_id = []
bp_ops_list = program_block_ops_list[0]["backward"]
self._delete_same_ops(program.global_block(), bp_ops_list)
delete_trainer_useless_var(program, static_var)
backward_block = create_backward_block(program, origin_program,
bp_ops_list, block_var_detail)
bp_entrance_vars = block_var_detail[0]["backward"]["entrance"]
backward_comm_info = get_communicate_var_info(
origin_program, 1, bp_entrance_vars, type="backward")
grad_to_block_id.append(backward_comm_info["block_input_var_name"] + ":"
+ str(backward_block.idx))
optimizer_block.append(backward_block)
role_maker = attrs['role_maker']
attrs = {
"message_to_block_id": grad_to_block_id,
"optimize_blocks": optimizer_block,
# runtime attribute
"endpoint":
get_trainer_endpoint(role_maker), ## get trainer endpoint
"fanin": 0, ## get heter worker
"pserver_id": get_role_id(role_maker),
"distributed_mode": attrs['ps_mode'],
"rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)),
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
}
# append the listen_and_serv op
program.global_block()._insert_op(
index=0,
type="heter_listen_and_serv",
inputs={'X': []},
outputs={},
attrs=attrs)
## TODO add check for bp block
#check_op_device(program.global_block(), DEFAULT_DEVICE)
def _apply_single_impl(self, main_program, startup_program, pass_ctx):
"""
split cpu-trainer program from origin-program
1. find heter op (located on different device)
2. find input&output of every heter-block
3. create cpu-trainer program, add send&recv op
"""
attrs = pass_ctx._attrs
default_device_ = 'cpu'
program, heter_ops, default_ops, program_block_ops = find_heter_ops(
main_program, default_device_)
program_block_ops = union_forward_gradient_op(program_block_ops)
block_vars_detail = find_block_joints(program, program_block_ops,
heter_ops)
trainer_program = program.clone()
self._create_trainer_program(trainer_program, program, attrs,
program_block_ops, block_vars_detail)
main_program = trainer_program
@register_pass("set_heter_pipeline_opt_pass")
class SetHeterPipelineOptPass(PassBase):
def __init__(self):
super(SetHeterPipelineOptPass, self).__init__()
def _check_self(self):
return True
def _check_conflict(self, other_pass):
return True
def _apply_single_impl(self, main_program, startup_program, pass_ctx):
attrs = pass_ctx._attrs
role_maker = attrs['role_maker']
num_microbatches = attrs['user_defined_strategy'].pipeline_configs[
'accumulate_steps']
attrs['origin_startup_program']._heter_pipeline_opt = {
"startup_program": startup_program,
"pipeline_stage": int(role_maker._get_stage_id()) - 1,
"heter_place": role_maker._heter_device(),
}
attrs['origin_main_program']._heter_pipeline_opt = {
"trainer": "HeterPipelineTrainer",
"device_worker": "HeterSection",
"trainers":
role_maker._get_stage_trainers(), ## trainer num in each stage
"trainer_id": int(role_maker._role_id()),
"pipeline_stage": int(role_maker._get_stage_id()) - 1,
"num_pipeline_stages": int(role_maker._get_num_stage()),
"section_program": main_program,
"num_microbatches": num_microbatches,
"heter_place": role_maker._heter_device(),
}
|
[] |
[] |
[
"CPU_NUM"
] |
[]
|
["CPU_NUM"]
|
python
| 1 | 0 | |
inference-engine/ie_bridges/python/tests/test_InferRequest.py
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import os
import pytest
import threading
from datetime import datetime
import time
from openvino.inference_engine import ie_api as ie
from conftest import model_path, image_path, create_encoder
import ngraph as ng
from ngraph.impl import Function, Type
is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)
path_to_img = image_path()
def create_function_with_memory(input_shape, data_type):
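# Build an nGraph Function with a ReadValue/Assign pair ("var_id_667") so the tests can exercise memory state (query_state) on the device.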
input_data = ng.parameter(input_shape, name="input_data", dtype=data_type)
rv = ng.read_value(input_data, "var_id_667")
add = ng.add(rv, input_data, name="MemoryAdd")
node = ng.assign(add, "var_id_667")
res = ng.result(add, "res")
func = Function(results=[res], sinks=[node], parameters=[input_data], name="name")
caps = Function.to_capsule(func)
return caps
def read_image():
import cv2
n, c, h, w = (1, 3, 32, 32)
image = cv2.imread(path_to_img)
if image is None:
raise FileNotFoundError("Input image not found")
image = cv2.resize(image, (h, w)) / 255
image = image.transpose((2, 0, 1)).astype(np.float32)
image = image.reshape((n, c, h, w))
return image
def load_sample_model(device, num_requests=1):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=num_requests)
return executable_network
def test_input_blobs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
td = ie.TensorDesc("FP32", (1, 3, 32, 32), "NCHW")
assert executable_network.requests[0].input_blobs['data'].tensor_desc == td
def test_output_blobs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
td = ie.TensorDesc("FP32", (1, 10), "NC")
assert executable_network.requests[0].output_blobs['fc_out'].tensor_desc == td
def test_inputs_list(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
for req in executable_network.requests:
assert len(req._inputs_list) == 1
assert "data" in req._inputs_list
del ie_core
def test_outputs_list(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
for req in executable_network.requests:
assert len(req._outputs_list) == 1
assert "fc_out" in req._outputs_list
del ie_core
def test_access_input_buffer(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
buffer = executable_network.requests[0]._get_blob_buffer("data".encode()).to_numpy()
assert buffer.shape == (1, 3, 32, 32)
assert buffer.strides == (12288, 4096, 128, 4)
assert buffer.dtype == np.float32
del executable_network
del ie_core
del net
def test_access_output_buffer(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
buffer = executable_network.requests[0]._get_blob_buffer("fc_out".encode()).to_numpy()
assert buffer.shape == (1, 10)
assert buffer.strides == (40, 4)
assert buffer.dtype == np.float32
del executable_network
del ie_core
del net
def test_write_to_input_blobs_directly(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = executable_network.requests[0]
input_data = request.input_blobs["data"]
input_data.buffer[:] = img
assert np.array_equal(executable_network.requests[0].input_blobs["data"].buffer, img)
del executable_network
del ie_core
del net
def test_write_to_input_blobs_copy(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = executable_network.requests[0]
request.input_blobs["data"].buffer[:] = img
assert np.allclose(executable_network.requests[0].input_blobs["data"].buffer, img)
del executable_network
del ie_core
del net
def test_infer(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.infer({'data': img})
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
del exec_net
del ie_core
del net
def test_async_infer_default_timeout(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
request.wait()
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
del exec_net
del ie_core
del net
def test_async_infer_wait_finish(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
request.wait(ie.WaitMode.RESULT_READY)
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
del exec_net
del ie_core
del net
def test_async_infer_wait_time(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=2)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
start_time = datetime.utcnow()
status = request.wait(ie.WaitMode.RESULT_READY)
assert status == ie.StatusCode.OK
time_delta = datetime.utcnow() - start_time
latency_ms = (time_delta.microseconds / 1000) + (time_delta.seconds * 1000)
timeout = max(100, latency_ms)
request = exec_net.requests[1]
request.async_infer({'data': img})
max_repeat = 10
status = ie.StatusCode.REQUEST_BUSY
i = 0
while i < max_repeat and status != ie.StatusCode.OK:
status = request.wait(timeout)
i += 1
assert status == ie.StatusCode.OK
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
del exec_net
del ie_core
del net
def test_async_infer_wait_status(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
request.wait(ie.WaitMode.RESULT_READY)
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
status = request.wait(ie.WaitMode.STATUS_ONLY)
assert status == ie.StatusCode.OK
del exec_net
del ie_core
del net
def test_async_infer_fill_inputs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.input_blobs['data'].buffer[:] = img
request.async_infer()
status_end = request.wait()
assert status_end == ie.StatusCode.OK
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res[0]) == 2
del exec_net
del ie_core
del net
def test_infer_modify_outputs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
outputs0 = exec_net.infer({'data': img})
status_end = request.wait()
assert status_end == ie.StatusCode.OK
assert np.argmax(outputs0['fc_out']) == 2
outputs0['fc_out'][:] = np.zeros(shape=(1, 10), dtype=np.float32)
outputs1 = request.output_blobs
assert np.argmax(outputs1['fc_out'].buffer) == 2
outputs1['fc_out'].buffer[:] = np.ones(shape=(1, 10), dtype=np.float32)
outputs2 = request.output_blobs
assert np.argmax(outputs2['fc_out'].buffer) == 2
del exec_net
del ie_core
del net
def test_async_infer_callback(device):
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
@static_vars(callback_called=0)
def callback(self, status):
callback.callback_called = 1
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.set_completion_callback(callback)
request.async_infer({'data': img})
status = request.wait()
assert status == ie.StatusCode.OK
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
assert callback.callback_called == 1
del exec_net
del ie_core
def test_async_infer_callback_wait_before_start(device):
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
@static_vars(callback_called=0)
def callback(self, status):
callback.callback_called = 1
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.set_completion_callback(callback)
status = request.wait()
assert status == ie.StatusCode.INFER_NOT_STARTED
request.async_infer({'data': img})
status = request.wait()
assert status == ie.StatusCode.OK
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
assert callback.callback_called == 1
del exec_net
del ie_core
def test_async_infer_callback_wait_in_callback(device):
class InferReqWrap:
def __init__(self, request):
self.request = request
self.cv = threading.Condition()
self.request.set_completion_callback(self.callback)
self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY)
assert self.status_code == ie.StatusCode.INFER_NOT_STARTED
def callback(self, statusCode, userdata):
self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY)
self.cv.acquire()
self.cv.notify()
self.cv.release()
def execute(self, input_data):
self.request.async_infer(input_data)
self.cv.acquire()
self.cv.wait()
self.cv.release()
status = self.request.wait(ie.WaitMode.RESULT_READY)
assert status == ie.StatusCode.OK
assert self.status_code == ie.StatusCode.RESULT_NOT_READY
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request_wrap = InferReqWrap(exec_net.requests[0])
request_wrap.execute({'data': img})
del exec_net
del ie_core
def test_async_infer_wait_while_callback_will_not_finish(device):
def callback(status, callback_status):
time.sleep(0.01)
callback_status['finished'] = True
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
callback_status = {}
callback_status['finished'] = False
request = exec_net.requests[0]
request.set_completion_callback(callback, py_data=callback_status)
img = read_image()
request.async_infer({'data': img})
request.wait()
assert callback_status['finished'] == True
def test_get_perf_counts(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
ie_core.set_config({"PERF_COUNT": "YES"}, device)
exec_net = ie_core.load_network(net, device)
img = read_image()
request = exec_net.requests[0]
request.infer({'data': img})
pc = request.get_perf_counts()
assert pc['29']["status"] == "EXECUTED"
del exec_net
del ie_core
del net
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason=f"Can't run test on device {os.environ.get('TEST_DEVICE', 'CPU')}, "
"Dynamic batch fully supported only on CPU")
def test_set_batch_size(device):
ie_core = ie.IECore()
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to dynamic batch isn't supported")
ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device)
net = ie_core.read_network(test_net_xml, test_net_bin)
net.batch_size = 10
data = np.zeros(shape=net.input_info['data'].input_data.shape)
exec_net = ie_core.load_network(net, device)
data[0] = read_image()[0]
request = exec_net.requests[0]
request.set_batch(1)
request.infer({'data': data})
assert np.allclose(int(round(request.output_blobs['fc_out'].buffer[0][2])), 1), "Incorrect data for 1st batch"
del exec_net
del ie_core
del net
def test_set_zero_batch_size(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
request = exec_net.requests[0]
with pytest.raises(ValueError) as e:
request.set_batch(0)
assert "Batch size should be positive integer number but 0 specified" in str(e.value)
del exec_net
del ie_core
del net
def test_set_negative_batch_size(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
request = exec_net.requests[0]
with pytest.raises(ValueError) as e:
request.set_batch(-1)
assert "Batch size should be positive integer number but -1 specified" in str(e.value)
del exec_net
del ie_core
del net
def test_blob_setter(device):
ie_core = ie.IECore()
if device == "CPU":
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin")
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net_1 = ie_core.load_network(network=net, device_name=device, num_requests=1)
net.input_info['data'].layout = "NHWC"
exec_net_2 = ie_core.load_network(network=net, device_name=device, num_requests=1)
img = read_image()
res_1 = np.sort(exec_net_1.infer({"data": img})['fc_out'])
img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32)
tensor_desc = ie.TensorDesc("FP32", [1, 3, 32, 32], "NHWC")
img_blob = ie.Blob(tensor_desc, img)
request = exec_net_2.requests[0]
request.set_blob('data', img_blob)
request.infer()
res_2 = np.sort(request.output_blobs['fc_out'].buffer)
assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)
def test_blob_setter_with_preprocess(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)
img = read_image()
tensor_desc = ie.TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
img_blob = ie.Blob(tensor_desc, img)
preprocess_info = ie.PreProcessInfo()
preprocess_info.mean_variant = ie.MeanVariant.MEAN_IMAGE
request = exec_net.requests[0]
request.set_blob('data', img_blob, preprocess_info)
pp = request.preprocess_info["data"]
assert pp.mean_variant == ie.MeanVariant.MEAN_IMAGE
def test_getting_preprocess(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)
request = exec_net.requests[0]
preprocess_info = request.preprocess_info["data"]
assert isinstance(preprocess_info, ie.PreProcessInfo)
assert preprocess_info.mean_variant == ie.MeanVariant.NONE
def test_resize_algorithm_work(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net_1 = ie_core.load_network(network=net, device_name=device, num_requests=1)
img = read_image()
res_1 = np.sort(exec_net_1.infer({"data": img})['fc_out'])
net.input_info['data'].preprocess_info.resize_algorithm = ie.ResizeAlgorithm.RESIZE_BILINEAR
exec_net_2 = ie_core.load_network(net, device)
import cv2
image = cv2.imread(path_to_img)
if image is None:
raise FileNotFoundError("Input image not found")
image = image / 255
image = image.transpose((2, 0, 1)).astype(np.float32)
image = np.expand_dims(image, 0)
tensor_desc = ie.TensorDesc("FP32", [1, 3, image.shape[2], image.shape[3]], "NCHW")
img_blob = ie.Blob(tensor_desc, image)
request = exec_net_2.requests[0]
assert request.preprocess_info["data"].resize_algorithm == ie.ResizeAlgorithm.RESIZE_BILINEAR
request.set_blob('data', img_blob)
request.infer()
res_2 = np.sort(request.output_blobs['fc_out'].buffer)
assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)
@pytest.mark.parametrize("mode", ["set_init_memory_state", "reset_memory_state", "normal"])
@pytest.mark.parametrize("data_type", ["FP32", "FP16", "I32"])
@pytest.mark.parametrize("input_shape", [[10], [10, 10], [10, 10, 10], [2, 10, 10, 10]])
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason=f"Can't run test on device {os.environ.get('TEST_DEVICE', 'CPU')}, "
"Memory layers fully supported only on CPU")
def test_query_state_write_buffer(device, input_shape, data_type, mode):
ie_core = ie.IECore()
if device == "CPU":
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin")
layout = ["C", "HW", "CHW", "NCHW"]
from openvino.inference_engine import TensorDesc, Blob, format_map
net = ie.IENetwork(create_function_with_memory(input_shape, format_map[data_type]))
ie_core = ie.IECore()
exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)
request = exec_net.requests[0]
mem_states = request.query_state()
mem_state = mem_states[0]
assert mem_state.name == 'var_id_667'
# todo: Uncomment after fix 45611,
# CPU plugin returns outputs and memory state in FP32 in case of FP16 original precision
#assert mem_state.state.tensor_desc.precision == data_type
for i in range(1, 10):
if mode == "set_init_memory_state":
# create initial value
const_init = 5
init_array = np.full(input_shape, const_init, dtype=format_map[mem_state.state.tensor_desc.precision])
tensor_desc = TensorDesc(mem_state.state.tensor_desc.precision, input_shape, layout[len(input_shape) - 1])
blob = Blob(tensor_desc, init_array)
mem_state.state = blob
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])})
expected_res = np.full(input_shape, 1 + const_init, dtype=format_map[data_type])
elif mode == "reset_memory_state":
# reset initial state of ReadValue to zero
mem_state.reset()
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])})
# always ones
expected_res = np.full(input_shape, 1, dtype=format_map[data_type])
else:
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])})
expected_res = np.full(input_shape, i, dtype=format_map[data_type])
assert np.allclose(res['MemoryAdd'], expected_res, atol=1e-6), \
"Expected values: {} \n Actual values: {} \n".format(expected_res, res)
@pytest.mark.template_plugin
@pytest.mark.parametrize("shape, p_shape, ref_shape", [
([1, 4, 20, 20], [-1, 4, 20, 20], [5, 4, 20, 20]),
([1, 4, 20, 20], [(0,5), 4, 20, 20], [3, 4, 20, 20]),
([1, 4, 20, 20], [(3,5), 4, 20, 20], [2, 4, 20, 20]),
([1, 4, 20, 20], [(3,5), 4, 20, 20], [6, 4, 20, 20]),
])
def test_infer_dynamic_network_with_set_shape(shape, p_shape, ref_shape):
function = create_encoder(shape)
net = ng.function_to_cnn(function)
net.reshape({"data": p_shape})
ie_core = ie.IECore()
ie_core.register_plugin("ov_template_plugin", "TEMPLATE")
exec_net = ie_core.load_network(net, "TEMPLATE")
exec_net.requests[0].input_blobs["data"].set_shape(ref_shape)
assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape
exec_net.infer({"data": np.ones(ref_shape)})
request = exec_net.requests[0]
request.async_infer({"data": np.ones(ref_shape)})
status = request.wait(ie.WaitMode.RESULT_READY)
assert status == ie.StatusCode.OK
assert request.output_blobs['out'].tensor_desc.dims == ref_shape
@pytest.mark.template_plugin
@pytest.mark.parametrize("shape, p_shape, ref_shape", [
([1, 4, 20, 20], [-1, 4, 20, 20], [5, 4, 20, 20]),
([1, 4, 20, 20], [(0,5), 4, 20, 20], [3, 4, 20, 20]),
([1, 4, 20, 20], [(3,5), 4, 20, 20], [2, 4, 20, 20]),
([1, 4, 20, 20], [(3,5), 4, 20, 20], [6, 4, 20, 20]),
])
def test_infer_dynamic_network_without_set_shape(shape, p_shape, ref_shape):
function = create_encoder(shape)
net = ng.function_to_cnn(function)
net.reshape({"data": p_shape})
ie_core = ie.IECore()
ie_core.register_plugin("ov_template_plugin", "TEMPLATE")
exec_net = ie_core.load_network(net, "TEMPLATE")
exec_net.infer({"data": np.ones(ref_shape)})
assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape
request = exec_net.requests[0]
request.async_infer({"data": np.ones(ref_shape)})
status = request.wait(ie.WaitMode.RESULT_READY)
assert status == ie.StatusCode.OK
assert request.output_blobs['out'].tensor_desc.dims == ref_shape
@pytest.mark.template_plugin
@pytest.mark.parametrize("shape, p_shape, ref_shape", [
([1, 4, 20, 20], [-1, 4, 20, 20], [5, 4, 20, 20]),
([1, 4, 20, 20], [(0,5), 4, 20, 20], [3, 4, 20, 20]),
([1, 4, 20, 20], [(3,5), 4, 20, 20], [2, 4, 20, 20]),
([1, 4, 20, 20], [(3,5), 4, 20, 20], [6, 4, 20, 20]),
])
def test_infer_dynamic_network_with_set_blob(shape, p_shape, ref_shape):
function = create_encoder(shape)
net = ng.function_to_cnn(function)
net.reshape({"data": p_shape})
ie_core = ie.IECore()
ie_core.register_plugin("ov_template_plugin", "TEMPLATE")
exec_net = ie_core.load_network(net, "TEMPLATE")
tensor_desc = exec_net.requests[0].input_blobs["data"].tensor_desc
tensor_desc.dims = ref_shape
blob = ie.Blob(tensor_desc)
exec_net.requests[0].set_blob("data", blob)
assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape
request = exec_net.requests[0]
request.infer({"data": np.ones(ref_shape)})
request.async_infer({"data": np.ones(ref_shape)})
status = request.wait(ie.WaitMode.RESULT_READY)
assert status == ie.StatusCode.OK
assert request.output_blobs["out"].tensor_desc.dims == ref_shape
@pytest.mark.template_plugin
def test_infer_dynamic_network_twice():
shape, p_shape = [1, 4, 20, 20], [(0,5), 4, 20, 20]
ref_shape1, ref_shape2 = [2, 4, 20, 20], [3, 4, 20, 20]
function = create_encoder(shape)
net = ng.function_to_cnn(function)
net.reshape({"data": p_shape})
ie_core = ie.IECore()
ie_core.register_plugin("ov_template_plugin", "TEMPLATE")
exec_net = ie_core.load_network(net, "TEMPLATE")
request = exec_net.requests[0]
request.infer({"data": np.ones(ref_shape1)})
assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape1
assert request.output_blobs['out'].tensor_desc.dims == ref_shape1
request.infer({"data": np.ones(ref_shape2)})
assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape2
assert request.output_blobs['out'].tensor_desc.dims == ref_shape2
@pytest.mark.template_plugin
def test_infer_dynamic_network_with_set_blob_twice():
shape, p_shape = [1, 4, 20, 20], [(0,5), 4, 20, 20]
ref_shape1, ref_shape2 = [2, 4, 20, 20], [3, 4, 20, 20]
function = create_encoder(shape)
net = ng.function_to_cnn(function)
net.reshape({"data": p_shape})
ie_core = ie.IECore()
ie_core.register_plugin("ov_template_plugin", "TEMPLATE")
exec_net = ie_core.load_network(net, "TEMPLATE")
request = exec_net.requests[0]
td = request.input_blobs['data'].tensor_desc
td.dims = ref_shape1
blob = ie.Blob(td)
request.set_blob("data", blob)
request.infer({"data": np.ones(ref_shape1)})
assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape1
assert request.output_blobs['out'].tensor_desc.dims == ref_shape1
td = request.input_blobs['data'].tensor_desc
td.dims = ref_shape2
blob = ie.Blob(td)
request.set_blob("data", blob)
request.infer({"data": np.ones(ref_shape2)})
assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape2
assert request.output_blobs['out'].tensor_desc.dims == ref_shape2
@pytest.mark.template_plugin
@pytest.mark.parametrize("shapes", [
([3, 4, 20, 20], [3, 4, 20, 20], [3, 4, 20, 20]),
([3, 4, 20, 20], [3, 4, 28, 28], [3, 4, 45, 45]),
])
def test_async_infer_dynamic_network_3_requests(shapes):
function = create_encoder([3, 4, 20, 20])
net = ng.function_to_cnn(function)
net.reshape({"data": [3, 4, (20, 50), (20, 50)]})
ie_core = ie.IECore()
ie_core.register_plugin("ov_template_plugin", "TEMPLATE")
exec_net = ie_core.load_network(net, "TEMPLATE", num_requests=3)
for i,request in enumerate(exec_net.requests):
request.async_infer({"data": np.ones(shapes[i])})
for i,request in enumerate(exec_net.requests):
status = request.wait(ie.WaitMode.RESULT_READY)
assert status == ie.StatusCode.OK
assert request.output_blobs['out'].tensor_desc.dims == shapes[i]
@pytest.mark.template_plugin
def test_set_blob_with_incorrect_name():
function = create_encoder([4, 4, 20, 20])
net = ng.function_to_cnn(function)
ie_core = ie.IECore()
ie_core.register_plugin("ov_template_plugin", "TEMPLATE")
exec_net = ie_core.load_network(net, "TEMPLATE")
tensor_desc = exec_net.requests[0].input_blobs["data"].tensor_desc
tensor_desc.dims = [4, 4, 20, 20]
blob = ie.Blob(tensor_desc)
with pytest.raises(RuntimeError) as e:
exec_net.requests[0].set_blob("incorrect_name", blob)
assert f"Failed to find input or output with name: 'incorrect_name'" in str(e.value)
@pytest.mark.template_plugin
def test_set_blob_with_incorrect_size():
function = create_encoder([4, 4, 20, 20])
net = ng.function_to_cnn(function)
ie_core = ie.IECore()
ie_core.register_plugin("ov_template_plugin", "TEMPLATE")
exec_net = ie_core.load_network(net, "TEMPLATE")
tensor_desc = exec_net.requests[0].input_blobs["data"].tensor_desc
tensor_desc.dims = [tensor_desc.dims[0]*2, 4, 20, 20]
blob = ie.Blob(tensor_desc)
print(exec_net.requests[0].output_blobs)
with pytest.raises(RuntimeError) as e:
exec_net.requests[0].set_blob("data", blob)
assert f"Input blob size is not equal network input size" in str(e.value)
with pytest.raises(RuntimeError) as e:
exec_net.requests[0].set_blob("out", blob)
assert f"Output blob size is not equal network output size" in str(e.value)
@pytest.mark.template_plugin
def test_set_blob_after_async_infer():
function = create_encoder([1, 4, 20, 20])
net = ng.function_to_cnn(function)
net.reshape({"data": [(0, 5), 4, 20, 20]})
ie_core = ie.IECore()
ie_core.register_plugin("ov_template_plugin", "TEMPLATE")
exec_net = ie_core.load_network(net, "TEMPLATE")
request = exec_net.requests[0]
tensor_desc = request.input_blobs['data'].tensor_desc
tensor_desc.dims = [2, 4, 20, 20]
blob = ie.Blob(tensor_desc)
request.async_infer({"data": np.ones([4, 4, 20, 20])})
with pytest.raises(RuntimeError) as e:
request.set_blob("data", blob)
assert "REQUEST_BUSY" in str(e.value)
request.wait()
|
[] |
[] |
[
"TEST_DEVICE"
] |
[]
|
["TEST_DEVICE"]
|
python
| 1 | 0 | |
models/step_status_test.go
|
package models
import (
"fmt"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
func TestShouldCreateANewStepStatus(t *testing.T) {
stepStatusRequest := StepsStatus{
ID: "1",
ServiceRequestID: uuid.New(),
WorkflowName: "testWF",
Status: STATUS_STARTED,
CreatedAt: time.Now(),
TotalTimeInMs: 0,
StepName: "firstStep",
Reason: "Success",
}
stepStatusResponse := CreateStepsStatus(stepStatusRequest)
assert.NotEmpty(t, stepStatusResponse.ID)
assert.NotEmpty(t, stepStatusResponse.ServiceRequestID)
assert.NotNil(t, stepStatusResponse.CreatedAt)
assert.Equal(t, stepStatusResponse.WorkflowName, stepStatusRequest.WorkflowName, fmt.Sprintf("Expected Step status workflow name to be %s but was %s", stepStatusRequest.WorkflowName, stepStatusResponse.WorkflowName))
assert.Equal(t, stepStatusResponse.Status, stepStatusRequest.Status, fmt.Sprintf("Expected Step status's status to be %s but was %s", stepStatusRequest.Status, stepStatusResponse.Status))
assert.Equal(t, stepStatusResponse.StepName, stepStatusRequest.StepName, fmt.Sprintf("Expected Step status step name to be %s but was %s", stepStatusRequest.StepName, stepStatusResponse.StepName))
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
students/k3342/practical_works/Nikonchuk_Anna/Pr1/django_project_nikonchuk_back/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project_nikonchuk.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
heron/tools/cli/src/python/execute.py
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' execute.py '''
import contextlib
import os
import subprocess
import shlex
import tarfile
import tempfile
import traceback
from heron.common.src.python.utils.log import Log
from heron.tools.cli.src.python.result import SimpleResult, ProcessResult, Status
from heron.common.src.python import pex_loader
from heron.tools.cli.src.python import opts
from heron.tools.cli.src.python import jars
from heron.tools.common.src.python.utils import config
################################################################################
def heron_class(class_name, lib_jars, extra_jars=None, args=None, java_defines=None):
'''
Execute a heron class given the args and the jars needed for class path
:param class_name:
:param lib_jars:
:param extra_jars:
:param args:
:param java_defines:
:return:
'''
# default optional params to empty list if not provided
if extra_jars is None:
extra_jars = []
if args is None:
args = []
if java_defines is None:
java_defines = []
# Format all java -D options that need to be passed while running
# the class locally.
java_opts = ['-D' + opt for opt in java_defines]
java_path = config.get_java_path()
if java_path is None:
err_context = "Unable to find java command"
return SimpleResult(Status.InvocationError, err_context)
# Construct the command line for the sub process to run
# Because of the way Python execute works,
# the java opts must be passed as part of the list
all_args = [java_path, "-client", "-Xmx1g"] + \
java_opts + \
["-cp", config.get_classpath(extra_jars + lib_jars)]
all_args += [class_name] + list(args)
# set heron_config environment variable
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
# print the verbose message
Log.debug("Invoking class using command: `%s`", ' '.join(shlex.quote(a) for a in all_args))
Log.debug("Heron options: {%s}", str(heron_env["HERON_OPTIONS"]))
# invoke the command with subprocess and print error message, if any
# pylint: disable=consider-using-with
process = subprocess.Popen(all_args, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
# stdout message has the information Java program sends back
# stderr message has extra information, such as debugging message
return ProcessResult(process)
def heron_tar(class_name, topology_tar, arguments, tmpdir_root, java_defines):
'''
:param class_name:
:param topology_tar:
:param arguments:
:param tmpdir_root:
:param java_defines:
:return:
'''
# Extract tar to a tmp folder.
tmpdir = tempfile.mkdtemp(dir=tmpdir_root, prefix='tmp')
with contextlib.closing(tarfile.open(topology_tar)) as tar:
tar.extractall(path=tmpdir)
# A tar generated by pants has all dependency jars under libs/
# in addition to the topology jar at top level. Pants keeps
# filename for jar and tar the same except for extension.
topology_jar = os.path.basename(topology_tar).replace(".tar.gz", "").replace(".tar", "") + ".jar"
extra_jars = [
os.path.join(tmpdir, topology_jar),
os.path.join(tmpdir, "*"),
os.path.join(tmpdir, "libs/*")
]
lib_jars = config.get_heron_libs(jars.topology_jars())
# Now execute the class
return heron_class(class_name, lib_jars, extra_jars, arguments, java_defines)
def heron_pex(topology_pex, topology_class_name, args=None):
"""Use a topology defined in a PEX."""
Log.debug("Importing %s from %s", topology_class_name, topology_pex)
if topology_class_name == '-':
# loading topology by running its main method (if __name__ == "__main__")
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
cmd = [topology_pex]
if args is not None:
cmd.extend(args)
Log.debug("Invoking class using command: ``%s''", ' '.join(cmd))
Log.debug('Heron options: {%s}', str(heron_env['HERON_OPTIONS']))
# invoke the command with subprocess and print error message, if any
# pylint: disable=consider-using-with
process = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
# pylint: disable=fixme
# todo(rli): improve python topology submission workflow
return ProcessResult(process)
try:
# loading topology from Topology's subclass (no main method)
# to support specifying the name of topology
Log.debug("args: %s", args)
if args is not None and isinstance(args, (list, tuple)) and len(args) > 0:
opts.set_config('cmdline.topology.name', args[0])
os.environ["HERON_OPTIONS"] = opts.get_heron_config()
Log.debug("Heron options: {%s}", os.environ["HERON_OPTIONS"])
pex_loader.load_pex(topology_pex)
topology_class = pex_loader.import_and_get_class(topology_pex, topology_class_name)
topology_class.write()
return SimpleResult(Status.Ok)
except Exception as ex:
Log.debug(traceback.format_exc())
err_context = f"Topology {topology_class_name} failed to be loaded from the given pex: {ex}"
return SimpleResult(Status.HeronError, err_context)
return None
# pylint: disable=superfluous-parens
def heron_cpp(topology_binary, args=None):
Log.debug("Executing %s", topology_binary)
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
cmd = [topology_binary]
if args is not None:
cmd.extend(args)
Log.debug("Invoking binary using command: ``%s''", ' '.join(cmd))
Log.debug('Heron options: {%s}', str(heron_env['HERON_OPTIONS']))
print(f"""Invoking class using command: ``{' '.join(cmd)}''""")
print(f"Heron options: {str(heron_env['HERON_OPTIONS'])}")
# invoke the command with subprocess and print error message, if any
# pylint: disable=consider-using-with
proc = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
return ProcessResult(proc)
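# Illustrative usage sketch (not part of the original module). The class name,
# jar path, and options below are hypothetical placeholders; a real caller would
# obtain them from the Heron CLI configuration.
if __name__ == '__main__':
    result = heron_class(
        class_name="org.apache.heron.scheduler.SubmitterMain",  # hypothetical class
        lib_jars=["/tmp/heron/lib/scheduler.jar"],               # hypothetical jar path
        args=["--verbose"],
        java_defines=["heron.logging.directory=/tmp/heron-logs"],
    )
    # result is a ProcessResult (or SimpleResult on invocation error); status and
    # output handling are left to the caller, see result.py in this package.
    print(result)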
|
[] |
[] |
[
"HERON_OPTIONS"
] |
[]
|
["HERON_OPTIONS"]
|
python
| 1 | 0 | |
swig_muesli/muesli/da/setup_da.py
|
import os
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from Cython.Distutils import build_ext
import numpy as np
from os.path import join as pjoin
from setup_cuda import cuda_setup
mpi_compile_args = os.popen("mpic++ --showme:compile").read().strip().split(' ')
mpi_link_args = os.popen("mpic++ --showme:link").read().strip().split(' ')
def find_in_path(name, path):
"""Find a file in a search path"""
# Adapted from http://code.activestate.com/recipes/52224
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
nvcc = find_in_path('nvcc', os.environ['PATH'])
if isinstance(nvcc, str):
print('CUDA')
# setup(name='PackageName',
# author='Nina Herrmann',
# version='1.0',
# description='This is a package for Muesli',
# ext_modules=cythonize(cuda_setup.get_module()),
# cmdclass={'build_ext': cuda_setup.custom_build_ext()}
# )
else:
module = Extension('_da', sources=['da.cxx', 'da_wrap.cxx'],
include_dirs=[np.get_include(), 'src'],
library_dirs=['/usr/include/boost/'],
language="c++",
swig_opts=['-c++'],
libraries=['/usr/include/boost/chrono'],
extra_compile_args=(["-fopenmp"] + mpi_compile_args),
extra_link_args=(["-fopenmp"] + mpi_link_args)
)
setup(name='da',
author='Nina Herrmann',
version='1.0',
description='This is a package for Muesli',
ext_modules=[module],
py_modules=["da"]
)
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
dask_saturn/core.py
|
"""
Saturn-specific override of ``dask.distributed.deploy.SpecCluster``
See https://distributed.dask.org/en/latest/_modules/distributed/deploy/spec.html
for details on the parent class.
"""
import os
import json
import logging
import warnings
import weakref
from distutils.version import LooseVersion
from typing import Any, Dict, List, Optional
from urllib.parse import urljoin
import requests
from distributed import Client, SpecCluster
from distributed.security import Security
from tornado.ioloop import PeriodicCallback
from .backoff import ExpBackoff
from .external import _security, ExternalConnection # noqa # pylint: disable=unused-import
from .plugins import SaturnSetup
from .settings import Settings
DEFAULT_WAIT_TIMEOUT_SECONDS = 1200
log = logging.getLogger("dask-saturn")
if log.level == logging.NOTSET:
logging.basicConfig()
log.setLevel(logging.INFO)
class SaturnCluster(SpecCluster):
"""
Create a ``SaturnCluster``, an extension of ``distributed.SpecCluster``
specific to Saturn Cloud.
:param n_workers: Number of workers to provision for the cluster.
:param cluster_url: URL for the "cluster" service running in kubernetes. This
component of a ``Dask`` cluster in kubernetes knows how to create pods
for workers and the scheduler.
:param worker_size: A string with the size to use for each worker. A list of
valid sizes and their details can be obtained with ``dask_saturn.describe_sizes()``.
If no size is provided, this will default to the size configured for Jupyter
in your Saturn Cloud project.
:param worker_is_spot: Flag to indicate if workers should be started on Spot Instances nodes.
Added in dask-saturn 0.1.0, Saturn 2020.08.28.
:param scheduler_size: A string with the size to use for the scheduler. A list of
valid sizes and their details can be obtained with ``dask_saturn.describe_sizes()``.
If no size is provided, this will default to the size configured for Jupyter
in your Saturn Cloud project.
:param nprocs: The number of ``dask-worker`` processes run on each host in a distributed
cluster.
:param nthreads: The number of threads available to each ``dask-worker`` process.
:param scheduler_service_wait_timeout: The maximum amount of time, in seconds, that
``SaturnCluster`` will wait for the scheduler to respond before deciding
that the scheduler is taking too long. By default, this is set to 1200 (20
minutes). Setting it to a lower value will help you catch problems earlier,
but may also lead to false positives if you don't give the cluster
enough time to start up.
:param shutdown_on_close: Whether or not the cluster should be automatically destroyed
when its calling process is destroyed. Set this parameter to ``True`` if you want
your cluster to shutdown when the work is done.
By default, this is ``False`` if the cluster is attached to a Jupyter server,
deployment, or job. If the cluster is attached to a Prefect Cloud flow run, this option
is always set to ``True``.
Note: ``autoclose`` is accepted as an alias for now, but will be removed in the future.
"""
# pylint: disable=unused-argument,super-init-not-called,too-many-instance-attributes
_sizes = None
_instances = weakref.WeakSet()
def __init__(
self,
*args,
n_workers: Optional[int] = None,
cluster_url: Optional[str] = None,
worker_size: Optional[str] = None,
worker_is_spot: Optional[bool] = None,
scheduler_size: Optional[str] = None,
nprocs: Optional[int] = None,
nthreads: Optional[int] = None,
scheduler_service_wait_timeout: int = DEFAULT_WAIT_TIMEOUT_SECONDS,
shutdown_on_close: bool = False,
**kwargs,
):
if "external_connection" in kwargs:
raise RuntimeError(
"Passing external_connection as a key word argument is no longer supported. "
"Instead, set the env vars: ``SATURN_TOKEN`` and ``SATURN_BASE_URL`` "
"as indicated in the Saturn Cloud UI. If those env vars are set, an external "
"connection will be automatically set up."
)
if "autoclose" in kwargs:
warnings.warn(
"``autoclose`` has been deprecated and will be removed in a future version. "
"Please use ``shutdown_on_close`` instead.",
category=FutureWarning,
)
shutdown_on_close = kwargs.pop("autoclose")
self.settings = Settings()
if self.settings.is_prefect:
# defaults to True if related to prefect, else defaults to False
shutdown_on_close = True
if cluster_url is None:
self._start(
n_workers=n_workers,
worker_size=worker_size,
worker_is_spot=worker_is_spot,
scheduler_size=scheduler_size,
nprocs=nprocs,
nthreads=nthreads,
scheduler_service_wait_timeout=scheduler_service_wait_timeout,
)
else:
self.cluster_url = cluster_url if cluster_url.endswith("/") else cluster_url + "/"
self.dask_cluster_id = self.cluster_url.rstrip("/").split("/")[-1]
info = self._get_info()
self._name = self.dask_cluster_id
self._dashboard_link = info["dashboard_link"]
self._scheduler_address = info["scheduler_address"]
self.loop = None
self.periodic_callbacks: Dict[str, PeriodicCallback] = {}
self.shutdown_on_close = shutdown_on_close
self._adaptive = None
self._instances.add(self)
if self.settings.is_external:
self.security = _security(self.settings, self.dask_cluster_id)
else:
self.security = Security()
try:
self.register_default_plugin()
except Exception as e: # pylint: disable=broad-except
log.warning(
f"Registering default plugin failed: {e} Hint: you might "
"have a different dask-saturn version on your dask cluster."
)
def __await__(self):
async def _():
pass
return _().__await__()
@classmethod
def reset(
cls,
n_workers: Optional[int] = None,
worker_size: Optional[str] = None,
worker_is_spot: Optional[bool] = None,
scheduler_size: Optional[str] = None,
nprocs: Optional[int] = None,
nthreads: Optional[int] = None,
) -> "SaturnCluster":
"""Return a SaturnCluster
Destroy existing Dask cluster attached to the Jupyter Notebook or
Custom Deployment and recreate it with the given configuration.
For documentation on this method's parameters, see
``help(SaturnCluster)``.
"""
log.info("Resetting cluster.")
settings = Settings()
url = urljoin(settings.url, "api/dask_clusters/reset")
cluster_config = {
"n_workers": n_workers,
"worker_size": worker_size,
"worker_is_spot": worker_is_spot,
"scheduler_size": scheduler_size,
"nprocs": nprocs,
"nthreads": nthreads,
}
# only send kwargs that are explicitly set by user
cluster_config = {k: v for k, v in cluster_config.items() if v is not None}
response = requests.post(url, data=json.dumps(cluster_config), headers=settings.headers)
if not response.ok:
raise ValueError(response.json()["message"])
return cls(**cluster_config)
@property
def status(self) -> Optional[str]:
"""
Status of the cluster
"""
if self.cluster_url is None:
return "closed"
url = urljoin(self.cluster_url, "status")
response = requests.get(url, headers=self.settings.headers)
if not response.ok:
return self._get_pod_status()
return response.json()["status"]
def _get_pod_status(self) -> Optional[str]:
"""
Status of the KubeCluster pod.
"""
response = requests.get(self.cluster_url[:-1], headers=self.settings.headers)
if response.ok:
return response.json()["status"]
else:
return None
@property
def _supports_scaling(self) -> bool:
"""
Property required by ``SpecCluster``, which describes
whether the cluster can be scaled after it's created.
"""
return True
@property
def scheduler_address(self) -> str:
"""
Address for the Dask scheduler.
"""
return self._scheduler_address
@property
def dashboard_link(self) -> str:
"""
Link to the Dask dashboard. This is customized
to be inside of a Saturn project.
"""
return self._dashboard_link
@property
def scheduler_info(self) -> Dict[str, Any]:
"""
Information about the scheduler. Raises a
ValueError if the scheduler is in a bad state.
"""
url = urljoin(self.cluster_url, "scheduler_info")
response = requests.get(url, headers=self.settings.headers)
if not response.ok:
if self._get_pod_status() in ["error", "closed", "stopped"]:
for pc in self.periodic_callbacks.values():
pc.stop()
raise ValueError("Cluster is not running.")
raise ValueError(response.json()["message"])
try:
from distributed.objects import SchedulerInfo # pylint: disable=import-outside-toplevel
return SchedulerInfo(response.json())
except ImportError:
pass
return response.json()
# pylint: disable=invalid-overridden-method
def _start(
self,
n_workers: Optional[int] = None,
worker_size: Optional[str] = None,
worker_is_spot: Optional[bool] = None,
scheduler_size: Optional[str] = None,
nprocs: Optional[int] = None,
nthreads: Optional[int] = None,
scheduler_service_wait_timeout: int = DEFAULT_WAIT_TIMEOUT_SECONDS,
) -> None:
"""
Start a cluster that has already been defined for the project.
For documentation on this method's parameters, see
``help(SaturnCluster)``.
"""
url = urljoin(self.settings.url, "api/dask_clusters")
url_query = ""
if self.settings.is_external:
url_query = "?is_external=true"
self.cluster_url: Optional[str] = None
self._validate_sizes(worker_size, scheduler_size)
cluster_config = {
"n_workers": n_workers,
"worker_size": worker_size,
"worker_is_spot": worker_is_spot,
"scheduler_size": scheduler_size,
"nprocs": nprocs,
"nthreads": nthreads,
}
if self.settings.SATURN_VERSION >= LooseVersion("2021.08.16"):
cluster_config["prefectcloudflowrun_id"] = os.environ.get(
"PREFECT__CONTEXT__FLOW_RUN_ID"
)
# only send kwargs that are explicitly set by user
cluster_config = {k: v for k, v in cluster_config.items() if v is not None}
expBackoff = ExpBackoff(wait_timeout=scheduler_service_wait_timeout)
logged_warnings: Dict[str, bool] = {}
while self.cluster_url is None:
response = requests.post(
url + url_query,
data=json.dumps(cluster_config),
headers=self.settings.headers,
)
if not response.ok:
raise ValueError(response.json()["message"])
data = response.json()
for warning in data.get("warnings", []):
if not logged_warnings.get(warning):
logged_warnings[warning] = True
log.warning(warning)
if data["status"] == "error":
raise ValueError(" ".join(data["errors"]))
elif data["status"] == "ready":
self.dask_cluster_id = data["id"]
self.cluster_url = f"{url}/{self.dask_cluster_id}/"
log.info("Cluster is ready")
break
else:
log.info(f"Starting cluster. Status: {data['status']}")
if self.cluster_url is None:
if not expBackoff.wait():
raise ValueError(
"Retry in a few minutes. Check status in Saturn User Interface"
)
def register_default_plugin(self):
"""Register the default SaturnSetup plugin to all workers."""
log.info("Registering default plugins")
outputs = {}
with Client(self) as client:
output = client.register_worker_plugin(SaturnSetup())
outputs.update(output)
output_statuses = [v["status"] for v in outputs.values()]
if "OK" in output_statuses:
log.info("Success!")
elif "repeat" in output_statuses:
log.info("Success!")
elif len(output_statuses) == 0:
log.warning("No workers started up.")
else:
log.warning("Registering default plugins failed. Please check logs for more info.")
def _get_info(self) -> Dict[str, Any]:
url = urljoin(self.cluster_url, "info")
if self.settings.is_external:
url += "?is_external=true"
response = requests.get(url, headers=self.settings.headers)
if not response.ok:
raise ValueError(response.json()["message"])
return response.json()
def scale(self, n: int) -> None:
"""
Scale cluster to have ``n`` workers
:param n: number of workers to scale to.
"""
url = urljoin(self.cluster_url, "scale")
response = requests.post(url, json.dumps({"n": n}), headers=self.settings.headers)
if not response.ok:
raise ValueError(response.json()["message"])
def adapt(self, minimum: int, maximum: int) -> None:
"""Adapt cluster to have between ``minimum`` and ``maximum`` workers"""
url = urljoin(self.cluster_url, "adapt")
response = requests.post(
url,
json.dumps({"minimum": minimum, "maximum": maximum}),
headers=self.settings.headers,
)
if not response.ok:
raise ValueError(response.json()["message"])
def close(self) -> None:
"""
Defines what should be done when closing the cluster.
"""
url = urljoin(self.cluster_url, "close")
response = requests.post(url, headers=self.settings.headers)
if not response.ok:
raise ValueError(response.json()["message"])
for pc in self.periodic_callbacks.values():
pc.stop()
@property
def asynchronous(self) -> bool:
"""
Whether or not the cluster's ``_start`` method
is synchronous.
``SaturnCluster`` uses a synchronous ``_start()``
because it has to be called in the class
constructor, which is intended to be used interactively
in a notebook.
"""
return False
def __enter__(self) -> "SaturnCluster":
"""
magic method used to allow the use of ``SaturnCluster``
with a context manager.
.. code-block:: python
with SaturnCluster() as cluster:
"""
return self
def __exit__(self, typ, value, traceback) -> None:
"""
magic method that defines what should be done
when exiting a context manager's context. in other words
at the end of this
.. code-block:: python
with SaturnCluster() as cluster:
"""
if self.shutdown_on_close:
self.close()
# pylint: disable=access-member-before-definition
def _validate_sizes(
self,
worker_size: Optional[str] = None,
scheduler_size: Optional[str] = None,
):
"""Validate the options provided"""
if self._sizes is None:
self._sizes = list_sizes()
errors = []
if worker_size is not None:
if worker_size not in self._sizes:
errors.append(
f"Proposed worker_size: {worker_size} is not a valid option. "
f"Options are: {self._sizes}."
)
if scheduler_size is not None:
if scheduler_size not in self._sizes:
errors.append(
f"Proposed scheduler_size: {scheduler_size} is not a valid option. "
f"Options are: {self._sizes}."
)
if len(errors) > 0:
raise ValueError(" ".join(errors))
@classmethod
def from_name(cls, name: str):
"""Create an instance of this class to represent an existing cluster by name."""
log.warning(
"Only one dask cluster can be associated with a particular resource, so "
f"user provided name: {name} will not be used."
)
return cls()
def _options() -> Dict[str, Any]:
settings = Settings()
url = urljoin(settings.url, "api/dask_clusters/info")
response = requests.get(url, headers=settings.headers)
if not response.ok:
raise ValueError(response.json()["message"])
return response.json()["server_options"]
def list_sizes() -> List[str]:
"""Return a list of valid size options for worker_size and scheduler size."""
return [size["name"] for size in _options()["size"]]
def describe_sizes() -> Dict[str, str]:
"""Return a dict of size options with a description."""
return {size["name"]: size["display"] for size in _options()["size"]}
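# Illustrative usage sketch (not part of the original module). Assumes SATURN_TOKEN
# and SATURN_BASE_URL are configured as described above; the worker size below is a
# hypothetical value and should be chosen from describe_sizes().
if __name__ == "__main__":
    with SaturnCluster(n_workers=2, worker_size="large", shutdown_on_close=True) as cluster:
        with Client(cluster) as client:  # Client is imported from distributed above
            print(cluster.dashboard_link)
            print(client.run(lambda: "worker reachable"))
    # shutdown_on_close=True makes the cluster's __exit__ call close(), so the
    # Dask cluster is destroyed when the with-block ends.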
|
[] |
[] |
[
"PREFECT__CONTEXT__FLOW_RUN_ID\"\n "
] |
[]
|
["PREFECT__CONTEXT__FLOW_RUN_ID\"\n "]
|
python
| 1 | 0 | |
.github/actions/hello-world/main.go
|
package main
import (
"fmt"
"os"
)
func main() {
// Access Inputs as environment vars
firstGreeting := os.Getenv("INPUT_FIRSTGREETING")
secondGreeting := os.Getenv("INPUT_SECONDGREETING")
thirdGreeting := os.Getenv("INPUT_THIRDGREETING")
// Use those inputs in the actions logic
fmt.Println("Hello " + firstGreeting)
fmt.Println("Hello 2do" + secondGreeting)
// Sometimes inputs are not "required" and we can build around that
if thirdGreeting != "" {
fmt.Println("Hello " + thirdGreeting)
}
}
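// Illustrative note (not part of the original action): GitHub Actions exposes every
// input declared in action.yml as an environment variable named INPUT_<UPPERCASED
// INPUT NAME>, which is why the code above reads INPUT_FIRSTGREETING and friends.
// The binary can therefore be exercised locally with hypothetical values, e.g.:
//
//	INPUT_FIRSTGREETING=Mona INPUT_SECONDGREETING=Hubot go run main.go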
|
[
"\"INPUT_FIRSTGREETING\"",
"\"INPUT_SECONDGREETING\"",
"\"INPUT_THIRDGREETING\""
] |
[] |
[
"INPUT_SECONDGREETING",
"INPUT_FIRSTGREETING",
"INPUT_THIRDGREETING"
] |
[]
|
["INPUT_SECONDGREETING", "INPUT_FIRSTGREETING", "INPUT_THIRDGREETING"]
|
go
| 3 | 0 | |
samples/simpleweb/integration_test.go
|
package test
import (
"crypto/tls"
"os"
"strings"
"testing"
"time"
http_helper "github.com/gruntwork-io/terratest/modules/http-helper"
"github.com/gruntwork-io/terratest/modules/terraform"
)
func TestTerraformHttpExample(t *testing.T) {
t.Parallel()
// workspace := fmt.Sprintf("sw")
terraformOptions := &terraform.Options{
// The path to where our Terraform code is located
TerraformDir: ".",
Upgrade: true,
BackendConfig: map[string]interface{}{
"storage_account_name": os.Getenv("TF_VAR_remote_state_account"),
"container_name": os.Getenv("TF_VAR_remote_state_container"),
},
Vars: map[string]interface{}{
"name": "simpleweb",
"location": "eastus2",
"randomization_level": 8,
"docker_registry_server_url": "mcr.microsoft.com",
"deployment_targets": []interface{}{
map[string]interface{}{
"app_name": "app",
"image_name": "azuredocs/aci-helloworld",
"image_release_tag_prefix": "latest",
},
},
},
}
// Setup a TLS configuration to submit with the helper, a blank struct is acceptable
tlsConfig := tls.Config{}
maxRetries := 30
timeBetweenRetries := 5 * time.Second
// At the end of the test, run `terraform destroy` to clean up any resources that were created
// defer terraform.Destroy(t, terraformOptions)
// terraform.Init(t, terraformOptions)
// terraform.WorkspaceSelectOrNew(t, terraformOptions, workspace)
// terraform.Plan(t, terraformOptions)
// terraform.Apply(t, terraformOptions)
homepage := terraform.Output(t, terraformOptions, "app_service_default_hostname")
http_helper.HttpGetWithRetryWithCustomValidation(t, homepage, &tlsConfig, maxRetries, timeBetweenRetries, func(status int, content string) bool {
return status == 200 &&
strings.Contains(content, "Welcome to Azure Container Instances!")
})
}
|
[
"\"TF_VAR_remote_state_account\"",
"\"TF_VAR_remote_state_container\""
] |
[] |
[
"TF_VAR_remote_state_account",
"TF_VAR_remote_state_container"
] |
[]
|
["TF_VAR_remote_state_account", "TF_VAR_remote_state_container"]
|
go
| 2 | 0 | |
test/functional/test_framework/test_framework.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import logging
import argparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "bitcoin_func_test_"
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class BitcoinTestMetaClass(type):
"""Metaclass for BitcoinTestFramework.
Ensures that any attempt to register a subclass of `BitcoinTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'BitcoinTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("BitcoinTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("BitcoinTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = False
self.bind_to_localhost_only = True
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
parser.add_argument("--perf", dest="perf", default=False, action="store_true",
help="profile running nodes with perf for the duration of the test")
self.add_options(parser)
self.options = parser.parse_args()
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.config = config
self.options.bitcoind = os.getenv("BITCOIND", default=config["environment"]["BUILDDIR"] + '/src/particld' + config["environment"]["EXEEXT"])
self.options.bitcoincli = os.getenv("BITCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/particl-cli' + config["environment"]["EXEEXT"])
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
success = TestStatus.FAILED
try:
if self.options.usecli:
if not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_if_no_cli()
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: bitcoinds were not stopped and may still be running")
should_clean_up = (
not self.options.nocleanup and
not self.options.noshutdown and
success != TestStatus.FAILED and
not self.options.perf
)
if should_clean_up:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
elif self.options.perf:
self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
cleanup_tree_on_exit = False
else:
self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
cleanup_tree_on_exit = False
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
block_hash = self.nodes[0].generate(1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
chain_info = n.getblockchaininfo()
assert_equal(chain_info["blocks"], 200)
assert_equal(chain_info["initialblockdownload"], False)
def import_deterministic_coinbase_privkeys(self):
for n in self.nodes:
try:
n.getwalletinfo()
except JSONRPCException as e:
assert str(e).startswith('Method not found')
continue
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
rpchost=rpchost,
timewait=self.rpc_timeout,
bitcoind=binary[i],
bitcoin_cli=self.options.bitcoincli,
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
start_perf=self.options.perf,
))
def start_node(self, i, *args, **kwargs):
"""Start a bitcoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple bitcoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr='', wait=0):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node(expected_stderr, wait=wait)
self.nodes[i].wait_until_stopped()
def stop_nodes(self, wait=0):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_blocks(self, nodes=None, **kwargs):
sync_blocks(nodes or self.nodes, **kwargs)
def sync_mempools(self, nodes=None, **kwargs):
sync_mempools(nodes or self.nodes, **kwargs)
def sync_all(self, nodes=None, **kwargs):
self.sync_blocks(nodes, **kwargs)
self.sync_mempools(nodes, **kwargs)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 199-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [self.options.bitcoind, "-datadir=" + datadir, '-disablewallet']
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.cachedir, i),
extra_conf=["bind=127.0.0.1"],
extra_args=[],
rpchost=None,
timewait=self.rpc_timeout,
bitcoind=self.options.bitcoind,
bitcoin_cli=self.options.bitcoincli,
coverage_dir=None,
cwd=self.options.tmpdir,
))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 199-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# The 4th node gets only 24 immature blocks so that the very last
# block in the cache does not age too much (have an old tip age).
# This is needed so that we are out of IBD when the test starts,
# see the tip age check in IsInitialBlockDownload().
for i in range(8):
self.nodes[0].generatetoaddress(25 if i != 7 else 24, self.nodes[i % 4].get_deterministic_priv_key().address)
self.sync_blocks()
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
os.rmdir(cache_path(i, 'wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path(i)):
if entry not in ['chainstate', 'blocks']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("bitcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_cli(self):
"""Skip the running test if bitcoin-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("bitcoin-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether bitcoin-cli was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_CLI")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_WALLET")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_ZMQ")
def is_usbdevice_compiled(self):
"""Checks whether the usbdevice module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_USBDEVICE")
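# Illustrative sketch (not part of the framework): a minimal test script subclasses
# BitcoinTestFramework and overrides only set_test_params() and run_test(), as
# enforced by BitcoinTestMetaClass above. The class name and assertions here are
# hypothetical.
class ExampleTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True

    def run_test(self):
        self.log.info("Mine 10 blocks on node 0 and check that node 1 sees them")
        self.nodes[0].generate(10)
        self.sync_all()
        assert_equal(self.nodes[1].getblockcount(), 10)


if __name__ == '__main__':
    ExampleTest().main()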
|
[] |
[] |
[
"BITCOINCLI",
"PATH",
"BITCOIND"
] |
[]
|
["BITCOINCLI", "PATH", "BITCOIND"]
|
python
| 3 | 0 | |
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_test.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"testing"
lookup "github.com/vmware/govmomi/lookup/simulator"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/simulator"
"github.com/vmware/govmomi/simulator/vpx"
sts "github.com/vmware/govmomi/sts/simulator"
"github.com/vmware/govmomi/vapi/rest"
vapi "github.com/vmware/govmomi/vapi/simulator"
"github.com/vmware/govmomi/vapi/tags"
"github.com/vmware/govmomi/vim25/mo"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/fixtures"
)
// localhostCert was generated from crypto/tls/generate_cert.go with the following command:
// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
var localhostCert = `-----BEGIN CERTIFICATE-----
MIIBjzCCATmgAwIBAgIRAKpi2WmTcFrVjxrl5n5YDUEwDQYJKoZIhvcNAQELBQAw
EjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2
MDAwMFowEjEQMA4GA1UEChMHQWNtZSBDbzBcMA0GCSqGSIb3DQEBAQUAA0sAMEgC
QQC9fEbRszP3t14Gr4oahV7zFObBI4TfA5i7YnlMXeLinb7MnvT4bkfOJzE6zktn
59zP7UiHs3l4YOuqrjiwM413AgMBAAGjaDBmMA4GA1UdDwEB/wQEAwICpDATBgNV
HSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MC4GA1UdEQQnMCWCC2V4
YW1wbGUuY29thwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3DQEBCwUA
A0EAUsVE6KMnza/ZbodLlyeMzdo7EM/5nb5ywyOxgIOCf0OOLHsPS9ueGLQX9HEG
//yjTXuhNcUugExIjM/AIwAZPQ==
-----END CERTIFICATE-----`
// localhostKey is the private key for localhostCert.
var localhostKey = `-----BEGIN RSA PRIVATE KEY-----
MIIBOwIBAAJBAL18RtGzM/e3XgavihqFXvMU5sEjhN8DmLtieUxd4uKdvsye9Phu
R84nMTrOS2fn3M/tSIezeXhg66quOLAzjXcCAwEAAQJBAKcRxH9wuglYLBdI/0OT
BLzfWPZCEw1vZmMR2FF1Fm8nkNOVDPleeVGTWoOEcYYlQbpTmkGSxJ6ya+hqRi6x
goECIQDx3+X49fwpL6B5qpJIJMyZBSCuMhH4B7JevhGGFENi3wIhAMiNJN5Q3UkL
IuSvv03kaPR5XVQ99/UeEetUgGvBcABpAiBJSBzVITIVCGkGc7d+RCf49KTCIklv
bGWObufAR8Ni4QIgWpILjW8dkGg8GOUZ0zaNA6Nvt6TIv2UWGJ4v5PoV98kCIQDx
rIiZs5QbKdycsv9gQJzwQAogC8o04X3Zz3dsoX+h4A==
-----END RSA PRIVATE KEY-----`
func configFromEnv() (cfg VSphereConfig, ok bool) {
var InsecureFlag bool
var err error
cfg.Global.VCenterIP = os.Getenv("VSPHERE_VCENTER")
cfg.Global.VCenterPort = os.Getenv("VSPHERE_VCENTER_PORT")
cfg.Global.User = os.Getenv("VSPHERE_USER")
cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD")
cfg.Global.Datacenter = os.Getenv("VSPHERE_DATACENTER")
cfg.Network.PublicNetwork = os.Getenv("VSPHERE_PUBLIC_NETWORK")
cfg.Global.DefaultDatastore = os.Getenv("VSPHERE_DATASTORE")
cfg.Disk.SCSIControllerType = os.Getenv("VSPHERE_SCSICONTROLLER_TYPE")
cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR")
cfg.Global.VMName = os.Getenv("VSPHERE_VM_NAME")
if os.Getenv("VSPHERE_INSECURE") != "" {
InsecureFlag, err = strconv.ParseBool(os.Getenv("VSPHERE_INSECURE"))
} else {
InsecureFlag = false
}
if err != nil {
log.Fatal(err)
}
cfg.Global.InsecureFlag = InsecureFlag
ok = (cfg.Global.VCenterIP != "" &&
cfg.Global.User != "")
return
}
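// Illustrative note (not part of the original file): configFromEnv only reports
// ok when at least VSPHERE_VCENTER and VSPHERE_USER are exported, so a hypothetical
// run against a real vCenter would set, e.g.:
//
//	export VSPHERE_VCENTER=vc.example.com VSPHERE_USER=administrator@vsphere.local \
//	       VSPHERE_PASSWORD=secret VSPHERE_DATACENTER=dc1 VSPHERE_INSECURE=true
//
// otherwise the tests fall back to the vcsim-based configFromSim below.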
// configFromSim starts a vcsim instance and returns config for use against the vcsim instance.
// The vcsim instance is configured with an empty tls.Config.
func configFromSim() (VSphereConfig, func()) {
return configFromSimWithTLS(new(tls.Config), true)
}
// configFromSimWithTLS starts a vcsim instance and returns config for use against the vcsim instance.
// The vcsim instance is configured with a tls.Config. The returned client
// config can be configured to allow/decline insecure connections.
func configFromSimWithTLS(tlsConfig *tls.Config, insecureAllowed bool) (VSphereConfig, func()) {
var cfg VSphereConfig
model := simulator.VPX()
err := model.Create()
if err != nil {
log.Fatal(err)
}
model.Service.TLS = tlsConfig
s := model.Service.NewServer()
// STS simulator
path, handler := sts.New(s.URL, vpx.Setting)
model.Service.ServeMux.Handle(path, handler)
// vAPI simulator
path, handler = vapi.New(s.URL, vpx.Setting)
model.Service.ServeMux.Handle(path, handler)
// Lookup Service simulator
model.Service.RegisterSDK(lookup.New())
cfg.Global.InsecureFlag = insecureAllowed
cfg.Global.VCenterIP = s.URL.Hostname()
cfg.Global.VCenterPort = s.URL.Port()
cfg.Global.User = s.URL.User.Username()
cfg.Global.Password, _ = s.URL.User.Password()
cfg.Global.Datacenter = vclib.TestDefaultDatacenter
cfg.Network.PublicNetwork = vclib.TestDefaultNetwork
cfg.Global.DefaultDatastore = vclib.TestDefaultDatastore
cfg.Disk.SCSIControllerType = os.Getenv("VSPHERE_SCSICONTROLLER_TYPE")
cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR")
cfg.Global.VMName = os.Getenv("VSPHERE_VM_NAME")
if cfg.Global.WorkingDir == "" {
cfg.Global.WorkingDir = "vm" // top-level Datacenter.VmFolder
}
uuid := simulator.Map.Any("VirtualMachine").(*simulator.VirtualMachine).Config.Uuid
getVMUUID = func() (string, error) { return uuid, nil }
return cfg, func() {
getVMUUID = GetVMUUID
s.Close()
model.Remove()
}
}
// configFromEnvOrSim returns config from configFromEnv if set, otherwise returns configFromSim.
func configFromEnvOrSim() (VSphereConfig, func()) {
cfg, ok := configFromEnv()
if ok {
return cfg, func() {}
}
return configFromSim()
}
func TestReadConfig(t *testing.T) {
_, err := readConfig(nil)
if err == nil {
t.Errorf("Should fail when no config is provided: %s", err)
}
cfg, err := readConfig(strings.NewReader(`
[Global]
server = 0.0.0.0
port = 443
user = user
password = password
insecure-flag = true
datacenter = us-west
vm-uuid = 1234
vm-name = vmname
ca-file = /some/path/to/a/ca.pem
`))
if err != nil {
t.Fatalf("Should succeed when a valid config is provided: %s", err)
}
if cfg.Global.VCenterIP != "0.0.0.0" {
t.Errorf("incorrect vcenter ip: %s", cfg.Global.VCenterIP)
}
if cfg.Global.Datacenter != "us-west" {
t.Errorf("incorrect datacenter: %s", cfg.Global.Datacenter)
}
if cfg.Global.VMUUID != "1234" {
t.Errorf("incorrect vm-uuid: %s", cfg.Global.VMUUID)
}
if cfg.Global.VMName != "vmname" {
t.Errorf("incorrect vm-name: %s", cfg.Global.VMName)
}
if cfg.Global.CAFile != "/some/path/to/a/ca.pem" {
t.Errorf("incorrect ca-file: %s", cfg.Global.CAFile)
}
}
func TestNewVSphere(t *testing.T) {
cfg, ok := configFromEnv()
if !ok {
t.Skipf("No config found in environment")
}
_, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
}
func TestVSphereLogin(t *testing.T) {
cfg, cleanup := configFromEnvOrSim()
defer cleanup()
// Create vSphere configuration object
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create vSphere client
vcInstance, ok := vs.vsphereInstanceMap[cfg.Global.VCenterIP]
if !ok {
t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
}
err = vcInstance.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to connect to vSphere: %s", err)
}
vcInstance.conn.Logout(ctx)
}
func TestVSphereLoginByToken(t *testing.T) {
cfg, cleanup := configFromSim()
defer cleanup()
// Configure for SAML token auth
cfg.Global.User = localhostCert
cfg.Global.Password = localhostKey
// Create vSphere configuration object
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
ctx := context.Background()
// Create vSphere client
vcInstance, ok := vs.vsphereInstanceMap[cfg.Global.VCenterIP]
if !ok {
t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
}
err = vcInstance.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to connect to vSphere: %s", err)
}
vcInstance.conn.Logout(ctx)
}
func TestVSphereLoginWithCaCert(t *testing.T) {
caCertPEM, err := ioutil.ReadFile(fixtures.CaCertPath)
if err != nil {
t.Fatalf("Could not read ca cert from file")
}
serverCert, err := tls.LoadX509KeyPair(fixtures.ServerCertPath, fixtures.ServerKeyPath)
if err != nil {
t.Fatalf("Could not load server cert and server key from files: %#v", err)
}
certPool := x509.NewCertPool()
if ok := certPool.AppendCertsFromPEM(caCertPEM); !ok {
t.Fatalf("Cannot add CA to CAPool")
}
tlsConfig := tls.Config{
Certificates: []tls.Certificate{serverCert},
RootCAs: certPool,
}
cfg, cleanup := configFromSimWithTLS(&tlsConfig, false)
defer cleanup()
cfg.Global.CAFile = fixtures.CaCertPath
// Create vSphere configuration object
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
ctx := context.Background()
// Create vSphere client
vcInstance, ok := vs.vsphereInstanceMap[cfg.Global.VCenterIP]
if !ok {
t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
}
err = vcInstance.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to connect to vSphere: %s", err)
}
vcInstance.conn.Logout(ctx)
}
func TestZonesNoConfig(t *testing.T) {
_, ok := new(VSphere).Zones()
if ok {
t.Fatalf("Zones() should return false without VCP configured")
}
}
func TestZones(t *testing.T) {
// Any context will do
ctx := context.Background()
// Create a vcsim instance
cfg, cleanup := configFromSim()
defer cleanup()
// Create vSphere configuration object
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
// Configure region and zone categories
vs.cfg.Labels.Region = "k8s-region"
vs.cfg.Labels.Zone = "k8s-zone"
// Create vSphere client
vsi, ok := vs.vsphereInstanceMap[cfg.Global.VCenterIP]
if !ok {
t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
}
err = vsi.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to connect to vSphere: %s", err)
}
// Lookup Datacenter for this test's Workspace
dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
if err != nil {
t.Fatal(err)
}
// Lookup VM's host where we'll attach tags
host, err := dc.GetHostByVMUUID(ctx, vs.vmUUID)
if err != nil {
t.Fatal(err)
}
// Property Collector instance
pc := property.DefaultCollector(vsi.conn.Client)
// Tag manager instance
m := tags.NewManager(rest.NewClient(vsi.conn.Client))
// Create a region category
regionID, err := m.CreateCategory(ctx, &tags.Category{Name: vs.cfg.Labels.Region})
if err != nil {
t.Fatal(err)
}
// Create a region tag
regionID, err = m.CreateTag(ctx, &tags.Tag{CategoryID: regionID, Name: "k8s-region-US"})
if err != nil {
t.Fatal(err)
}
// Create a zone category
zoneID, err := m.CreateCategory(ctx, &tags.Category{Name: vs.cfg.Labels.Zone})
if err != nil {
t.Fatal(err)
}
// Create a zone tag
zoneID, err = m.CreateTag(ctx, &tags.Tag{CategoryID: zoneID, Name: "k8s-zone-US-CA1"})
if err != nil {
t.Fatal(err)
}
// Create a random category
randomID, err := m.CreateCategory(ctx, &tags.Category{Name: "random-cat"})
if err != nil {
t.Fatal(err)
}
// Create a random tag
randomID, err = m.CreateTag(ctx, &tags.Tag{CategoryID: randomID, Name: "random-tag"})
if err != nil {
t.Fatal(err)
}
// Attach a random tag to VM's host
if err = m.AttachTag(ctx, randomID, host); err != nil {
t.Fatal(err)
}
// Expecting Zones() to return true, indicating VCP supports the Zones interface
zones, ok := vs.Zones()
if !ok {
t.Fatalf("zones=%t", ok)
}
// GetZone() tests, covering error and success paths
tests := []struct {
name string // name of the test for logging
fail bool // expect GetZone() to return error if true
prep func() // prepare vCenter state for the test
}{
{"no tags", true, func() {
// no prep
}},
{"no zone tag", true, func() {
if err = m.AttachTag(ctx, regionID, host); err != nil {
t.Fatal(err)
}
}},
{"host tags set", false, func() {
if err = m.AttachTag(ctx, zoneID, host); err != nil {
t.Fatal(err)
}
}},
{"host tags removed", true, func() {
if err = m.DetachTag(ctx, zoneID, host); err != nil {
t.Fatal(err)
}
if err = m.DetachTag(ctx, regionID, host); err != nil {
t.Fatal(err)
}
}},
{"dc region, cluster zone", false, func() {
var h mo.HostSystem
if err = pc.RetrieveOne(ctx, host.Reference(), []string{"parent"}, &h); err != nil {
t.Fatal(err)
}
// Attach region tag to Datacenter
if err = m.AttachTag(ctx, regionID, dc); err != nil {
t.Fatal(err)
}
// Attach zone tag to Cluster
if err = m.AttachTag(ctx, zoneID, h.Parent); err != nil {
t.Fatal(err)
}
}},
}
for _, test := range tests {
test.prep()
zone, err := zones.GetZone(ctx)
if test.fail {
if err == nil {
t.Errorf("%s: expected error", test.name)
} else {
t.Logf("%s: expected error=%s", test.name, err)
}
} else {
if err != nil {
t.Errorf("%s: %s", test.name, err)
}
t.Logf("zone=%#v", zone)
}
}
}
func TestInstances(t *testing.T) {
cfg, ok := configFromEnv()
if !ok {
t.Skipf("No config found in environment")
}
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
i, ok := vs.Instances()
if !ok {
t.Fatalf("Instances() returned false")
}
nodeName, err := vs.CurrentNodeName(context.TODO(), "")
if err != nil {
t.Fatalf("CurrentNodeName() failed: %s", err)
}
nonExistingVM := types.NodeName(rand.String(15))
instanceID, err := i.InstanceID(context.TODO(), nodeName)
if err != nil {
t.Fatalf("Instances.InstanceID(%s) failed: %s", nodeName, err)
}
t.Logf("Found InstanceID(%s) = %s\n", nodeName, instanceID)
_, err = i.InstanceID(context.TODO(), nonExistingVM)
if err == cloudprovider.InstanceNotFound {
t.Logf("VM %s was not found as expected\n", nonExistingVM)
} else if err == nil {
t.Fatalf("Instances.InstanceID did not fail as expected, VM %s was found", nonExistingVM)
} else {
t.Fatalf("Instances.InstanceID did not fail as expected, err: %v", err)
}
addrs, err := i.NodeAddresses(context.TODO(), nodeName)
if err != nil {
t.Fatalf("Instances.NodeAddresses(%s) failed: %s", nodeName, err)
}
found := false
for _, addr := range addrs {
if addr.Type == v1.NodeHostName {
found = true
}
}
if found == false {
t.Fatalf("NodeAddresses does not report hostname, %s %s", nodeName, addrs)
}
t.Logf("Found NodeAddresses(%s) = %s\n", nodeName, addrs)
}
func TestVolumes(t *testing.T) {
cfg, ok := configFromEnv()
if !ok {
t.Skipf("No config found in environment")
}
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
nodeName, err := vs.CurrentNodeName(context.TODO(), "")
if err != nil {
t.Fatalf("CurrentNodeName() failed: %s", err)
}
volumeOptions := &vclib.VolumeOptions{
CapacityKB: 1 * 1024 * 1024,
Tags: nil,
Name: "kubernetes-test-volume-" + rand.String(10),
DiskFormat: "thin"}
volPath, err := vs.CreateVolume(volumeOptions)
if err != nil {
t.Fatalf("Cannot create a new VMDK volume: %v", err)
}
_, err = vs.AttachDisk(volPath, "", "")
if err != nil {
t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, nodeName, err)
}
err = vs.DetachDisk(volPath, "")
if err != nil {
t.Fatalf("Cannot detach disk(%s) from VM(%s): %v", volPath, nodeName, err)
}
// todo: Deleting a volume after detach currently not working through API or UI (vSphere)
// err = vs.DeleteVolume(volPath)
// if err != nil {
// t.Fatalf("Cannot delete VMDK volume %s: %v", volPath, err)
// }
}
func TestSecretVSphereConfig(t *testing.T) {
var vs *VSphere
var (
username = "user"
password = "password"
)
var testcases = []struct {
testName string
conf string
expectedIsSecretProvided bool
expectedUsername string
expectedPassword string
expectedError error
expectedThumbprints map[string]string
}{
{
testName: "Username and password with old configuration",
conf: `[Global]
server = 0.0.0.0
user = user
password = password
datacenter = us-west
working-dir = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
},
{
testName: "SecretName and SecretNamespace in old configuration",
conf: `[Global]
server = 0.0.0.0
datacenter = us-west
secret-name = "vccreds"
secret-namespace = "kube-system"
working-dir = kubernetes
`,
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "SecretName and SecretNamespace with Username and Password in old configuration",
conf: `[Global]
server = 0.0.0.0
user = user
password = password
datacenter = us-west
secret-name = "vccreds"
secret-namespace = "kube-system"
working-dir = kubernetes
`,
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "SecretName and SecretNamespace with Username missing in old configuration",
conf: `[Global]
server = 0.0.0.0
password = password
datacenter = us-west
secret-name = "vccreds"
secret-namespace = "kube-system"
working-dir = kubernetes
`,
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "SecretNamespace missing with Username and Password in old configuration",
conf: `[Global]
server = 0.0.0.0
user = user
password = password
datacenter = us-west
secret-name = "vccreds"
working-dir = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
},
{
testName: "SecretNamespace and Username missing in old configuration",
conf: `[Global]
server = 0.0.0.0
password = password
datacenter = us-west
secret-name = "vccreds"
working-dir = kubernetes
`,
expectedError: ErrUsernameMissing,
},
{
testName: "SecretNamespace and Password missing in old configuration",
conf: `[Global]
server = 0.0.0.0
user = user
datacenter = us-west
secret-name = "vccreds"
working-dir = kubernetes
`,
expectedError: ErrPasswordMissing,
},
{
testName: "SecretNamespace, Username and Password missing in old configuration",
conf: `[Global]
server = 0.0.0.0
datacenter = us-west
secret-name = "vccreds"
working-dir = kubernetes
`,
expectedError: ErrUsernameMissing,
},
{
testName: "Username and password with new configuration but username and password in global section",
conf: `[Global]
user = user
password = password
datacenter = us-west
[VirtualCenter "0.0.0.0"]
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
},
{
testName: "Username and password with new configuration, username and password in virtualcenter section",
conf: `[Global]
server = 0.0.0.0
port = 443
insecure-flag = true
datacenter = us-west
[VirtualCenter "0.0.0.0"]
user = user
password = password
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
},
{
testName: "SecretName and SecretNamespace with new configuration",
conf: `[Global]
server = 0.0.0.0
secret-name = "vccreds"
secret-namespace = "kube-system"
datacenter = us-west
[VirtualCenter "0.0.0.0"]
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "SecretName and SecretNamespace with Username missing in new configuration",
conf: `[Global]
server = 0.0.0.0
port = 443
insecure-flag = true
datacenter = us-west
secret-name = "vccreds"
secret-namespace = "kube-system"
[VirtualCenter "0.0.0.0"]
password = password
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "virtual centers with a thumbprint",
conf: `[Global]
server = global
user = user
password = password
datacenter = us-west
thumbprint = "thumbprint:global"
working-dir = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
expectedThumbprints: map[string]string{
"global": "thumbprint:global",
},
},
{
testName: "Multiple virtual centers with different thumbprints",
conf: `[Global]
user = user
password = password
datacenter = us-west
[VirtualCenter "0.0.0.0"]
thumbprint = thumbprint:0
[VirtualCenter "no_thumbprint"]
[VirtualCenter "1.1.1.1"]
thumbprint = thumbprint:1
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
expectedThumbprints: map[string]string{
"0.0.0.0": "thumbprint:0",
"1.1.1.1": "thumbprint:1",
},
},
{
testName: "Multiple virtual centers use the global CA cert",
conf: `[Global]
user = user
password = password
datacenter = us-west
ca-file = /some/path/to/my/trusted/ca.pem
[VirtualCenter "0.0.0.0"]
user = user
password = password
[VirtualCenter "1.1.1.1"]
user = user
password = password
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
},
}
for _, testcase := range testcases {
t.Logf("Executing Testcase: %s", testcase.testName)
cfg, err := readConfig(strings.NewReader(testcase.conf))
if err != nil {
t.Fatalf("Should succeed when a valid config is provided: %s", err)
}
vs, err = buildVSphereFromConfig(cfg)
if err != testcase.expectedError {
t.Fatalf("Should succeed when a valid config is provided: %s", err)
}
if err != nil {
continue
}
if vs.isSecretInfoProvided != testcase.expectedIsSecretProvided {
t.Fatalf("SecretName and SecretNamespace was expected in config %s. error: %s",
testcase.conf, err)
}
if !testcase.expectedIsSecretProvided {
for _, vsInstance := range vs.vsphereInstanceMap {
if vsInstance.conn.Username != testcase.expectedUsername {
t.Fatalf("Expected username %s doesn't match actual username %s in config %s. error: %s",
testcase.expectedUsername, vsInstance.conn.Username, testcase.conf, err)
}
if vsInstance.conn.Password != testcase.expectedPassword {
t.Fatalf("Expected password %s doesn't match actual password %s in config %s. error: %s",
testcase.expectedPassword, vsInstance.conn.Password, testcase.conf, err)
}
}
}
		// Check if all the expected thumbprints are configured
for instanceName, expectedThumbprint := range testcase.expectedThumbprints {
instanceConfig, ok := vs.vsphereInstanceMap[instanceName]
if !ok {
t.Fatalf("Could not find configuration for instance %s", instanceName)
}
if actualThumbprint := instanceConfig.conn.Thumbprint; actualThumbprint != expectedThumbprint {
t.Fatalf(
"Expected thumbprint for instance '%s' to be '%s', got '%s'",
instanceName, expectedThumbprint, actualThumbprint,
)
}
}
		// Check if all connections are configured with the global CA certificate
if expectedCaPath := cfg.Global.CAFile; expectedCaPath != "" {
for name, instance := range vs.vsphereInstanceMap {
if actualCaPath := instance.conn.CACert; actualCaPath != expectedCaPath {
t.Fatalf(
"Expected CA certificate path for instance '%s' to be the globally configured one ('%s'), got '%s'",
name, expectedCaPath, actualCaPath,
)
}
}
}
}
}
|
[
"\"VSPHERE_VCENTER\"",
"\"VSPHERE_VCENTER_PORT\"",
"\"VSPHERE_USER\"",
"\"VSPHERE_PASSWORD\"",
"\"VSPHERE_DATACENTER\"",
"\"VSPHERE_PUBLIC_NETWORK\"",
"\"VSPHERE_DATASTORE\"",
"\"VSPHERE_SCSICONTROLLER_TYPE\"",
"\"VSPHERE_WORKING_DIR\"",
"\"VSPHERE_VM_NAME\"",
"\"VSPHERE_INSECURE\"",
"\"VSPHERE_INSECURE\"",
"\"VSPHERE_SCSICONTROLLER_TYPE\"",
"\"VSPHERE_WORKING_DIR\"",
"\"VSPHERE_VM_NAME\""
] |
[] |
[
"VSPHERE_VCENTER",
"VSPHERE_WORKING_DIR",
"VSPHERE_INSECURE",
"VSPHERE_DATACENTER",
"VSPHERE_SCSICONTROLLER_TYPE",
"VSPHERE_USER",
"VSPHERE_PASSWORD",
"VSPHERE_PUBLIC_NETWORK",
"VSPHERE_DATASTORE",
"VSPHERE_VM_NAME",
"VSPHERE_VCENTER_PORT"
] |
[]
|
["VSPHERE_VCENTER", "VSPHERE_WORKING_DIR", "VSPHERE_INSECURE", "VSPHERE_DATACENTER", "VSPHERE_SCSICONTROLLER_TYPE", "VSPHERE_USER", "VSPHERE_PASSWORD", "VSPHERE_PUBLIC_NETWORK", "VSPHERE_DATASTORE", "VSPHERE_VM_NAME", "VSPHERE_VCENTER_PORT"]
|
go
| 11 | 0 | |
example_test.go
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package os_test
import (
"fmt"
"log"
"github.com/rasa/os" /* "os" */
"time"
)
func ExampleOpenFile() {
f, err := os.OpenFile("notes.txt", os.O_RDWR|os.O_CREATE, 0755)
if err != nil {
log.Fatal(err)
}
if err := f.Close(); err != nil {
log.Fatal(err)
}
}
func ExampleChmod() {
if err := os.Chmod("some-filename", 0644); err != nil {
log.Fatal(err)
}
}
func ExampleChtimes() {
mtime := time.Date(2006, time.February, 1, 3, 4, 5, 0, time.UTC)
atime := time.Date(2007, time.March, 2, 4, 5, 6, 0, time.UTC)
if err := os.Chtimes("some-filename", atime, mtime); err != nil {
log.Fatal(err)
}
}
func ExampleFileMode() {
fi, err := os.Stat("some-filename")
if err != nil {
log.Fatal(err)
}
switch mode := fi.Mode(); {
case mode.IsRegular():
fmt.Println("regular file")
case mode.IsDir():
fmt.Println("directory")
case mode&os.ModeSymlink != 0:
fmt.Println("symbolic link")
case mode&os.ModeNamedPipe != 0:
fmt.Println("named pipe")
}
}
func ExampleIsNotExist() {
filename := "a-nonexistent-file"
if _, err := os.Stat(filename); os.IsNotExist(err) {
fmt.Printf("file does not exist")
}
// Output:
// file does not exist
}
func init() {
os.Setenv("USER", "gopher")
os.Setenv("HOME", "/usr/gopher")
os.Unsetenv("GOPATH")
}
func ExampleExpandEnv() {
fmt.Println(os.ExpandEnv("$USER lives in ${HOME}."))
// Output:
// gopher lives in /usr/gopher.
}
func ExampleLookupEnv() {
show := func(key string) {
val, ok := os.LookupEnv(key)
if !ok {
fmt.Printf("%s not set\n", key)
} else {
fmt.Printf("%s=%s\n", key, val)
}
}
show("USER")
show("GOPATH")
// Output:
// USER=gopher
// GOPATH not set
}
func ExampleGetenv() {
fmt.Printf("%s lives in %s.\n", os.Getenv("USER"), os.Getenv("HOME"))
// Output:
// gopher lives in /usr/gopher.
}
func ExampleUnsetenv() {
os.Setenv("TMPDIR", "/my/tmp")
defer os.Unsetenv("TMPDIR")
}
|
[
"\"USER\"",
"\"HOME\""
] |
[] |
[
"USER",
"HOME"
] |
[]
|
["USER", "HOME"]
|
go
| 2 | 0 | |
codegen/service/convert.go
|
package service
import (
"fmt"
"go/build"
"os"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"goa.design/goa/v3/codegen"
"goa.design/goa/v3/expr"
)
// convertData contains the info needed to render convert and create functions.
type convertData struct {
// Name is the name of the function.
Name string
// ReceiverTypeRef is a reference to the receiver type.
ReceiverTypeRef string
// TypeRef is a reference to the external type.
TypeRef string
// TypeName is the name of the external type.
TypeName string
// Code is the function code.
Code string
}
func commonPath(sep byte, paths ...string) string {
// Handle special cases.
switch len(paths) {
case 0:
return ""
case 1:
return path.Clean(paths[0])
}
// Note, we treat string as []byte, not []rune as is often
// done in Go. (And sep as byte, not rune). This is because
	// most/all supported OS' treat paths as strings of non-zero
// bytes. A filename may be displayed as a sequence of Unicode
// runes (typically encoded as UTF-8) but paths are
// not required to be valid UTF-8 or in any normalized form
// (e.g. "é" (U+00C9) and "é" (U+0065,U+0301) are different
	// file names).
c := []byte(path.Clean(paths[0]))
// We add a trailing sep to handle the case where the
// common prefix directory is included in the path list
// (e.g. /home/user1, /home/user1/foo, /home/user1/bar).
// path.Clean will have cleaned off trailing / separators with
// the exception of the root directory, "/" (in which case we
	// make it "//", but this will get fixed up to "/" below).
c = append(c, sep)
// Ignore the first path since it's already in c
for _, v := range paths[1:] {
// Clean up each path before testing it
v = path.Clean(v) + string(sep)
// Find the first non-common byte and truncate c
if len(v) < len(c) {
c = c[:len(v)]
}
for i := 0; i < len(c); i++ {
if v[i] != c[i] {
c = c[:i]
break
}
}
}
// Remove trailing non-separator characters and the final separator
for i := len(c) - 1; i >= 0; i-- {
if c[i] == sep {
c = c[:i]
break
}
}
return string(c)
}
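// Illustrative behaviour (a sketch, not part of the original source): with
// sep = '/', commonPath('/', "/home/user1", "/home/user1/foo", "/home/user1/bar")
// yields "/home/user1", while commonPath('/', "/home/user1/foo", "/home/user2/bar")
// yields "/home", since the walk truncates at the last separator before the
// first differing byte.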
// getPkgImport returns the correct import path of a package.
// It's needed because the "reflect" package provides the binary import path
// ("goa.design/goa/vendor/some/package") for vendored packages
// instead of the source import path ("some/package")
func getPkgImport(pkg, cwd string) string {
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = build.Default.GOPATH
}
gosrc := path.Join(filepath.ToSlash(gopath), "src")
cwd = filepath.ToSlash(cwd)
// check for go modules
if !strings.HasPrefix(cwd, gosrc) {
return pkg
}
pkgpath := path.Join(gosrc, pkg)
parentpath := commonPath(os.PathSeparator, cwd, pkgpath)
// check for external packages
if parentpath == gosrc {
return pkg
}
rootpkg := string(parentpath[len(gosrc)+1:])
// check for vendored packages
vendorPrefix := path.Join(rootpkg, "vendor")
if strings.HasPrefix(pkg, vendorPrefix) {
return string(pkg[len(vendorPrefix)+1:])
}
return pkg
}
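// Illustrative behaviour (a sketch, assuming a classic GOPATH layout such as
// GOPATH=/go with the caller at /go/src/example.com/svc): a vendored package
// reported by reflect as "example.com/svc/vendor/some/package" is rewritten to
// "some/package", while builds outside GOPATH (i.e. go modules) and packages
// outside the caller's root package are returned unchanged.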
func getExternalTypeInfo(external interface{}) (string, string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", "", err
}
pkg := reflect.TypeOf(external)
pkgImport := getPkgImport(pkg.PkgPath(), cwd)
alias := strings.Split(pkg.String(), ".")[0]
return pkgImport, alias, nil
}
// ConvertFile returns the file containing the conversion and creation functions
// if any.
func ConvertFile(root *expr.RootExpr, service *expr.ServiceExpr) (*codegen.File, error) {
// Filter conversion and creation functions that are relevant for this
// service
svc := Services.Get(service.Name)
var conversions, creations []*expr.TypeMap
for _, c := range root.Conversions {
for _, m := range service.Methods {
if ut, ok := m.Payload.Type.(expr.UserType); ok {
if ut.Name() == c.User.Name() {
conversions = append(conversions, c)
break
}
}
}
for _, m := range service.Methods {
if ut, ok := m.Result.Type.(expr.UserType); ok {
if ut.Name() == c.User.Name() {
conversions = append(conversions, c)
break
}
}
}
for _, t := range svc.userTypes {
if c.User.Name() == t.Name {
conversions = append(conversions, c)
break
}
}
}
for _, c := range root.Creations {
for _, m := range service.Methods {
if ut, ok := m.Payload.Type.(expr.UserType); ok {
if ut.Name() == c.User.Name() {
creations = append(creations, c)
break
}
}
}
for _, m := range service.Methods {
if ut, ok := m.Result.Type.(expr.UserType); ok {
if ut.Name() == c.User.Name() {
creations = append(creations, c)
break
}
}
}
for _, t := range svc.userTypes {
if c.User.Name() == t.Name {
creations = append(creations, c)
break
}
}
}
if len(conversions) == 0 && len(creations) == 0 {
return nil, nil
}
// Retrieve external packages info
ppm := make(map[string]string)
for _, c := range conversions {
pkgImport, alias, err := getExternalTypeInfo(c.External)
if err != nil {
return nil, err
}
ppm[pkgImport] = alias
}
for _, c := range creations {
pkgImport, alias, err := getExternalTypeInfo(c.External)
if err != nil {
return nil, err
}
ppm[pkgImport] = alias
}
pkgs := make([]*codegen.ImportSpec, len(ppm))
i := 0
for pp, alias := range ppm {
pkgs[i] = &codegen.ImportSpec{Name: alias, Path: pp}
i++
}
// Build header section
pkgs = append(pkgs, &codegen.ImportSpec{Path: "context"})
pkgs = append(pkgs, codegen.GoaImport(""))
path := filepath.Join(codegen.Gendir, codegen.SnakeCase(service.Name), "convert.go")
sections := []*codegen.SectionTemplate{
codegen.Header(service.Name+" service type conversion functions", svc.PkgName, pkgs),
}
var (
names = map[string]struct{}{}
transFuncs []*codegen.TransformFunctionData
)
// Build conversion sections if any
for _, c := range conversions {
var dt expr.DataType
if err := buildDesignType(&dt, reflect.TypeOf(c.External), c.User); err != nil {
return nil, err
}
t := reflect.TypeOf(c.External)
tgtPkg := t.String()
tgtPkg = tgtPkg[:strings.Index(tgtPkg, ".")]
srcCtx := typeContext("", svc.Scope)
tgtCtx := codegen.NewAttributeContext(false, false, false, tgtPkg, codegen.NewNameScope())
srcAtt := &expr.AttributeExpr{Type: c.User}
code, tf, err := codegen.GoTransform(
&expr.AttributeExpr{Type: c.User}, &expr.AttributeExpr{Type: dt},
"t", "v", srcCtx, tgtCtx, "transform", true)
if err != nil {
return nil, err
}
transFuncs = codegen.AppendHelpers(transFuncs, tf)
base := "ConvertTo" + t.Name()
name := uniquify(base, names)
ref := t.String()
if expr.IsObject(c.User) {
ref = "*" + ref
}
data := convertData{
Name: name,
ReceiverTypeRef: svc.Scope.GoTypeRef(srcAtt),
TypeName: t.Name(),
TypeRef: ref,
Code: code,
}
sections = append(sections, &codegen.SectionTemplate{
Name: "convert-to",
Source: convertT,
Data: data,
})
}
// Build creation sections if any
for _, c := range creations {
var dt expr.DataType
if err := buildDesignType(&dt, reflect.TypeOf(c.External), c.User); err != nil {
return nil, err
}
t := reflect.TypeOf(c.External)
srcPkg := t.String()
srcPkg = srcPkg[:strings.Index(srcPkg, ".")]
srcCtx := codegen.NewAttributeContext(false, false, false, srcPkg, codegen.NewNameScope())
tgtCtx := typeContext("", svc.Scope)
tgtAtt := &expr.AttributeExpr{Type: c.User}
code, tf, err := codegen.GoTransform(
&expr.AttributeExpr{Type: dt}, tgtAtt,
"v", "temp", srcCtx, tgtCtx, "transform", true)
if err != nil {
return nil, err
}
transFuncs = codegen.AppendHelpers(transFuncs, tf)
base := "CreateFrom" + t.Name()
name := uniquify(base, names)
ref := t.String()
if expr.IsObject(c.User) {
ref = "*" + ref
}
data := convertData{
Name: name,
ReceiverTypeRef: codegen.NewNameScope().GoTypeRef(tgtAtt),
TypeRef: ref,
Code: code,
}
sections = append(sections, &codegen.SectionTemplate{
Name: "create-from",
Source: createT,
Data: data,
})
}
// Build transformation helper functions section if any.
seen := make(map[string]struct{})
for _, tf := range transFuncs {
if _, ok := seen[tf.Name]; ok {
continue
}
seen[tf.Name] = struct{}{}
sections = append(sections, &codegen.SectionTemplate{
Name: "convert-create-helper",
Source: transformHelperT,
Data: tf,
})
}
return &codegen.File{Path: path, SectionTemplates: sections}, nil
}
// uniquify checks if base is a key of taken and if not returns it. Otherwise
// uniquify appends integers to base starting at 2 and incremented by 1 each
// time a key already exists for the value. uniquify returns the unique value
// and updates taken with it.
func uniquify(base string, taken map[string]struct{}) string {
name := base
idx := 2
_, ok := taken[name]
for ok {
name = base + strconv.Itoa(idx)
idx++
_, ok = taken[name]
}
taken[name] = struct{}{}
return name
}
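// Illustrative behaviour (a sketch): with taken = {"ConvertToUser": {}},
// uniquify("ConvertToUser", taken) returns "ConvertToUser2" and records it in
// taken, so a further call with the same base would return "ConvertToUser3".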
type dtRec struct {
path string
seen map[string]expr.DataType
}
func (r dtRec) append(p string) dtRec {
r.path += p
return r
}
// buildDesignType builds a user type that represents the given external type.
// ref is the user type the data type being built is converted to or created
// from. It's used to compute the non-generated type field names and can be nil
// if no matching attribute exists.
func buildDesignType(dt *expr.DataType, t reflect.Type, ref expr.DataType, recs ...dtRec) error {
// check compatibility
if ref != nil {
if err := compatible(ref, t); err != nil {
return fmt.Errorf("%q: %s", t.Name(), err)
}
}
// handle recursive data structures
var rec dtRec
if recs != nil {
rec = recs[0]
if s, ok := rec.seen[t.Name()]; ok {
*dt = s
return nil
}
} else {
rec.path = "<value>"
rec.seen = make(map[string]expr.DataType)
}
switch t.Kind() {
case reflect.Bool:
*dt = expr.Boolean
case reflect.Int:
*dt = expr.Int
case reflect.Int32:
*dt = expr.Int32
case reflect.Int64:
*dt = expr.Int64
case reflect.Uint:
*dt = expr.UInt
case reflect.Uint32:
*dt = expr.UInt32
case reflect.Uint64:
*dt = expr.UInt64
case reflect.Float32:
*dt = expr.Float32
case reflect.Float64:
*dt = expr.Float64
case reflect.String:
*dt = expr.String
case reflect.Slice:
e := t.Elem()
if e.Kind() == reflect.Uint8 {
*dt = expr.Bytes
return nil
}
var eref expr.DataType
if ref != nil {
eref = expr.AsArray(ref).ElemType.Type
}
var elem expr.DataType
if err := buildDesignType(&elem, e, eref, rec.append("[0]")); err != nil {
return fmt.Errorf("%s", err)
}
*dt = &expr.Array{ElemType: &expr.AttributeExpr{Type: elem}}
case reflect.Map:
var kref, vref expr.DataType
if ref != nil {
m := expr.AsMap(ref)
kref = m.KeyType.Type
vref = m.ElemType.Type
}
var kt expr.DataType
if err := buildDesignType(&kt, t.Key(), kref, rec.append(".key")); err != nil {
return fmt.Errorf("%s", err)
}
var vt expr.DataType
if err := buildDesignType(&vt, t.Elem(), vref, rec.append(".value")); err != nil {
return fmt.Errorf("%s", err)
}
*dt = &expr.Map{KeyType: &expr.AttributeExpr{Type: kt}, ElemType: &expr.AttributeExpr{Type: vt}}
case reflect.Struct:
var oref *expr.Object
if ref != nil {
oref = expr.AsObject(ref)
}
// Build list of fields that should not be ignored.
var fields []reflect.StructField
for i := 0; i < t.NumField(); i++ {
f := t.FieldByIndex([]int{i})
atn, _ := attributeName(oref, f.Name)
if oref != nil {
if at := oref.Attribute(atn); at != nil {
if m := at.Meta["struct.field.external"]; len(m) > 0 {
if m[0] == "-" {
continue
}
}
}
}
fields = append(fields, f)
}
		// Avoid infinite recursion
obj := expr.Object(make([]*expr.NamedAttributeExpr, len(fields)))
ut := &expr.UserTypeExpr{
AttributeExpr: &expr.AttributeExpr{Type: &obj},
TypeName: t.Name(),
UID: t.PkgPath() + "#" + t.Name(),
}
*dt = ut
rec.seen[t.Name()] = ut
var required []string
for i, f := range fields {
recf := rec.append("." + f.Name)
atn, fn := attributeName(oref, f.Name)
var aref expr.DataType
if oref != nil {
if at := oref.Attribute(atn); at != nil {
aref = at.Type
}
}
var fdt expr.DataType
if f.Type.Kind() == reflect.Ptr {
if err := buildDesignType(&fdt, f.Type.Elem(), aref, recf); err != nil {
return fmt.Errorf("%q.%s: %s", t.Name(), f.Name, err)
}
if expr.IsArray(fdt) {
return fmt.Errorf("%s: field of type pointer to slice are not supported, use slice instead", rec.path)
}
if expr.IsMap(fdt) {
return fmt.Errorf("%s: field of type pointer to map are not supported, use map instead", rec.path)
}
} else if f.Type.Kind() == reflect.Struct {
return fmt.Errorf("%s: fields of type struct must use pointers", recf.path)
} else {
if isPrimitive(f.Type) {
required = append(required, atn)
}
if err := buildDesignType(&fdt, f.Type, aref, rec.append("."+f.Name)); err != nil {
return fmt.Errorf("%q.%s: %s", t.Name(), f.Name, err)
}
}
name := atn
if fn != "" {
name = name + ":" + fn
}
obj[i] = &expr.NamedAttributeExpr{
Name: name,
Attribute: &expr.AttributeExpr{Type: fdt},
}
}
if len(required) > 0 {
ut.Validation = &expr.ValidationExpr{Required: required}
}
return nil
case reflect.Ptr:
rec.path = "*(" + rec.path + ")"
if err := buildDesignType(dt, t.Elem(), ref, rec); err != nil {
return err
}
if !expr.IsObject(*dt) {
return fmt.Errorf("%s: only pointer to struct can be converted", rec.path)
}
default:
*dt = expr.Any
}
return nil
}
// attributeName computes the name of the attribute for the given field name and
// object that must contain the matching attribute.
func attributeName(obj *expr.Object, name string) (string, string) {
if obj == nil {
return name, ""
}
// first look for a "struct.field.external" meta
for _, nat := range *obj {
if m := nat.Attribute.Meta["struct.field.external"]; len(m) > 0 {
if m[0] == name {
return nat.Name, name
}
}
}
// next look for an exact match
for _, nat := range *obj {
if nat.Name == name {
return name, ""
}
}
// next try to lower case first letter
ln := strings.ToLower(name[0:1]) + name[1:]
for _, nat := range *obj {
if nat.Name == ln {
return ln, name
}
}
// finally look for a snake case representation
sn := codegen.SnakeCase(name)
for _, nat := range *obj {
if nat.Name == sn {
return sn, name
}
}
// no match, return field name
return name, ""
}
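// Illustrative lookup order (a sketch with hypothetical names): for a Go field
// "AccountID", attributeName first honours a "struct.field.external" meta set
// to "AccountID", then an attribute literally named "AccountID", then
// "accountID" (lower-cased first letter), then "account_id" (snake case); only
// when none of these match is the field name returned unchanged.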
// isPrimitive is true if the given kind matches a goa primitive type.
func isPrimitive(t reflect.Type) bool {
switch t.Kind() {
case reflect.Bool:
fallthrough
case reflect.Int:
fallthrough
case reflect.Int32:
fallthrough
case reflect.Int64:
fallthrough
case reflect.Uint:
fallthrough
case reflect.Uint32:
fallthrough
case reflect.Uint64:
fallthrough
case reflect.Float32:
fallthrough
case reflect.Float64:
fallthrough
case reflect.Interface:
fallthrough
case reflect.String:
return true
case reflect.Slice:
e := t.Elem()
if e.Kind() == reflect.Uint8 {
return true
}
return false
default:
return false
}
}
type compRec struct {
path string
seen map[string]struct{}
}
func (r compRec) append(p string) compRec {
r.path += p
return r
}
// compatible recursively checks that the user and external type definitions match. It
// returns nil if they do, an error otherwise.
func compatible(from expr.DataType, to reflect.Type, recs ...compRec) error {
	// dereference if needed
if to.Kind() == reflect.Ptr {
return compatible(from, to.Elem(), recs...)
}
toName := to.Name()
if toName == "" {
toName = to.Kind().String()
}
// handle recursive data structures
var rec compRec
if recs != nil {
rec = recs[0]
if _, ok := rec.seen[from.Hash()+"-"+toName]; ok {
return nil
}
} else {
rec = compRec{path: "<value>", seen: make(map[string]struct{})}
}
rec.seen[from.Hash()+"-"+toName] = struct{}{}
if expr.IsArray(from) {
if to.Kind() != reflect.Slice {
return fmt.Errorf("types don't match: %s must be a slice", rec.path)
}
return compatible(
expr.AsArray(from).ElemType.Type,
to.Elem(),
rec.append("[0]"),
)
}
if expr.IsMap(from) {
if to.Kind() != reflect.Map {
return fmt.Errorf("types don't match: %s is not a map", rec.path)
}
if err := compatible(
expr.AsMap(from).ElemType.Type,
to.Elem(),
rec.append(".value"),
); err != nil {
return err
}
return compatible(
expr.AsMap(from).KeyType.Type,
to.Key(),
rec.append(".key"),
)
}
if expr.IsObject(from) {
if to.Kind() != reflect.Struct {
return fmt.Errorf("types don't match: %s is a %s, expected a struct", rec.path, toName)
}
obj := expr.AsObject(from)
ma := expr.NewMappedAttributeExpr(&expr.AttributeExpr{Type: obj})
for _, nat := range *obj {
var (
fname string
ok bool
field reflect.StructField
)
{
if ef, k := nat.Attribute.Meta["struct.field.external"]; k {
fname = ef[0]
if fname == "-" {
continue
}
field, ok = to.FieldByName(ef[0])
} else {
ef := codegen.Goify(ma.ElemName(nat.Name), true)
fname = ef
field, ok = to.FieldByName(ef)
}
}
if !ok {
return fmt.Errorf("types don't match: could not find field %q of external type %q matching attribute %q of type %q",
fname, toName, nat.Name, from.Name())
}
err := compatible(
nat.Attribute.Type,
field.Type,
rec.append("."+fname),
)
if err != nil {
return err
}
}
return nil
}
if isPrimitive(to) {
var dt expr.DataType
if err := buildDesignType(&dt, to, nil); err != nil {
return err
}
if expr.Equal(dt, from) {
return nil
}
}
return fmt.Errorf("types don't match: type of %s is %s but type of corresponding attribute is %s", rec.path, toName, from.Name())
}
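// Illustrative check (a sketch): a design Array(String) is compatible with a
// Go []string (pointers are dereferenced first, so *[]string works too), while
// []int fails with an error naming the offending path, e.g. "<value>[0]".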
// input: convertData
const convertT = `{{ printf "%s creates an instance of %s initialized from t." .Name .TypeName | comment }}
func (t {{ .ReceiverTypeRef }}) {{ .Name }}() {{ .TypeRef }} {
{{ .Code }}
return v
}
`
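// Illustrative rendering of convertT (a sketch only, with hypothetical names;
// the real output depends on the design and the generated transform code):
//
//	// ConvertToAccount creates an instance of Account initialized from t.
//	func (t *User) ConvertToAccount() *external.Account {
//		// ... generated transform code assigning to v ...
//		return v
//	}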
// input: convertData
const createT = `{{ printf "%s initializes t from the fields of v" .Name | comment }}
func (t {{ .ReceiverTypeRef }}) {{ .Name }}(v {{ .TypeRef }}) {
{{ .Code }}
*t = *temp
}
`
// input: TransformFunctionData
const transformHelperT = `{{ printf "%s builds a value of type %s from a value of type %s." .Name .ResultTypeRef .ParamTypeRef | comment }}
func {{ .Name }}(v {{ .ParamTypeRef }}) {{ .ResultTypeRef }} {
{{ .Code }}
return res
}
`
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
quortex/provider_test.go
|
package quortex
import (
"os"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
var testAccProviders map[string]*schema.Provider
var testAccProvider *schema.Provider
func init() {
testAccProvider = Provider()
testAccProviders = map[string]*schema.Provider{
"quortex": testAccProvider,
}
}
func TestProvider(t *testing.T) {
if err := Provider().InternalValidate(); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestProvider_impl(t *testing.T) {
var _ *schema.Provider = Provider()
}
func TestAccPreCheck(t *testing.T) {
if err := os.Getenv("QUORTEX_USERNAME"); err == "" {
t.Fatal("QUORTEX_USERNAME must be set for acceptance tests")
}
if err := os.Getenv("QUORTEX_PASSWORD"); err == "" {
t.Fatal("QUORTEX_PASSWORD must be set for acceptance tests")
}
}
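// Illustrative invocation (a sketch, assuming the usual Terraform
// acceptance-test workflow):
//
//	QUORTEX_USERNAME=user QUORTEX_PASSWORD=secret TF_ACC=1 go test ./quortex/ -v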
|
[
"\"QUORTEX_USERNAME\"",
"\"QUORTEX_PASSWORD\""
] |
[] |
[
"QUORTEX_PASSWORD",
"QUORTEX_USERNAME"
] |
[]
|
["QUORTEX_PASSWORD", "QUORTEX_USERNAME"]
|
go
| 2 | 0 | |
storage/storage_test.go
|
/*
Copyright © 2020 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage_test
import (
"bytes"
"database/sql"
"database/sql/driver"
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/RedHatInsights/insights-operator-utils/tests/helpers"
"github.com/RedHatInsights/insights-results-aggregator-data/testdata"
"github.com/Shopify/sarama"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/assert"
"github.com/RedHatInsights/insights-results-aggregator/storage"
"github.com/RedHatInsights/insights-results-aggregator/types"
ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers"
)
func init() {
zerolog.SetGlobalLevel(zerolog.WarnLevel)
}
func assertNumberOfReports(t *testing.T, mockStorage storage.Storage, expectedNumberOfReports int) {
numberOfReports, err := mockStorage.ReportsCount()
helpers.FailOnError(t, err)
assert.Equal(t, expectedNumberOfReports, numberOfReports)
}
func checkReportForCluster(
t *testing.T,
s storage.Storage,
orgID types.OrgID,
clusterName types.ClusterName,
expected []types.RuleOnReport,
) {
// try to read report for cluster
result, _, err := s.ReadReportForCluster(orgID, clusterName)
helpers.FailOnError(t, err)
// and check the read report with expected one
assert.ElementsMatch(t, expected, result)
}
func writeReportForCluster(
t *testing.T,
storage storage.Storage,
orgID types.OrgID,
clusterName types.ClusterName,
clusterReport types.ClusterReport,
rules []types.ReportItem,
) {
err := storage.WriteReportForCluster(orgID, clusterName, clusterReport, rules, time.Now(), testdata.KafkaOffset)
helpers.FailOnError(t, err)
}
// TestNewStorage checks whether constructor for new storage returns error for improper storage configuration
func TestNewStorageError(t *testing.T) {
_, err := storage.New(storage.Configuration{
Driver: "non existing driver",
})
assert.EqualError(t, err, "driver non existing driver is not supported")
}
// TestNewStorageWithLogging tests creating new storage with logs
func TestNewStorageWithLoggingError(t *testing.T) {
s, _ := storage.New(storage.Configuration{
Driver: "postgres",
PGPort: 1234,
PGUsername: "user",
LogSQLQueries: true,
})
err := s.Init()
assert.Contains(t, err.Error(), "connect: connection refused")
}
// TestDBStorageReadReportForClusterEmptyTable check the behaviour of method ReadReportForCluster
func TestDBStorageReadReportForClusterEmptyTable(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
_, _, err := mockStorage.ReadReportForCluster(testdata.OrgID, testdata.ClusterName)
if _, ok := err.(*types.ItemNotFoundError); err == nil || !ok {
t.Fatalf("expected ItemNotFoundError, got %T, %+v", err, err)
}
assert.EqualError(
t,
err,
fmt.Sprintf(
"Item with ID %v/%v was not found in the storage",
testdata.OrgID, testdata.ClusterName,
),
)
}
// TestDBStorageReadReportForClusterClosedStorage check the behaviour of method ReadReportForCluster
func TestDBStorageReadReportForClusterClosedStorage(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
// we need to close storage right now
closer()
_, _, err := mockStorage.ReadReportForCluster(testdata.OrgID, testdata.ClusterName)
assert.EqualError(t, err, "sql: database is closed")
}
// TestDBStorageReadReportForCluster check the behaviour of method ReadReportForCluster
func TestDBStorageReadReportForCluster(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed)
checkReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, nil)
}
// TestDBStorageGetOrgIDByClusterID check the behaviour of method GetOrgIDByClusterID
func TestDBStorageGetOrgIDByClusterID(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed)
orgID, err := mockStorage.GetOrgIDByClusterID(testdata.ClusterName)
helpers.FailOnError(t, err)
assert.Equal(t, orgID, testdata.OrgID)
}
func TestDBStorageGetOrgIDByClusterID_Error(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, false)
defer closer()
dbStorage := mockStorage.(*storage.DBStorage)
connection := dbStorage.GetConnection()
query := `
CREATE TABLE report (
org_id VARCHAR NOT NULL,
cluster VARCHAR NOT NULL,
report VARCHAR NOT NULL,
reported_at TIMESTAMP,
last_checked_at TIMESTAMP,
kafka_offset BIGINT NOT NULL DEFAULT 0,
PRIMARY KEY(org_id, cluster)
);
`
// create a table with a bad type for org_id
_, err := connection.Exec(query)
helpers.FailOnError(t, err)
// insert some data
_, err = connection.Exec(`
INSERT INTO report (org_id, cluster, report, reported_at, last_checked_at)
VALUES ($1, $2, $3, $4, $5);
`, "not-int", testdata.ClusterName, testdata.ClusterReportEmpty, time.Now(), time.Now())
helpers.FailOnError(t, err)
_, err = mockStorage.GetOrgIDByClusterID(testdata.ClusterName)
assert.EqualError(
t,
err,
`sql: Scan error on column index 0, name "org_id": `+
`converting driver.Value type string ("not-int") to a uint64: invalid syntax`,
)
}
// TestDBStorageGetOrgIDByClusterIDFailing check the behaviour of method GetOrgIDByClusterID for not existed ClusterID
func TestDBStorageGetOrgIDByClusterIDFailing(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
orgID, err := mockStorage.GetOrgIDByClusterID(testdata.ClusterName)
assert.EqualError(t, err, "sql: no rows in result set")
assert.Equal(t, orgID, types.OrgID(0))
}
// TestDBStorageReadReportNoTable check the behaviour of method ReadReportForCluster
// when the table with results does not exist
func TestDBStorageReadReportNoTable(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, false)
defer closer()
_, _, err := mockStorage.ReadReportForCluster(testdata.OrgID, testdata.ClusterName)
assert.EqualError(t, err, "no such table: report")
}
// TestDBStorageWriteReportForClusterClosedStorage check the behaviour of method WriteReportForCluster
func TestDBStorageWriteReportForClusterClosedStorage(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
// we need to close storage right now
closer()
err := mockStorage.WriteReportForCluster(
testdata.OrgID,
testdata.ClusterName,
testdata.ClusterReportEmpty,
testdata.ReportEmptyRulesParsed,
time.Now(),
testdata.KafkaOffset,
)
assert.EqualError(t, err, "sql: database is closed")
}
// TestDBStorageWriteReportForClusterClosedStorage check the behaviour of method WriteReportForCluster
func TestDBStorageWriteReportForClusterUnsupportedDriverError(t *testing.T) {
fakeStorage := storage.NewFromConnection(nil, -1)
// no need to close it
err := fakeStorage.WriteReportForCluster(
testdata.OrgID,
testdata.ClusterName,
testdata.ClusterReportEmpty,
testdata.ReportEmptyRulesParsed,
time.Now(),
testdata.KafkaOffset,
)
assert.EqualError(t, err, "writing report with DB -1 is not supported")
}
// TestDBStorageWriteReportForClusterMoreRecentInDB checks that older report
// will not replace a more recent one when writing a report to storage.
func TestDBStorageWriteReportForClusterMoreRecentInDB(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
newerTime := time.Now().UTC()
olderTime := newerTime.Add(-time.Hour)
// Insert newer report.
err := mockStorage.WriteReportForCluster(
testdata.OrgID,
testdata.ClusterName,
testdata.ClusterReportEmpty,
testdata.ReportEmptyRulesParsed,
newerTime,
testdata.KafkaOffset,
)
helpers.FailOnError(t, err)
// Try to insert older report.
err = mockStorage.WriteReportForCluster(
testdata.OrgID,
testdata.ClusterName,
testdata.ClusterReportEmpty,
testdata.ReportEmptyRulesParsed,
olderTime,
testdata.KafkaOffset,
)
assert.Equal(t, types.ErrOldReport, err)
}
// TestDBStorageWriteReportForClusterDroppedReportTable checks the error
// returned when trying to SELECT from a dropped/missing report table.
func TestDBStorageWriteReportForClusterDroppedReportTable(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
connection := storage.GetConnection(mockStorage.(*storage.DBStorage))
query := "DROP TABLE report"
if os.Getenv("INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB") == "postgres" {
query += " CASCADE"
}
query += ";"
_, err := connection.Exec(query)
helpers.FailOnError(t, err)
err = mockStorage.WriteReportForCluster(
testdata.OrgID, testdata.ClusterName, testdata.ClusterReportEmpty, testdata.ReportEmptyRulesParsed, time.Now(), testdata.KafkaOffset,
)
assert.EqualError(t, err, "no such table: report")
}
func TestDBStorageWriteReportForClusterExecError(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, false)
defer closer()
createReportTableWithBadClusterField(t, mockStorage)
err := mockStorage.WriteReportForCluster(
testdata.OrgID, testdata.ClusterName, testdata.Report3Rules, testdata.Report3RulesParsed, testdata.LastCheckedAt, testdata.KafkaOffset,
)
assert.Error(t, err)
const sqliteErrMessage = "CHECK constraint failed: report"
const postgresErrMessage = "pq: invalid input syntax for integer"
if err.Error() != sqliteErrMessage && !strings.HasPrefix(err.Error(), postgresErrMessage) {
t.Fatalf("expected on of: \n%v\n%v\ngot:\n%v", sqliteErrMessage, postgresErrMessage, err.Error())
}
}
func TestDBStorageWriteReportForClusterFakePostgresOK(t *testing.T) {
mockStorage, expects := ira_helpers.MustGetMockStorageWithExpectsForDriver(t, types.DBDriverPostgres)
defer ira_helpers.MustCloseMockStorageWithExpects(t, mockStorage, expects)
expects.ExpectBegin()
expects.ExpectQuery(`SELECT last_checked_at FROM report`).
WillReturnRows(expects.NewRows([]string{"last_checked_at"})).
RowsWillBeClosed()
expects.ExpectExec("DELETE FROM rule_hit").
WillReturnResult(driver.ResultNoRows)
for i := 0; i < len(testdata.Report3RulesParsed); i++ {
expects.ExpectExec("INSERT INTO rule_hit").
WillReturnResult(driver.ResultNoRows)
}
expects.ExpectExec("INSERT INTO report").
WillReturnResult(driver.ResultNoRows)
expects.ExpectCommit()
err := mockStorage.WriteReportForCluster(
testdata.OrgID, testdata.ClusterName, testdata.Report3Rules, testdata.Report3RulesParsed, testdata.LastCheckedAt, testdata.KafkaOffset,
)
helpers.FailOnError(t, err)
}
// TestDBStorageListOfOrgs check the behaviour of method ListOfOrgs
func TestDBStorageListOfOrgs(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
writeReportForCluster(t, mockStorage, 1, "1deb586c-fb85-4db4-ae5b-139cdbdf77ae", testdata.ClusterReportEmpty, testdata.ReportEmptyRulesParsed)
writeReportForCluster(t, mockStorage, 3, "a1bf5b15-5229-4042-9825-c69dc36b57f5", testdata.ClusterReportEmpty, testdata.ReportEmptyRulesParsed)
result, err := mockStorage.ListOfOrgs()
helpers.FailOnError(t, err)
assert.ElementsMatch(t, []types.OrgID{1, 3}, result)
}
func TestDBStorageListOfOrgsNoTable(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, false)
defer closer()
_, err := mockStorage.ListOfOrgs()
assert.EqualError(t, err, "no such table: report")
}
// TestDBStorageListOfOrgsClosedStorage check the behaviour of method ListOfOrgs
func TestDBStorageListOfOrgsClosedStorage(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
// we need to close storage right now
closer()
_, err := mockStorage.ListOfOrgs()
assert.EqualError(t, err, "sql: database is closed")
}
// TestDBStorageListOfClustersFor check the behaviour of method ListOfClustersForOrg
func TestDBStorageListOfClustersForOrg(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
cluster1ID, cluster2ID, cluster3ID := testdata.GetRandomClusterID(), testdata.GetRandomClusterID(), testdata.GetRandomClusterID()
// writeReportForCluster writes the report at time.Now()
writeReportForCluster(t, mockStorage, testdata.OrgID, cluster1ID, testdata.ClusterReportEmpty, testdata.ReportEmptyRulesParsed)
writeReportForCluster(t, mockStorage, testdata.OrgID, cluster2ID, testdata.ClusterReportEmpty, testdata.ReportEmptyRulesParsed)
// also pushing cluster for different org
writeReportForCluster(t, mockStorage, testdata.Org2ID, cluster3ID, testdata.ClusterReportEmpty, testdata.ReportEmptyRulesParsed)
result, err := mockStorage.ListOfClustersForOrg(testdata.OrgID, time.Now().Add(-time.Hour))
helpers.FailOnError(t, err)
assert.ElementsMatch(t, []types.ClusterName{
cluster1ID,
cluster2ID,
}, result)
result, err = mockStorage.ListOfClustersForOrg(testdata.Org2ID, time.Now().Add(-time.Hour))
helpers.FailOnError(t, err)
assert.Equal(t, []types.ClusterName{cluster3ID}, result)
}
func TestDBStorageListOfClustersTimeLimit(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
cluster1ID, cluster2ID := testdata.GetRandomClusterID(), testdata.GetRandomClusterID()
// writeReportForCluster writes the report at time.Now()
writeReportForCluster(t, mockStorage, testdata.OrgID, cluster1ID, testdata.ClusterReportEmpty, testdata.ReportEmptyRulesParsed)
writeReportForCluster(t, mockStorage, testdata.OrgID, cluster2ID, testdata.ClusterReportEmpty, testdata.ReportEmptyRulesParsed)
// since we can't easily change reported_at without changing the core source code, let's make a request from the "future"
// fetch org overview with T+2h
result, err := mockStorage.ListOfClustersForOrg(testdata.OrgID, time.Now().Add(time.Hour*2))
helpers.FailOnError(t, err)
// must fetch nothing
// assert.ElementsMatch(t, []types.ClusterName{}, result)
assert.Empty(t, result)
// request with T-2h
result, err = mockStorage.ListOfClustersForOrg(testdata.OrgID, time.Now().Add(-time.Hour*2))
helpers.FailOnError(t, err)
// must fetch all reports
assert.ElementsMatch(t, []types.ClusterName{
cluster1ID,
cluster2ID,
}, result)
}
func TestDBStorageListOfClustersNoTable(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, false)
defer closer()
_, err := mockStorage.ListOfClustersForOrg(5, time.Now().Add(-time.Hour))
assert.EqualError(t, err, "no such table: report")
}
// TestDBStorageListOfClustersClosedStorage check the behaviour of method ListOfOrgs
func TestDBStorageListOfClustersClosedStorage(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
// we need to close storage right now
closer()
_, err := mockStorage.ListOfClustersForOrg(5, time.Now().Add(-time.Hour))
assert.EqualError(t, err, "sql: database is closed")
}
// TestMockDBReportsCount check the behaviour of method ReportsCount
func TestMockDBReportsCount(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
assertNumberOfReports(t, mockStorage, 0)
writeReportForCluster(t, mockStorage, 5, "4016d01b-62a1-4b49-a36e-c1c5a3d02750", testdata.ClusterReportEmpty, testdata.ReportEmptyRulesParsed)
assertNumberOfReports(t, mockStorage, 1)
}
func TestMockDBReportsCountNoTable(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, false)
defer closer()
_, err := mockStorage.ReportsCount()
assert.EqualError(t, err, "no such table: report")
}
func TestMockDBReportsCountClosedStorage(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, false)
// we need to close storage right now
closer()
_, err := mockStorage.ReportsCount()
assert.EqualError(t, err, "sql: database is closed")
}
func TestDBStorageNewPostgresqlError(t *testing.T) {
s, _ := storage.New(storage.Configuration{
Driver: "postgres",
PGHost: "non-existing-host",
PGPort: 12345,
PGUsername: "user",
})
err := s.Init()
assert.Contains(t, err.Error(), "no such host")
}
func mustWriteReport(
t *testing.T,
connection *sql.DB,
orgID interface{},
clusterName interface{},
clusterReport interface{},
) {
query := `
INSERT INTO report(org_id, cluster, report, reported_at, last_checked_at)
VALUES ($1, $2, $3, $4, $5);
`
statement, err := connection.Prepare(query)
helpers.FailOnError(t, err)
_, err = statement.Exec(
orgID,
clusterName,
clusterReport,
time.Now(),
time.Now(),
)
helpers.FailOnError(t, err)
err = statement.Close()
helpers.FailOnError(t, err)
}
func TestDBStorageListOfOrgsLogError(t *testing.T) {
buf := new(bytes.Buffer)
log.Logger = zerolog.New(buf)
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
connection := storage.GetConnection(mockStorage.(*storage.DBStorage))
// write illegal negative org_id
mustWriteReport(t, connection, -1, testdata.ClusterName, testdata.ClusterReportEmpty)
_, err := mockStorage.ListOfOrgs()
helpers.FailOnError(t, err)
assert.Contains(t, buf.String(), "sql: Scan error")
}
func TestDBStorageCloseError(t *testing.T) {
const errString = "unable to close the database"
mockStorage, expects := ira_helpers.MustGetMockStorageWithExpects(t)
expects.ExpectClose().WillReturnError(fmt.Errorf(errString))
err := mockStorage.Close()
assert.EqualError(t, err, errString)
}
func TestDBStorageListOfClustersForOrgScanError(t *testing.T) {
	// just for coverage; this error can never happen because we use
	// NOT NULL in the table creation
buf := new(bytes.Buffer)
log.Logger = zerolog.New(buf)
mockStorage, expects := ira_helpers.MustGetMockStorageWithExpects(t)
defer ira_helpers.MustCloseMockStorageWithExpects(t, mockStorage, expects)
expects.ExpectQuery("SELECT cluster FROM report").WillReturnRows(
sqlmock.NewRows([]string{"cluster"}).AddRow(nil),
)
_, err := mockStorage.ListOfClustersForOrg(testdata.OrgID, time.Now().Add(-time.Hour))
helpers.FailOnError(t, err)
assert.Contains(t, buf.String(), "converting NULL to string is unsupported")
}
func TestDBStorageDeleteReports(t *testing.T) {
for _, functionName := range []string{
"DeleteReportsForOrg", "DeleteReportsForCluster",
} {
func() {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
assertNumberOfReports(t, mockStorage, 0)
err := mockStorage.WriteReportForCluster(
testdata.OrgID,
testdata.ClusterName,
testdata.Report3Rules,
testdata.Report3RulesParsed,
testdata.LastCheckedAt,
testdata.KafkaOffset,
)
helpers.FailOnError(t, err)
assertNumberOfReports(t, mockStorage, 1)
switch functionName {
case "DeleteReportsForOrg":
err = mockStorage.DeleteReportsForOrg(testdata.OrgID)
case "DeleteReportsForCluster":
err = mockStorage.DeleteReportsForCluster(testdata.ClusterName)
default:
t.Fatal(fmt.Errorf("unexpected function name"))
}
helpers.FailOnError(t, err)
assertNumberOfReports(t, mockStorage, 0)
}()
}
}
func TestDBStorage_ReadReportForClusterByClusterName_OK(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
mustWriteReport3Rules(t, mockStorage)
report, lastCheckedAt, err := mockStorage.ReadReportForClusterByClusterName(testdata.ClusterName)
helpers.FailOnError(t, err)
assert.Equal(t, testdata.RuleOnReportResponses, report)
assert.Equal(t, types.Timestamp(testdata.LastCheckedAt.UTC().Format(time.RFC3339)), lastCheckedAt)
}
func TestDBStorage_CheckIfClusterExists_ClusterDoesNotExist(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
_, _, err := mockStorage.ReadReportForClusterByClusterName(testdata.ClusterName)
assert.EqualError(
t,
err,
fmt.Sprintf("Item with ID %v was not found in the storage", testdata.ClusterName),
)
}
func TestDBStorage_CheckIfClusterExists_DBError(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
closer()
_, _, err := mockStorage.ReadReportForClusterByClusterName(testdata.ClusterName)
assert.EqualError(t, err, "sql: database is closed")
}
func TestDBStorage_NewSQLite(t *testing.T) {
_, err := storage.New(storage.Configuration{
Driver: "sqlite3",
SQLiteDataSource: ":memory:",
})
helpers.FailOnError(t, err)
}
func TestDBStorageWriteConsumerError(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
testTopic := "topic"
var testPartition int32 = 2
var testOffset int64 = 10
testKey := []byte("key")
testMessage := []byte("value")
testProducedAt := time.Now().Add(-time.Hour).UTC()
testError := fmt.Errorf("Consumer error")
err := mockStorage.WriteConsumerError(&sarama.ConsumerMessage{
Topic: testTopic,
Partition: testPartition,
Offset: testOffset,
Key: testKey,
Value: testMessage,
Timestamp: testProducedAt,
}, testError)
assert.NoError(t, err)
conn := storage.GetConnection(mockStorage.(*storage.DBStorage))
row := conn.QueryRow(`
SELECT key, message, produced_at, consumed_at, error
FROM consumer_error
WHERE topic = $1 AND partition = $2 AND topic_offset = $3
`, testTopic, testPartition, testOffset)
var storageKey []byte
var storageMessage []byte
var storageProducedAt time.Time
var storageConsumedAt time.Time
var storageError string
err = row.Scan(&storageKey, &storageMessage, &storageProducedAt, &storageConsumedAt, &storageError)
assert.NoError(t, err)
assert.Equal(t, testKey, storageKey)
assert.Equal(t, testMessage, storageMessage)
assert.Equal(t, testProducedAt.Unix(), storageProducedAt.Unix())
assert.True(t, time.Now().UTC().After(storageConsumedAt))
assert.Equal(t, testError.Error(), storageError)
}
func TestDBStorage_GetLatestKafkaOffset(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
offset, err := mockStorage.GetLatestKafkaOffset()
helpers.FailOnError(t, err)
assert.Equal(t, types.KafkaOffset(0), offset)
mustWriteReport3Rules(t, mockStorage)
offset, err = mockStorage.GetLatestKafkaOffset()
helpers.FailOnError(t, err)
assert.Equal(t, types.KafkaOffset(1), offset)
}
func TestDBStorage_GetLatestKafkaOffset_ZeroOffset(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
offset, err := mockStorage.GetLatestKafkaOffset()
helpers.FailOnError(t, err)
assert.Equal(t, types.KafkaOffset(0), offset)
err = mockStorage.WriteReportForCluster(
testdata.OrgID,
testdata.ClusterName,
testdata.Report3Rules,
testdata.Report3RulesParsed,
testdata.LastCheckedAt,
types.KafkaOffset(0),
)
helpers.FailOnError(t, err)
offset, err = mockStorage.GetLatestKafkaOffset()
helpers.FailOnError(t, err)
assert.Equal(t, types.KafkaOffset(0), offset)
}
func TestDBStorage_Init(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
dbStorage := mockStorage.(*storage.DBStorage)
err := dbStorage.MigrateToLatest()
helpers.FailOnError(t, err)
mustWriteReport3Rules(t, mockStorage)
err = mockStorage.Init()
helpers.FailOnError(t, err)
clustersLastChecked := storage.GetClustersLastChecked(dbStorage)
assert.Len(t, clustersLastChecked, 1)
assert.Equal(t, testdata.LastCheckedAt.Unix(), clustersLastChecked[testdata.ClusterName].Unix())
}
func TestDBStorage_Init_Error(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, false)
defer closer()
createReportTableWithBadClusterField(t, mockStorage)
connection := storage.GetConnection(mockStorage.(*storage.DBStorage))
// create a table with a bad type
_, err := connection.Exec(`
INSERT INTO report (org_id, cluster, report, reported_at, last_checked_at)
VALUES($1, $2, $3, $4, $5)
`, testdata.OrgID, 1, testdata.ClusterReportEmpty, time.Now(), time.Now())
helpers.FailOnError(t, err)
err = mockStorage.Init()
assert.EqualError(
t,
err,
`sql: Scan error on column index 0, name "cluster": `+
`unsupported Scan, storing driver.Value type int64 into type *types.ClusterName`,
)
}
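// createReportTableWithBadClusterField creates a report table whose cluster column is an
// INTEGER instead of a string, so that scanning it into types.ClusterName fails, plus an
// accompanying rule_hit table.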
func createReportTableWithBadClusterField(t *testing.T, mockStorage storage.Storage) {
connection := storage.GetConnection(mockStorage.(*storage.DBStorage))
query := `
CREATE TABLE report (
org_id INTEGER NOT NULL,
cluster INTEGER NOT NULL UNIQUE CHECK(typeof(cluster) = 'integer'),
report VARCHAR NOT NULL,
reported_at TIMESTAMP,
last_checked_at TIMESTAMP,
kafka_offset BIGINT NOT NULL DEFAULT 0,
PRIMARY KEY(org_id, cluster)
)
`
if os.Getenv("INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB") == "postgres" {
query = `
CREATE TABLE report (
org_id INTEGER NOT NULL,
cluster INTEGER NOT NULL UNIQUE,
report VARCHAR NOT NULL,
reported_at TIMESTAMP,
last_checked_at TIMESTAMP,
kafka_offset BIGINT NOT NULL DEFAULT 0,
PRIMARY KEY(org_id, cluster)
)
`
}
// create a table with a bad type
_, err := connection.Exec(query)
helpers.FailOnError(t, err)
query = `
CREATE TABLE rule_hit (
org_id INTEGER NOT NULL,
cluster_id VARCHAR NOT NULL,
rule_fqdn VARCHAR NOT NULL,
error_key VARCHAR NOT NULL,
template_data VARCHAR NOT NULL,
PRIMARY KEY(cluster_id, org_id, rule_fqdn, error_key)
)
`
_, err = connection.Exec(query)
helpers.FailOnError(t, err)
}
// TestConstructInClausule checks the helper function constructInClausule
func TestConstructInClausule(t *testing.T) {
c0 := storage.ConstructInClausule(0)
assert.Equal(t, c0, "$1")
c1 := storage.ConstructInClausule(1)
assert.Equal(t, c1, "$1")
c2 := storage.ConstructInClausule(2)
assert.Equal(t, c2, "$1,$2")
c3 := storage.ConstructInClausule(3)
assert.Equal(t, c3, "$1,$2,$3")
}
// TestArgsWithClusterNames checks the helper function argsWithClusterNames
func TestArgsWithClusterNames(t *testing.T) {
cn0 := []types.ClusterName{}
args0 := storage.ArgsWithClusterNames(cn0)
assert.Equal(t, len(args0), 0)
cn1 := []types.ClusterName{"aaa"}
args1 := storage.ArgsWithClusterNames(cn1)
assert.Equal(t, len(args1), 1)
assert.Equal(t, args1[0], types.ClusterName("aaa"))
cn2 := []types.ClusterName{"aaa", "bbb"}
args2 := storage.ArgsWithClusterNames(cn2)
assert.Equal(t, len(args2), 2)
assert.Equal(t, args2[0], types.ClusterName("aaa"))
assert.Equal(t, args2[1], types.ClusterName("bbb"))
}
// TestDBStorageReadReportsForClusters1 checks the behaviour of method
// ReadReportsForClusters
func TestDBStorageReadReportsForClusters1(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed)
// try to read reports for clusters
cn1 := []types.ClusterName{"not-a-cluster"}
results, err := mockStorage.ReadReportsForClusters(cn1)
helpers.FailOnError(t, err)
// and check the read report with expected one
assert.Equal(t, len(results), 0)
}
// TestDBStorageReadReportsForClusters2 checks the behaviour of method
// ReadReportsForClusters
func TestDBStorageReadReportsForClusters2(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed)
// try to read reports for clusters
cn1 := []types.ClusterName{testdata.ClusterName}
results, err := mockStorage.ReadReportsForClusters(cn1)
helpers.FailOnError(t, err)
// and check the read report with expected one
assert.Equal(t, len(results), 1)
}
// TestDBStorageReadReportsForClusters3 checks the behaviour of method
// ReadReportsForClusters
func TestDBStorageReadReportsForClusters3(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed)
// try to read reports for clusters
cn1 := []types.ClusterName{}
_, err := mockStorage.ReadReportsForClusters(cn1)
// error is expected in this case
assert.NotNil(t, err)
}
// TestDBStorageReadOrgIDsForClusters1 checks the behaviour of method
// ReadOrgIDsForClusters
func TestDBStorageReadOrgIDsForClusters1(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed)
// try to read org IDs for clusters
cn1 := []types.ClusterName{"not-a-cluster"}
results, err := mockStorage.ReadOrgIDsForClusters(cn1)
helpers.FailOnError(t, err)
// and check the read report with expected one
assert.Equal(t, len(results), 0)
}
// TestDBStorageReadOrgIDsForClusters2 checks the behaviour of method
// ReadOrgIDsForClusters
func TestDBStorageReadOrgIDsForClusters2(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed)
// try to read org IDs for clusters
cn1 := []types.ClusterName{testdata.ClusterName}
results, err := mockStorage.ReadOrgIDsForClusters(cn1)
helpers.FailOnError(t, err)
// and check the read report with expected one
assert.Equal(t, len(results), 1)
assert.Equal(t, results[0], testdata.OrgID)
}
// TestDBStorageReadOrgIDsForClusters3 checks the behaviour of method
// ReadOrgIDsForClusters
func TestDBStorageReadOrgIDsForClusters3(t *testing.T) {
mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
defer closer()
writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed)
// try to read org IDs for clusters
cn1 := []types.ClusterName{}
_, err := mockStorage.ReadOrgIDsForClusters(cn1)
// error is expected in this case
assert.NotNil(t, err)
}
|
[
"\"INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB\"",
"\"INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB\""
] |
[] |
[
"INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB"
] |
[]
|
["INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB"]
|
go
| 1 | 0 | |
contrib/spendfrom/spendfrom.py
|
#!/usr/bin/env python
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the abacoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Abacoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Abacoin")
return os.path.expanduser("~/.abacoin")
def read_bitcoin_config(dbdir):
"""Read the abacoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "abacoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a abacoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 12664 if testnet else 2664
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
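# Prompt for the wallet passphrase and briefly unlock the wallet if it is encrypted
# and currently locked; returns True once the wallet is usable.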
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
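# Summarize spendable funds per address: total value, the unspent outputs backing it,
# and the account label. Outputs that are not pay-to-pubkey-hash or pay-to-script-hash
# are skipped.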
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
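# Greedy coin selection: take unspent outputs in the order given until 'needed' is
# covered, returning the chosen outputs and the change (have - needed). For example,
# with inputs worth 1.0 and 0.5 and needed=1.2, both are selected and 0.3 is change.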
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
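# Build and sign a raw transaction sending 'amount' to 'toaddress' with the given fee,
# funding it from 'fromaddresses' and returning any non-trivial change to the last of
# those addresses; returns the signed transaction as hex.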
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
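# Sum the value of a transaction's inputs by looking up each referenced previous
# transaction with getrawtransaction.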
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
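# Abort if the transaction's implied fee (inputs minus outputs) is unreasonably large,
# or if a large or tiny transaction carries less than the base fee.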
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get abacoins from")
parser.add_option("--to", dest="to", default=None,
help="address to send abacoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of abacoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
pytorch_lightning/accelerators/base_backend.py
|
import os
import math
from enum import Enum
from typing import Any
import torch
from pytorch_lightning.utilities import AMPType, rank_zero_warn
from pytorch_lightning.utilities.apply_func import move_data_to_device
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import AttributeDict
import torch.distributed as torch_distrib
from pytorch_lightning import _logger as log
try:
from apex import amp
except ImportError:
amp = None
EPSILON = 1e-6
EPSILON_FP16 = 1e-5
class Accelerator(object):
def __init__(self, trainer, cluster_environment=None):
self.trainer = trainer
self.cluster_environment = cluster_environment
self.dist = AttributeDict(rank=0, device=None)
def setup(self, model):
pass
def teardown(self):
pass
def barrier(self, name: str = None):
pass
def broadcast(self, obj, src=0):
return obj
def train_or_test(self):
if self.trainer.testing:
results = self.trainer.run_test()
else:
results = self.trainer.train()
return results
def batch_to_device(self, batch: Any, device: torch.device):
model = self.trainer.get_model()
if model is not None:
return model.transfer_batch_to_device(batch, device)
return move_data_to_device(batch, device)
def training_step_end(self, output):
return output
def test_step_end(self, output):
return output
def validation_step_end(self, output):
return output
def process_dataloader(self, dataloader):
return dataloader
def backward(self, closure_loss, optimizer, opt_idx):
model_ref = self.trainer.get_model()
# scale loss for 16 bit
if self.trainer.precision == 16:
closure_loss = model_ref.amp_scale_loss(
closure_loss,
optimizer,
opt_idx,
amp_backend=self.trainer.amp_backend
)
# enter amp context
if self.trainer.amp_backend == AMPType.APEX:
self.trainer.dev_debugger.track_event('AMP', str(AMPType.APEX))
context = closure_loss
closure_loss = closure_loss.__enter__()
# do backward pass
model_ref.backward(self, closure_loss, optimizer, opt_idx)
# exit amp context
if self.trainer.precision == 16 and self.trainer.amp_backend == AMPType.APEX:
a, b, c = None, None, None
error = context.__exit__(a, b, c)
if error:
rank_zero_warn(a, b, c)
raise Exception('apex unscale error')
# once backward has been applied, release graph
closure_loss = closure_loss.detach()
return closure_loss
def optimizer_step(self, optimizer, batch_idx, opt_idx, lambda_closure):
model_ref = self.trainer.get_model()
is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
native_amp = self.trainer.amp_backend == AMPType.NATIVE
# native amp + lbfgs is a no go right now
if native_amp and is_lbfgs:
raise MisconfigurationException(
'native PyTorch amp and lbfgs are not compatible.'
' To request, please file a Github issue in PyTorch and tag @mcarilli')
# model hook
model_ref.optimizer_step(
self.trainer.current_epoch,
batch_idx,
optimizer,
opt_idx,
lambda_closure,
using_native_amp=native_amp,
using_lbfgs=is_lbfgs
)
# scale when native amp
if native_amp:
self.trainer.scaler.update()
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
model_ref = self.trainer.get_model()
model_ref.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def clip_gradients(self, optimizer):
if self.trainer.amp_backend == AMPType.NATIVE:
self.trainer.scaler.unscale_(optimizer)
# apply clip gradients
# TODO: separate TPU case from here
self._clip_gradients(optimizer)
def _clip_gradients(self, optimizer):
# this code is a modification of torch.nn.utils.clip_grad_norm_
# with TPU support based on https://github.com/pytorch/xla/blob/master/TROUBLESHOOTING.md
if self.trainer.gradient_clip_val <= 0:
return
model = self.trainer.get_model()
if self.trainer.amp_backend == AMPType.APEX:
parameters = amp.master_params(optimizer)
else:
parameters = model.parameters()
max_norm = float(self.trainer.gradient_clip_val)
norm_type = float(2.0)
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
if norm_type == math.inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
device = parameters[0].device
out = torch.empty(len(parameters), device=device)
for i, p in enumerate(parameters):
torch.norm(p.grad.data.to(device), norm_type, out=out[i])
total_norm = torch.norm(out, norm_type)
eps = EPSILON_FP16 if self.trainer.precision == 16 else EPSILON
clip_coef = torch.tensor(max_norm, device=device) / (total_norm + eps)
clip_coef = torch.min(clip_coef, torch.ones_like(clip_coef))
for p in parameters:
p.grad.data.mul_(clip_coef.to(p.grad.data.device))
def on_train_epoch_end(self):
pass
def on_train_end(self):
pass
def early_stopping_should_stop(self, pl_module):
return self.trainer.should_stop
def setup_optimizers(self, model):
if self.trainer.testing is True:
return
optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
self.trainer.optimizers = optimizers
self.trainer.lr_schedulers = lr_schedulers
self.trainer.optimizer_frequencies = optimizer_frequencies
def init_ddp_connection(
self, global_rank: int, world_size: int, is_slurm_managing_tasks: bool = True
) -> None:
if is_slurm_managing_tasks:
self.trainer.slurm_connector.connect_ddp(global_rank, world_size)
else:
self.connect_torchelastic(global_rank, world_size)
def connect_torchelastic(
self, global_rank: int, world_size: int
) -> None:
"""
Override to define your custom way of setting up a distributed environment.
Lightning's implementation uses env:// init by default and sets the first node as root
for SLURM managed cluster.
Args:
global_rank: The global process idx.
world_size: Number of GPUs being used across all nodes (num_nodes * num_gpus).
"""
if "MASTER_ADDR" not in os.environ:
rank_zero_warn(
"MASTER_ADDR environment variable is not defined. Set as localhost"
)
os.environ["MASTER_ADDR"] = "127.0.0.1"
log.debug(f"MASTER_ADDR: {os.environ['MASTER_ADDR']}")
if "MASTER_PORT" not in os.environ:
rank_zero_warn(
"MASTER_PORT environment variable is not defined. Set as 12910"
)
os.environ["MASTER_PORT"] = "12910"
log.debug(f"MASTER_PORT: {os.environ['MASTER_PORT']}")
if "WORLD_SIZE" in os.environ and int(os.environ["WORLD_SIZE"]) != world_size:
rank_zero_warn(
f"WORLD_SIZE environment variable ({os.environ['WORLD_SIZE']}) "
f"is not equal to the computed world size ({world_size}). Ignored."
)
torch_backend = "nccl" if self.trainer.on_gpu else "gloo"
if not torch.distributed.is_initialized():
log.info(
f"initializing ddp: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank + 1}/{world_size}"
)
torch_distrib.init_process_group(
torch_backend, rank=global_rank, world_size=world_size
)
# TODO: allow user to compare with string even though internally we shall use these Enums to prevent typos...
class BackendType(Enum):
DP = 'dp'
DDP = 'ddp'
DDP2 = 'ddp2'
DDP_SPAWN = 'ddp_spawn'
# decouple distrib and device
DDP_CPU = 'ddp_cpu'
HOROVOD = 'horovod'
# this is rather a device than a distributed backend
TPU = 'tpu'
|
[] |
[] |
[
"MASTER_ADDR",
"MASTER_PORT",
"WORLD_SIZE"
] |
[]
|
["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE"]
|
python
| 3 | 0 | |
tests.py
|
# -*- encoding: utf-8 -*-
import os
import pytest
import waterboy.testing
MONGO_TEST_DATABASE = waterboy.testing.MONGO_TEST_DATABASE
REDIS_RUNNING = bool(int(os.environ.get('REDIS_RUNNING', 0)))
MONGO_RUNNING = bool(int(os.environ.get('MONGO_RUNNING', 0)))
REDIS_PORT = os.environ.get('REDIS_PORT', 6379)
MONGO_PORT = os.environ.get('MONGO_PORT', 27017)
class TestDictConfig(waterboy.testing.ConfigTestCase):
"""Test the dummy 'dict' backend."""
BACKEND = 'dict'
###############################################################################
# Redis Tests #
###############################################################################
def skipifnoredis(*args, **kwargs):
return pytest.mark.skipif(not REDIS_RUNNING, reason='No redis server found.')(*args, **kwargs)
@skipifnoredis
def test_server_ping(redis):
assert redis.backend.client.ping() is True
@skipifnoredis
class TestRedisConfig(waterboy.testing.ConfigTestCase):
"""Test the redis backend."""
BACKEND = 'redis'
###############################################################################
# MongoDB Tests #
###############################################################################
def skipifnomongo(*args, **kwargs):
return pytest.mark.skipif(not MONGO_RUNNING, reason='No mongodb server found.')(*args, **kwargs)
if MONGO_RUNNING:
@pytest.mark.usefixtures("mongo_test_database")
class TestMongoConfig(waterboy.testing.ConfigTestCase):
"""Test the mongo backend."""
BACKEND = 'mongo'
BACKEND_PARAMS = [MONGO_TEST_DATABASE]
|
[] |
[] |
[
"REDIS_RUNNING",
"REDIS_PORT",
"MONGO_RUNNING",
"MONGO_PORT"
] |
[]
|
["REDIS_RUNNING", "REDIS_PORT", "MONGO_RUNNING", "MONGO_PORT"]
|
python
| 4 | 0 | |
shopping_mall_server/wsgi.py
|
"""
WSGI config for shopping_mall_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopping_mall_server.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/e2e/e2e_test.go
|
package e2e
import (
"bytes"
"context"
"crypto/x509"
"encoding/pem"
"fmt"
"math/rand"
"os"
"reflect"
"strings"
"testing"
"time"
prometheusv1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
admissionreg "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
admissionregclient "k8s.io/client-go/kubernetes/typed/admissionregistration/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/cert"
apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
apiserviceclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
apiserviceclientv1 "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
operatorv1client "github.com/openshift/client-go/operator/clientset/versioned"
routeclient "github.com/openshift/client-go/route/clientset/versioned"
"github.com/openshift/library-go/pkg/crypto"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"github.com/openshift/library-go/test/library/metrics"
"github.com/openshift/service-ca-operator/pkg/controller/api"
"github.com/openshift/service-ca-operator/pkg/operator"
"github.com/openshift/service-ca-operator/pkg/operator/operatorclient"
"github.com/openshift/service-ca-operator/test/util"
)
const (
serviceCAOperatorNamespace = operatorclient.OperatorNamespace
serviceCAOperatorPodPrefix = operatorclient.OperatorName
serviceCAControllerNamespace = operatorclient.TargetNamespace
serviceCAPodPrefix = api.ServiceCADeploymentName
signingKeySecretName = api.ServiceCASecretName
// A label used to attach StatefulSet pods to a headless service created by
// createServingCertAnnotatedService
owningHeadlessServiceLabelName = "owning-headless-service"
pollInterval = time.Second
pollTimeout = 30 * time.Second
// Rotation of all certs and bundles is expected to take a considerable amount of time
// due to the operator having to restart each controller and then each controller having
// to acquire the leader election lease and update all targeted resources.
rotationTimeout = 5 * time.Minute
// Polling for resources related to rotation may be delayed by the number of resources
// that are updated in the cluster in response to rotation.
rotationPollTimeout = 2 * time.Minute
)
// checkComponents verifies that the components of the operator are running.
func checkComponents(t *testing.T, client *kubernetes.Clientset) {
componentConfigs := []struct {
namespace string
podPrefix string
}{
{serviceCAOperatorNamespace, serviceCAOperatorPodPrefix},
{serviceCAControllerNamespace, serviceCAPodPrefix},
}
for _, cfg := range componentConfigs {
pods, err := client.CoreV1().Pods(cfg.namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list pods in namespace %q: %v", cfg.namespace, err)
}
podFound := false
for _, pod := range pods.Items {
if strings.HasPrefix(pod.GetName(), cfg.podPrefix) {
podFound = true
break
}
}
if !podFound {
t.Fatalf("No pods with prefix %q found running in namespace %q", cfg.podPrefix, cfg.namespace)
}
}
}
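// createTestNamespace creates the named namespace and returns it together with a cleanup
// function that deletes it again.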
func createTestNamespace(t *testing.T, client *kubernetes.Clientset, namespaceName string) (*v1.Namespace, func(), error) {
ns, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespaceName,
},
}, metav1.CreateOptions{})
cleanup := func() {
if err := client.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}); err != nil {
t.Logf("Deleting namespace %s failed: %v", ns.Name, err)
}
}
return ns, cleanup, err
}
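// createServingCertAnnotatedService creates a service annotated to request a serving cert
// in the named secret. When headless is true the service gets ClusterIP None and a selector
// matching the pods of an associated StatefulSet.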
func createServingCertAnnotatedService(client *kubernetes.Clientset, secretName, serviceName, namespace string, headless bool) error {
service := &v1.Service{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Annotations: map[string]string{
api.ServingCertSecretAnnotation: secretName,
},
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Name: "tests",
Port: 8443,
},
},
},
}
if headless {
service.Spec.Selector = map[string]string{
owningHeadlessServiceLabelName: serviceName,
}
service.Spec.ClusterIP = v1.ClusterIPNone
}
_, err := client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
return err
}
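// createStatefulSet creates a StatefulSet whose pods serve TLS on port 8443 using the
// serving cert mounted from the given secret.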
func createStatefulSet(client *kubernetes.Clientset, secretName, statefulSetName, serviceName, namespace string, numReplicas int) error {
const podLabelName = "pod-label"
podLabelValue := statefulSetName + "-pod-label"
replicasInt32 := int32(numReplicas)
_, err := client.AppsV1().StatefulSets(namespace).Create(context.TODO(), &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: statefulSetName,
},
Spec: appsv1.StatefulSetSpec{
Replicas: &replicasInt32,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{podLabelName: podLabelValue},
},
ServiceName: serviceName,
PodManagementPolicy: appsv1.ParallelPodManagement, // We want changes to happen fast, there isn't really state to maintain.
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
podLabelName: podLabelValue,
owningHeadlessServiceLabelName: serviceName,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: statefulSetName + "-container",
Image: "nicolaka/netshoot:latest",
Ports: []v1.ContainerPort{{
ContainerPort: 8443,
}},
Command: []string{
"/bin/sh",
"-c",
`openssl s_server -port 8443 -cert /srv/certificates/tls.crt -key /srv/certificates/tls.key -www`,
},
WorkingDir: "/",
VolumeMounts: []v1.VolumeMount{{
Name: "serving-cert",
MountPath: "/srv/certificates",
}},
}},
Volumes: []v1.Volume{{
Name: "serving-cert",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
}},
},
},
},
}, metav1.CreateOptions{})
return err
}
func createAnnotatedCABundleInjectionConfigMap(client *kubernetes.Clientset, configMapName, namespace string) error {
obj := &v1.ConfigMap{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: configMapName,
},
}
setInjectionAnnotation(&obj.ObjectMeta)
_, err := client.CoreV1().ConfigMaps(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
return err
}
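// pollForServiceServingSecret waits up to ten seconds for the named secret to be created.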
func pollForServiceServingSecret(client *kubernetes.Clientset, secretName, namespace string) error {
return wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
_, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
})
}
func pollForCABundleInjectionConfigMap(client *kubernetes.Clientset, configMapName, namespace string) error {
return wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
_, err := client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMapName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
})
}
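// editServingSecretData overwrites one key of the serving cert secret and waits for the
// controller to regenerate it.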
func editServingSecretData(client *kubernetes.Clientset, secretName, namespace, keyName string) error {
sss, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
if err != nil {
return err
}
scopy := sss.DeepCopy()
scopy.Data[keyName] = []byte("blah")
_, err = client.CoreV1().Secrets(namespace).Update(context.TODO(), scopy, metav1.UpdateOptions{})
if err != nil {
return err
}
return pollForSecretChange(client, scopy, keyName)
}
func editConfigMapCABundleInjectionData(client *kubernetes.Clientset, configMapName, namespace string) error {
cm, err := client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMapName, metav1.GetOptions{})
if err != nil {
return err
}
cmcopy := cm.DeepCopy()
if len(cmcopy.Data) != 1 {
return fmt.Errorf("ca bundle injection configmap missing data")
}
cmcopy.Data["foo"] = "blah"
_, err = client.CoreV1().ConfigMaps(namespace).Update(context.TODO(), cmcopy, metav1.UpdateOptions{})
if err != nil {
return err
}
return pollForConfigMapChange(client, cmcopy, "foo")
}
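// checkServiceServingCertSecretData verifies that the secret contains exactly a TLS cert
// and key, returning the cert bytes and whether the certificate parses successfully.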
func checkServiceServingCertSecretData(client *kubernetes.Clientset, secretName, namespace string) ([]byte, bool, error) {
sss, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
if err != nil {
return nil, false, err
}
if len(sss.Data) != 2 {
return nil, false, fmt.Errorf("unexpected service serving secret data map length: %v", len(sss.Data))
}
certBytes, ok := sss.Data[v1.TLSCertKey]
if !ok {
return nil, false, fmt.Errorf("unexpected service serving secret data: %v", sss.Data)
}
_, ok = sss.Data[v1.TLSPrivateKeyKey]
if !ok {
return nil, false, fmt.Errorf("unexpected service serving secret data: %v", sss.Data)
}
block, _ := pem.Decode(certBytes)
if block == nil {
return nil, false, fmt.Errorf("unable to decode TLSCertKey bytes")
}
_, err = x509.ParseCertificate(block.Bytes)
if err != nil {
return certBytes, false, nil
}
return certBytes, true, nil
}
func checkConfigMapCABundleInjectionData(client *kubernetes.Clientset, configMapName, namespace string) error {
cm, err := client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMapName, metav1.GetOptions{})
if err != nil {
return err
}
if len(cm.Data) != 1 {
return fmt.Errorf("unexpected ca bundle injection configmap data map length: %v", len(cm.Data))
}
ok := true
_, ok = cm.Data[api.InjectionDataKey]
if !ok {
return fmt.Errorf("unexpected ca bundle injection configmap data: %v", cm.Data)
}
return nil
}
func pollForConfigMapCAInjection(client *kubernetes.Clientset, configMapName, namespace string) error {
return wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
cm, err := client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMapName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
if len(cm.Data) != 1 {
return false, nil
}
_, ok := cm.Data[api.InjectionDataKey]
if !ok {
return false, nil
}
return true, nil
})
}
func pollForServiceServingSecretWithReturn(client *kubernetes.Clientset, secretName, namespace string) (*v1.Secret, error) {
var secret *v1.Secret
err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
s, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
secret = s
return true, nil
})
return secret, err
}
func pollForCABundleInjectionConfigMapWithReturn(client *kubernetes.Clientset, configMapName, namespace string) (*v1.ConfigMap, error) {
var configmap *v1.ConfigMap
err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
cm, err := client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMapName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
configmap = cm
return true, nil
})
return configmap, err
}
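// pollForSecretChange waits up to two minutes for the given keys of the secret to differ
// from the values in the provided snapshot.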
func pollForSecretChange(client *kubernetes.Clientset, secret *v1.Secret, keysToChange ...string) error {
return wait.PollImmediate(time.Second, 2*time.Minute, func() (bool, error) {
s, err := client.CoreV1().Secrets(secret.Namespace).Get(context.TODO(), secret.Name, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
for _, key := range keysToChange {
if bytes.Equal(s.Data[key], secret.Data[key]) {
return false, nil
}
}
return true, nil
})
}
func pollForConfigMapChange(client *kubernetes.Clientset, compareConfigMap *v1.ConfigMap, keysToChange ...string) error {
return wait.PollImmediate(time.Second, 2*time.Minute, func() (bool, error) {
cm, err := client.CoreV1().ConfigMaps(compareConfigMap.Namespace).Get(context.TODO(), compareConfigMap.Name, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, nil
}
for _, key := range keysToChange {
if cm.Data[key] == compareConfigMap.Data[key] {
return false, nil
}
}
return true, nil
})
}
type triggerRotationFunc func(*testing.T, *kubernetes.Clientset, *rest.Config)
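// checkCARotation creates serving-cert annotated services (one of them headless) and an
// injected configmap, captures the pre-rotation certs and CA bundle, triggers a rotation,
// and then verifies that the regenerated material validates for the expected DNS names.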
func checkCARotation(t *testing.T, client *kubernetes.Clientset, config *rest.Config, triggerRotation triggerRotationFunc) {
ns, cleanup, err := createTestNamespace(t, client, "test-"+randSeq(5))
if err != nil {
t.Fatalf("could not create test namespace: %v", err)
}
defer cleanup()
// Prompt the creation of service cert secrets
testServiceName := "test-service-" + randSeq(5)
testSecretName := "test-secret-" + randSeq(5)
testHeadlessServiceName := "test-headless-service-" + randSeq(5)
testHeadlessSecretName := "test-headless-secret-" + randSeq(5)
err = createServingCertAnnotatedService(client, testSecretName, testServiceName, ns.Name, false)
if err != nil {
t.Fatalf("error creating annotated service: %v", err)
}
if err = createServingCertAnnotatedService(client, testHeadlessSecretName, testHeadlessServiceName, ns.Name, true); err != nil {
t.Fatalf("error creating annotated headless service: %v", err)
}
// Prompt the injection of the ca bundle into a configmap
testConfigMapName := "test-configmap-" + randSeq(5)
err = createAnnotatedCABundleInjectionConfigMap(client, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error creating annotated configmap: %v", err)
}
// Retrieve the pre-rotation service cert
oldCertPEM, oldKeyPEM, err := pollForUpdatedServingCert(t, client, ns.Name, testSecretName, rotationPollTimeout, nil, nil)
if err != nil {
t.Fatalf("error retrieving service cert: %v", err)
}
oldHeadlessCertPEM, oldHeadlessKeyPEM, err := pollForUpdatedServingCert(t, client, ns.Name, testHeadlessSecretName, rotationPollTimeout, nil, nil)
if err != nil {
t.Fatalf("error retrieving headless service cert: %v", err)
}
// Retrieve the pre-rotation ca bundle
oldBundlePEM, err := pollForInjectedCABundle(t, client, ns.Name, testConfigMapName, rotationPollTimeout, nil)
if err != nil {
t.Fatalf("error retrieving ca bundle: %v", err)
}
// Prompt CA rotation
triggerRotation(t, client, config)
// Retrieve the post-rotation service cert
newCertPEM, newKeyPEM, err := pollForUpdatedServingCert(t, client, ns.Name, testSecretName, rotationTimeout, oldCertPEM, oldKeyPEM)
if err != nil {
t.Fatalf("error retrieving service cert: %v", err)
}
newHeadlessCertPEM, newHeadlessKeyPEM, err := pollForUpdatedServingCert(t, client, ns.Name, testHeadlessSecretName, rotationTimeout, oldHeadlessCertPEM, oldHeadlessKeyPEM)
if err != nil {
t.Fatalf("error retrieving headless service cert: %v", err)
}
// Retrieve the post-rotation ca bundle
newBundlePEM, err := pollForInjectedCABundle(t, client, ns.Name, testConfigMapName, rotationTimeout, oldBundlePEM)
if err != nil {
t.Fatalf("error retrieving ca bundle: %v", err)
}
// Determine the dns name valid for the serving cert
certs, err := util.PemToCerts(newCertPEM)
if err != nil {
t.Fatalf("error decoding pem to certs: %v", err)
}
dnsName := certs[0].Subject.CommonName
util.CheckRotation(t, dnsName, oldCertPEM, oldKeyPEM, oldBundlePEM, newCertPEM, newKeyPEM, newBundlePEM)
for i := 0; i < 3; i++ { // 3 is an arbitrary number of hostnames to try
dnsName := fmt.Sprintf("some-statefulset-%d.%s.%s.svc", i, testHeadlessServiceName, ns.Name)
util.CheckRotation(t, dnsName, oldHeadlessCertPEM, oldHeadlessKeyPEM, oldBundlePEM, newHeadlessCertPEM, newHeadlessKeyPEM, newBundlePEM)
}
}
// triggerTimeBasedRotation replaces the current CA cert with one that
// is not valid for the minimum required duration and waits for the CA
// to be rotated.
func triggerTimeBasedRotation(t *testing.T, client *kubernetes.Clientset, config *rest.Config) {
// A rotation-prompting CA cert needs to be a renewed instance
// (i.e. share the same public and private keys) of the current
// cert to ensure that trust will be maintained for unrefreshed
// clients and servers.
// Retrieve current CA
secret, err := client.CoreV1().Secrets(serviceCAControllerNamespace).Get(context.TODO(), signingKeySecretName, metav1.GetOptions{})
if err != nil {
t.Fatalf("error retrieving signing key secret: %v", err)
}
currentCACerts, err := util.PemToCerts(secret.Data[v1.TLSCertKey])
if err != nil {
t.Fatalf("error unmarshaling %q: %v", v1.TLSCertKey, err)
}
currentCAKey, err := util.PemToKey(secret.Data[v1.TLSPrivateKeyKey])
if err != nil {
t.Fatalf("error unmarshalling %q: %v", v1.TLSPrivateKeyKey, err)
}
currentCAConfig := &crypto.TLSCertificateConfig{
Certs: currentCACerts,
Key: currentCAKey,
}
// Trigger rotation by renewing the current ca with an expiry that
// is sooner than the minimum required duration.
renewedCAConfig, err := operator.RenewSelfSignedCertificate(currentCAConfig, 1*time.Hour, true)
if err != nil {
t.Fatalf("error renewing ca to half-expired form: %v", err)
}
renewedCACertPEM, renewedCAKeyPEM, err := renewedCAConfig.GetPEMBytes()
if err != nil {
t.Fatalf("error encoding renewed ca to pem: %v", err)
}
// Write the renewed CA
secret = &v1.Secret{
Type: v1.SecretTypeTLS,
ObjectMeta: metav1.ObjectMeta{
Name: signingKeySecretName,
Namespace: serviceCAControllerNamespace,
},
Data: map[string][]byte{
v1.TLSCertKey: renewedCACertPEM,
v1.TLSPrivateKeyKey: renewedCAKeyPEM,
},
}
_, _, err = resourceapply.ApplySecret(context.Background(), client.CoreV1(), events.NewInMemoryRecorder("test"), secret)
if err != nil {
t.Fatalf("error updating secret with test CA: %v", err)
}
_ = pollForCARotation(t, client, renewedCACertPEM, renewedCAKeyPEM)
}
// triggerForcedRotation forces the rotation of the current CA via the
// operator config.
func triggerForcedRotation(t *testing.T, client *kubernetes.Clientset, config *rest.Config) {
// Retrieve the cert and key PEM of the current CA to be able to
// detect when rotation has completed.
secret, err := client.CoreV1().Secrets(serviceCAControllerNamespace).Get(context.TODO(), signingKeySecretName, metav1.GetOptions{})
if err != nil {
t.Fatalf("error retrieving signing key secret: %v", err)
}
caCertPEM := secret.Data[v1.TLSCertKey]
caKeyPEM := secret.Data[v1.TLSPrivateKeyKey]
// Set a custom validity duration longer than the default to
// validate that a custom expiry on rotation is possible.
defaultDuration := operator.SigningCertificateLifetimeInDays * time.Hour * 24
customDuration := defaultDuration + 1*time.Hour
// Trigger a forced rotation by updating the operator config
// with a reason.
forceUnsupportedServiceCAConfigRotation(t, config, secret, customDuration)
signingSecret := pollForCARotation(t, client, caCertPEM, caKeyPEM)
// Check that the expiry of the new CA is longer than the default
rawCert := signingSecret.Data[v1.TLSCertKey]
certs, err := cert.ParseCertsPEM(rawCert)
if err != nil {
t.Fatalf("Failed to parse signing secret cert: %v", err)
}
if !certs[0].NotAfter.After(time.Now().Add(defaultDuration)) {
t.Fatalf("Custom validity duration was not used to generate the new CA")
}
}
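// forceUnsupportedServiceCAConfigRotation updates the operator config with an
// unsupportedConfigOverrides entry carrying a fresh force-rotation reason and the
// requested validity duration.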
func forceUnsupportedServiceCAConfigRotation(t *testing.T, config *rest.Config, currentSigningKeySecret *v1.Secret, validityDuration time.Duration) {
operatorClient, err := operatorv1client.NewForConfig(config)
if err != nil {
t.Fatalf("error creating operator client: %v", err)
}
operatorConfig, err := operatorClient.OperatorV1().ServiceCAs().Get(context.TODO(), api.OperatorConfigInstanceName, metav1.GetOptions{})
if err != nil {
t.Fatalf("error retrieving operator config: %v", err)
}
var forceRotationReason string
for i := 0; ; i++ {
forceRotationReason = fmt.Sprintf("service-ca-e2e-force-rotation-reason-%d", i)
if currentSigningKeySecret.Annotations[api.ForcedRotationReasonAnnotationName] != forceRotationReason {
break
}
}
rawUnsupportedServiceCAConfig, err := operator.RawUnsupportedServiceCAConfig(forceRotationReason, validityDuration)
if err != nil {
t.Fatalf("failed to create raw unsupported config overrides: %v", err)
}
operatorConfig.Spec.UnsupportedConfigOverrides.Raw = rawUnsupportedServiceCAConfig
_, err = operatorClient.OperatorV1().ServiceCAs().Update(context.TODO(), operatorConfig, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("error updating operator config: %v", err)
}
}
// pollForCARotation polls for the signing secret to be changed in
// response to CA rotation.
func pollForCARotation(t *testing.T, client *kubernetes.Clientset, caCertPEM, caKeyPEM []byte) *v1.Secret {
secret, err := pollForUpdatedSecret(t, client, serviceCAControllerNamespace, signingKeySecretName, rotationPollTimeout, map[string][]byte{
v1.TLSCertKey: caCertPEM,
v1.TLSPrivateKeyKey: caKeyPEM,
api.BundleDataKey: nil,
api.IntermediateDataKey: nil,
})
if err != nil {
t.Fatalf("error waiting for CA rotation: %v", err)
}
return secret
}
// pollForCARecreation polls for the signing secret to be re-created in
// response to CA secret deletion.
func pollForCARecreation(client *kubernetes.Clientset) error {
return wait.PollImmediate(time.Second, rotationPollTimeout, func() (bool, error) {
_, err := client.CoreV1().Secrets(serviceCAControllerNamespace).Get(context.TODO(), signingKeySecretName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
})
}
// pollForUpdatedServingCert returns the cert and key PEM if it changes from
// that provided before the polling timeout.
func pollForUpdatedServingCert(t *testing.T, client *kubernetes.Clientset, namespace, name string, timeout time.Duration, oldCertValue, oldKeyValue []byte) ([]byte, []byte, error) {
secret, err := pollForUpdatedSecret(t, client, namespace, name, timeout, map[string][]byte{
v1.TLSCertKey: oldCertValue,
v1.TLSPrivateKeyKey: oldKeyValue,
})
if err != nil {
return nil, nil, err
}
return secret.Data[v1.TLSCertKey], secret.Data[v1.TLSPrivateKeyKey], nil
}
// pollForUpdatedSecret returns the given secret if its data changes from
// that provided before the polling timeout.
func pollForUpdatedSecret(t *testing.T, client *kubernetes.Clientset, namespace, name string, timeout time.Duration, oldData map[string][]byte) (*v1.Secret, error) {
resourceID := fmt.Sprintf("Secret \"%s/%s\"", namespace, name)
obj, err := pollForResource(t, resourceID, timeout, func() (kruntime.Object, error) {
secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return nil, err
}
err = util.CheckData(oldData, secret.Data)
if err != nil {
return nil, err
}
return secret, nil
})
if err != nil {
return nil, err
}
return obj.(*v1.Secret), nil
}
// pollForInjectedCABundle returns the bytes for the injection key in
// the targeted configmap if the value of the key changes from that
// provided before the polling timeout.
func pollForInjectedCABundle(t *testing.T, client *kubernetes.Clientset, namespace, name string, timeout time.Duration, oldValue []byte) ([]byte, error) {
return pollForUpdatedConfigMap(t, client, namespace, name, api.InjectionDataKey, timeout, oldValue)
}
// pollForSigningCABundle returns the bytes for the bundle key of the
// signing ca bundle configmap if the value is non-empty before the
// polling timeout.
func pollForSigningCABundle(t *testing.T, client *kubernetes.Clientset) ([]byte, error) {
return pollForUpdatedConfigMap(t, client, serviceCAControllerNamespace, api.SigningCABundleConfigMapName, api.BundleDataKey, pollTimeout, nil)
}
// pollForUpdatedConfigMap returns the given configmap if its data changes from
// that provided before the polling timeout.
func pollForUpdatedConfigMap(t *testing.T, client *kubernetes.Clientset, namespace, name, key string, timeout time.Duration, oldValue []byte) ([]byte, error) {
resourceID := fmt.Sprintf("ConfigMap \"%s/%s\"", namespace, name)
expectedDataSize := 1
obj, err := pollForResource(t, resourceID, timeout, func() (kruntime.Object, error) {
configMap, err := client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if len(configMap.Data) != expectedDataSize {
return nil, fmt.Errorf("expected data size %d, got %d", expectedDataSize, len(configMap.Data))
}
value, ok := configMap.Data[key]
if !ok {
return nil, fmt.Errorf("key %q is missing", key)
}
if value == string(oldValue) {
return nil, fmt.Errorf("value for key %q has not changed", key)
}
return configMap, nil
})
if err != nil {
return nil, err
}
configMap := obj.(*v1.ConfigMap)
return []byte(configMap.Data[key]), nil
}
// pollForAPIService returns the specified APIService if its ca bundle
// matches the provided value before the polling timeout.
func pollForAPIService(t *testing.T, client apiserviceclientv1.APIServiceInterface, name string, expectedCABundle []byte) (*apiregv1.APIService, error) {
resourceID := fmt.Sprintf("APIService %q", name)
obj, err := pollForResource(t, resourceID, pollTimeout, func() (kruntime.Object, error) {
apiService, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return nil, err
}
actualCABundle := apiService.Spec.CABundle
if len(actualCABundle) == 0 {
return nil, fmt.Errorf("ca bundle not injected")
}
if !bytes.Equal(actualCABundle, expectedCABundle) {
return nil, fmt.Errorf("ca bundle does not match the expected value")
}
return apiService, nil
})
if err != nil {
return nil, err
}
return obj.(*apiregv1.APIService), nil
}
// pollForCRD returns the specified CustomResourceDefinition if the ca
// bundle for its conversion webhook config matches the provided value
// before the polling timeout.
func pollForCRD(t *testing.T, client apiextclient.CustomResourceDefinitionInterface, name string, expectedCABundle []byte) (*apiext.CustomResourceDefinition, error) {
resourceID := fmt.Sprintf("CustomResourceDefinition %q", name)
obj, err := pollForResource(t, resourceID, pollTimeout, func() (kruntime.Object, error) {
crd, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if crd.Spec.Conversion == nil || crd.Spec.Conversion.Webhook == nil || crd.Spec.Conversion.Webhook.ClientConfig == nil {
return nil, fmt.Errorf("spec.conversion.webhook.clientConfig not set")
}
actualCABundle := crd.Spec.Conversion.Webhook.ClientConfig.CABundle
if len(actualCABundle) == 0 {
return nil, fmt.Errorf("ca bundle not injected")
}
if !bytes.Equal(actualCABundle, expectedCABundle) {
return nil, fmt.Errorf("ca bundle does not match the expected value")
}
return crd, nil
})
if err != nil {
return nil, err
}
return obj.(*apiext.CustomResourceDefinition), nil
}
// pollForMutatingWebhookConfiguration returns the specified
// MutatingWebhookConfiguration if the ca bundle for all its webhooks match the
// provided value before the polling timeout.
func pollForMutatingWebhookConfiguration(t *testing.T, client admissionregclient.MutatingWebhookConfigurationInterface, name string, expectedCABundle []byte) (*admissionreg.MutatingWebhookConfiguration, error) {
resourceID := fmt.Sprintf("MutatingWebhookConfiguration %q", name)
obj, err := pollForResource(t, resourceID, pollTimeout, func() (kruntime.Object, error) {
webhookConfig, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return nil, err
}
for _, webhook := range webhookConfig.Webhooks {
err := checkWebhookCABundle(webhook.Name, expectedCABundle, webhook.ClientConfig.CABundle)
if err != nil {
return nil, err
}
}
return webhookConfig, nil
})
if err != nil {
return nil, err
}
return obj.(*admissionreg.MutatingWebhookConfiguration), nil
}
// pollForValidatingWebhookConfiguration returns the specified
// ValidatingWebhookConfiguration if the ca bundles for all its webhooks match the
// provided value before the polling timeout.
func pollForValidatingWebhookConfiguration(t *testing.T, client admissionregclient.ValidatingWebhookConfigurationInterface, name string, expectedCABundle []byte) (*admissionreg.ValidatingWebhookConfiguration, error) {
resourceID := fmt.Sprintf("ValidatingWebhookConfiguration %q", name)
obj, err := pollForResource(t, resourceID, pollTimeout, func() (kruntime.Object, error) {
webhookConfig, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return nil, err
}
for _, webhook := range webhookConfig.Webhooks {
err := checkWebhookCABundle(webhook.Name, expectedCABundle, webhook.ClientConfig.CABundle)
if err != nil {
return nil, err
}
}
return webhookConfig, nil
})
if err != nil {
return nil, err
}
return obj.(*admissionreg.ValidatingWebhookConfiguration), nil
}
// checkWebhookCABundle checks that the ca bundle for the named webhook matches
// the expected value.
func checkWebhookCABundle(webhookName string, expectedCABundle, actualCABundle []byte) error {
if len(actualCABundle) == 0 {
return fmt.Errorf("ca bundle not injected for webhook %q", webhookName)
}
if !bytes.Equal(actualCABundle, expectedCABundle) {
return fmt.Errorf("ca bundle does match the expected value for webhook %q", webhookName)
}
return nil
}
// setInjectionAnnotation sets the annotation that will trigger the
// injection of a ca bundle.
func setInjectionAnnotation(objMeta *metav1.ObjectMeta) {
if objMeta.Annotations == nil {
objMeta.Annotations = map[string]string{}
}
objMeta.Annotations[api.InjectCABundleAnnotationName] = "true"
}
// pollForResource returns a kruntime.Object if the accessor returns without error before the timeout.
func pollForResource(t *testing.T, resourceID string, timeout time.Duration, accessor func() (kruntime.Object, error)) (kruntime.Object, error) {
var obj kruntime.Object
err := wait.PollImmediate(pollInterval, timeout, func() (bool, error) {
o, err := accessor()
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
t.Logf("an error occurred while polling for %s: %v", resourceID, err)
return false, nil
}
obj = o
return true, nil
})
return obj, err
}
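// checkClientPodRcvdUpdatedServerCert repeatedly launches a client pod that connects to host:port
// with openssl s_client (trusting only the injected service-ca.crt) and succeeds once the pod
// reports a serving certificate contained in updatedServerCert.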
func checkClientPodRcvdUpdatedServerCert(t *testing.T, client *kubernetes.Clientset, testNS, host string, port int32, updatedServerCert string) {
timeout := 5 * time.Minute
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
podName := "client-pod-" + randSeq(5)
_, err := client.CoreV1().Pods(testNS).Create(context.TODO(), &v1.Pod{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: testNS,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "cert-checker",
Image: "nicolaka/netshoot:latest",
Command: []string{"/bin/bash"},
Args: []string{"-c", fmt.Sprintf("openssl s_client -no-CApath -no-CAfile -CAfile /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt "+
"-verify_return_error -verify_hostname %s -showcerts -connect %s:%d < /dev/null 2>/dev/null | openssl x509", host, host, port)},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
}, metav1.CreateOptions{})
if err != nil {
t.Logf("creating client pod failed: %v", err)
return false, nil
}
defer deletePod(t, client, podName, testNS)
err = waitForPodPhase(t, client, podName, testNS, v1.PodSucceeded)
if err != nil {
t.Logf("wait on pod to complete failed: %v", err)
return false, nil
}
serverCertClientReceived, err := getPodLogs(t, client, podName, testNS)
if err != nil {
t.Logf("fetching pod logs failed: %v", err)
return false, nil
}
return strings.Contains(updatedServerCert, serverCertClientReceived), nil
})
if err != nil {
t.Fatalf("failed to verify updated certs within timeout(%v)", timeout)
}
}
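// waitForPodPhase polls until the named pod reaches the given phase.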
func waitForPodPhase(t *testing.T, client *kubernetes.Clientset, name, namespace string, phase v1.PodPhase) error {
return wait.PollImmediate(10*time.Second, time.Minute, func() (bool, error) {
pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
t.Logf("fetching test pod from apiserver failed: %v", err)
return false, nil
}
return pod.Status.Phase == phase, nil
})
}
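// getPodLogs returns the logs of the named pod as a string.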
func getPodLogs(t *testing.T, client *kubernetes.Clientset, name, namespace string) (string, error) {
rc, err := client.CoreV1().Pods(namespace).GetLogs(name, &v1.PodLogOptions{}).Stream(context.TODO())
if err != nil {
return "", err
}
defer rc.Close()
buf := new(bytes.Buffer)
_, err = buf.ReadFrom(rc)
if err != nil {
return "", err
}
return buf.String(), nil
}
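// deletePod deletes the named pod, tolerating the case where it is already gone.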
func deletePod(t *testing.T, client *kubernetes.Clientset, name, namespace string) {
err := client.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
if errors.IsNotFound(err) {
return
}
if err != nil {
t.Errorf("failed to delete pod: %v", err)
}
}
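// pollForRunningStatefulSet waits until the StatefulSet has observed its latest generation
// and all of its replicas report ready.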
func pollForRunningStatefulSet(t *testing.T, client *kubernetes.Clientset, statefulSetName, namespace string, timeout time.Duration) error {
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
set, err := client.AppsV1().StatefulSets(namespace).Get(context.TODO(), statefulSetName, metav1.GetOptions{})
if err != nil {
t.Logf("fetching StatefulSet failed: %v", err)
return false, err
}
res := set.Status.ObservedGeneration == set.Generation &&
set.Status.ReadyReplicas == *set.Spec.Replicas
return res, nil
})
if err != nil {
t.Logf("error waiting for StatefulSet restart: %v", err)
}
return err
}
// newPrometheusClientForConfig returns a new prometheus client for
// the provided kubeconfig.
func newPrometheusClientForConfig(config *rest.Config) (prometheusv1.API, error) {
routeClient, err := routeclient.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error creating route client: %v", err)
}
kubeClient, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error creating kube client: %v", err)
}
return metrics.NewPrometheusClient(context.TODO(), kubeClient, routeClient)
}
// checkMetricsCollection tests whether metrics are being successfully scraped from at
// least one target in a namespace.
func checkMetricsCollection(t *testing.T, promClient prometheusv1.API, namespace string) {
// Metrics are scraped every 30s. Wait as long as 2 intervals to avoid failing if
// the target is temporarily unhealthy.
timeout := 60 * time.Second
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
query := fmt.Sprintf("up{namespace=\"%s\"}", namespace)
resultVector, err := runPromQueryForVector(t, promClient, query, time.Now())
if err != nil {
t.Errorf("failed to execute prometheus query: %v", err)
return false, nil
}
metricsCollected := false
for _, sample := range resultVector {
metricsCollected = sample.Value == 1
if metricsCollected {
// Metrics are successfully being scraped for at least one target in the namespace
break
}
}
return metricsCollected, nil
})
if err != nil {
t.Fatalf("Health check of metrics collection in namespace %s did not succeed within %v", serviceCAOperatorNamespace, timeout)
}
}
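// runPromQueryForVector executes an instant prometheus query and returns the result as a vector.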
func runPromQueryForVector(t *testing.T, promClient prometheusv1.API, query string, sampleTime time.Time) (model.Vector, error) {
results, warnings, err := promClient.Query(context.Background(), query, sampleTime)
if err != nil {
return nil, err
}
if len(warnings) > 0 {
t.Logf("prometheus query emitted warnings: %v", warnings)
}
result, ok := results.(model.Vector)
if !ok {
return nil, fmt.Errorf("expecting vector type result, found: %v ", reflect.TypeOf(results))
}
return result, nil
}
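// getSampleForPromQuery runs an instant query and returns its first sample, failing if nothing matched.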
func getSampleForPromQuery(t *testing.T, promClient prometheusv1.API, query string, sampleTime time.Time) (*model.Sample, error) {
res, err := runPromQueryForVector(t, promClient, query, sampleTime)
if err != nil {
return nil, err
}
if len(res) == 0 {
return nil, fmt.Errorf("no matching metrics found for query %s", query)
}
return res[0], nil
}
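// checkServiceCAMetrics verifies that the service_ca_expiry_time_seconds metric reports the
// NotAfter timestamp of the current signing CA certificate.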
func checkServiceCAMetrics(t *testing.T, client *kubernetes.Clientset, promClient prometheusv1.API) {
timeout := 60 * time.Second
secret, err := client.CoreV1().Secrets(serviceCAControllerNamespace).Get(context.TODO(), signingKeySecretName, metav1.GetOptions{})
if err != nil {
t.Fatalf("error retrieving signing key secret: %v", err)
}
currentCACerts, err := util.PemToCerts(secret.Data[v1.TLSCertKey])
if err != nil {
t.Fatalf("error unmarshaling %q: %v", v1.TLSCertKey, err)
}
if len(currentCACerts) == 0 {
t.Fatalf("no signing keys found")
}
want := currentCACerts[0].NotAfter
err = wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
rawExpiryTime, err := getSampleForPromQuery(t, promClient, `service_ca_expiry_time_seconds`, time.Now())
if err != nil {
t.Logf("failed to get sample value: %v", err)
return false, nil
}
if rawExpiryTime.Value == 0 { // The operator is starting
t.Logf("got zero value")
return false, nil
}
if float64(want.Unix()) != float64(rawExpiryTime.Value) {
t.Fatalf("service ca expiry time mismatch expected %v observed %v", float64(want.Unix()), float64(rawExpiryTime.Value))
}
return true, nil
})
if err != nil {
t.Fatalf("service ca expiry timer metrics collection failed: %v", err)
}
}
func TestE2E(t *testing.T) {
// use /tmp/admin.conf (placed by ci-operator) or KUBECONFIG env
confPath := "/tmp/admin.conf"
if conf := os.Getenv("KUBECONFIG"); conf != "" {
confPath = conf
}
// load client
client, err := clientcmd.LoadFromFile(confPath)
if err != nil {
t.Fatalf("error loading config: %v", err)
}
adminConfig, err := clientcmd.NewDefaultClientConfig(*client, &clientcmd.ConfigOverrides{}).ClientConfig()
if err != nil {
t.Fatalf("error loading admin config: %v", err)
}
adminClient, err := kubernetes.NewForConfig(adminConfig)
if err != nil {
t.Fatalf("error getting admin client: %v", err)
}
// the service-serving-cert-operator and controllers should be running as a stock OpenShift component. our first test is to
// verify that all of the components are running.
checkComponents(t, adminClient)
// test the main feature. annotate service -> created secret
t.Run("serving-cert-annotation", func(t *testing.T) {
for _, headless := range []bool{false, true} {
t.Run(fmt.Sprintf("headless=%v", headless), func(t *testing.T) {
ns, cleanup, err := createTestNamespace(t, adminClient, "test-"+randSeq(5))
if err != nil {
t.Fatalf("could not create test namespace: %v", err)
}
defer cleanup()
testServiceName := "test-service-" + randSeq(5)
testSecretName := "test-secret-" + randSeq(5)
err = createServingCertAnnotatedService(adminClient, testSecretName, testServiceName, ns.Name, headless)
if err != nil {
t.Fatalf("error creating annotated service: %v", err)
}
err = pollForServiceServingSecret(adminClient, testSecretName, ns.Name)
if err != nil {
t.Fatalf("error fetching created serving cert secret: %v", err)
}
_, is509, err := checkServiceServingCertSecretData(adminClient, testSecretName, ns.Name)
if err != nil {
t.Fatalf("error when checking serving cert secret: %v", err)
}
if !is509 {
t.Fatalf("TLSCertKey not valid pem bytes")
}
})
}
})
// test modified data in serving-cert-secret will regenerated
t.Run("serving-cert-secret-modify-bad-tlsCert", func(t *testing.T) {
for _, headless := range []bool{false, true} {
t.Run(fmt.Sprintf("headless=%v", headless), func(t *testing.T) {
ns, cleanup, err := createTestNamespace(t, adminClient, "test-"+randSeq(5))
if err != nil {
t.Fatalf("could not create test namespace: %v", err)
}
defer cleanup()
testServiceName := "test-service-" + randSeq(5)
testSecretName := "test-secret-" + randSeq(5)
err = createServingCertAnnotatedService(adminClient, testSecretName, testServiceName, ns.Name, headless)
if err != nil {
t.Fatalf("error creating annotated service: %v", err)
}
err = pollForServiceServingSecret(adminClient, testSecretName, ns.Name)
if err != nil {
t.Fatalf("error fetching created serving cert secret: %v", err)
}
originalBytes, _, err := checkServiceServingCertSecretData(adminClient, testSecretName, ns.Name)
if err != nil {
t.Fatalf("error when checking serving cert secret: %v", err)
}
err = editServingSecretData(adminClient, testSecretName, ns.Name, v1.TLSCertKey)
if err != nil {
t.Fatalf("error editing serving cert secret: %v", err)
}
updatedBytes, is509, err := checkServiceServingCertSecretData(adminClient, testSecretName, ns.Name)
if err != nil {
t.Fatalf("error when checking serving cert secret: %v", err)
}
if bytes.Equal(originalBytes, updatedBytes) {
t.Fatalf("expected TLSCertKey to be replaced with valid pem bytes")
}
if !is509 {
t.Fatalf("TLSCertKey not valid pem bytes")
}
})
}
})
// test extra data in serving-cert-secret will be removed
t.Run("serving-cert-secret-add-data", func(t *testing.T) {
for _, headless := range []bool{false, true} {
t.Run(fmt.Sprintf("headless=%v", headless), func(t *testing.T) {
ns, cleanup, err := createTestNamespace(t, adminClient, "test-"+randSeq(5))
if err != nil {
t.Fatalf("could not create test namespace: %v", err)
}
defer cleanup()
testServiceName := "test-service-" + randSeq(5)
testSecretName := "test-secret-" + randSeq(5)
err = createServingCertAnnotatedService(adminClient, testSecretName, testServiceName, ns.Name, headless)
if err != nil {
t.Fatalf("error creating annotated service: %v", err)
}
err = pollForServiceServingSecret(adminClient, testSecretName, ns.Name)
if err != nil {
t.Fatalf("error fetching created serving cert secret: %v", err)
}
originalBytes, _, err := checkServiceServingCertSecretData(adminClient, testSecretName, ns.Name)
if err != nil {
t.Fatalf("error when checking serving cert secret: %v", err)
}
err = editServingSecretData(adminClient, testSecretName, ns.Name, "foo")
if err != nil {
t.Fatalf("error editing serving cert secret: %v", err)
}
updatedBytes, _, err := checkServiceServingCertSecretData(adminClient, testSecretName, ns.Name)
if err != nil {
t.Fatalf("error when checking serving cert secret: %v", err)
}
if !bytes.Equal(originalBytes, updatedBytes) {
t.Fatalf("did not expect TLSCertKey to be replaced with a new cert")
}
})
}
})
// make sure that deleting service-cert-secret regenerates a secret again,
// and that the secret allows successful connections in practice.
t.Run("serving-cert-secret-delete-data", func(t *testing.T) {
serviceName := "metrics"
operatorNamespace := "openshift-service-ca-operator"
ns, cleanup, err := createTestNamespace(t, adminClient, "test-"+randSeq(5))
if err != nil {
t.Fatalf("could not create test namespace: %v", err)
}
defer cleanup()
service, err := adminClient.CoreV1().Services(operatorNamespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
if err != nil {
t.Fatalf("fetching service from apiserver failed: %v", err)
}
secretName, ok := service.ObjectMeta.Annotations[api.ServingCertSecretAnnotation]
if !ok {
t.Fatalf("secret name not found in service annotations")
}
err = adminClient.CoreV1().Secrets(operatorNamespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{})
if err != nil {
t.Fatalf("deleting secret %s in namespace %s failed: %v", secretName, operatorNamespace, err)
}
updatedBytes, _, err := pollForUpdatedServingCert(t, adminClient, operatorNamespace, secretName, rotationPollTimeout, nil, nil)
if err != nil {
t.Fatalf("error fetching re-created serving cert secret: %v", err)
}
metricsHost := fmt.Sprintf("%s.%s.svc", service.Name, service.Namespace)
checkClientPodRcvdUpdatedServerCert(t, adminClient, ns.Name, metricsHost, service.Spec.Ports[0].Port, string(updatedBytes))
})
// make sure that deleting a service-cert-secret regenerates a secret again,
// and that the secret allows successful connections in practice.
t.Run("headless-stateful-serving-cert-secret-delete-data", func(t *testing.T) {
ns, cleanup, err := createTestNamespace(t, adminClient, "test-"+randSeq(5))
if err != nil {
t.Fatalf("could not create test namespace: %v", err)
}
defer cleanup()
testServiceName := "test-service-" + randSeq(5)
testStatefulSetName := "test-statefulset-" + randSeq(5)
testStatefulSetSize := 3
testSecretName := "test-secret-" + randSeq(5)
if err = createServingCertAnnotatedService(adminClient, testSecretName, testServiceName, ns.Name, true); err != nil {
t.Fatalf("error creating headless service: %v", err)
}
oldSecret, err := pollForServiceServingSecretWithReturn(adminClient, testSecretName, ns.Name)
if err != nil {
t.Fatalf("error fetching created serving cert secret: %v", err)
}
err = adminClient.CoreV1().Secrets(ns.Name).Delete(context.TODO(), testSecretName, metav1.DeleteOptions{})
if err != nil {
t.Fatalf("deleting secret %s in namespace %s failed: %v", testSecretName, ns.Name, err)
}
newCertPEM, _, err := pollForUpdatedServingCert(t, adminClient, ns.Name, testSecretName, rotationPollTimeout,
oldSecret.Data[v1.TLSCertKey], oldSecret.Data[v1.TLSPrivateKeyKey])
if err != nil {
t.Fatalf("error fetching re-created serving cert secret: %v", err)
}
if err := createStatefulSet(adminClient, testSecretName, testStatefulSetName, testServiceName, ns.Name, testStatefulSetSize); err != nil {
t.Fatalf("error creating annotated StatefulSet: %v", err)
}
if err := pollForRunningStatefulSet(t, adminClient, testStatefulSetName, ns.Name, 1*time.Minute); err != nil {
t.Fatalf("error starting StatefulSet: %v", err)
}
// Individual StatefulSet pods are reachable using the generated certificate
for i := 0; i < testStatefulSetSize; i++ {
host := fmt.Sprintf("%s-%d.%s.%s.svc", testStatefulSetName, i, testServiceName, ns.Name)
checkClientPodRcvdUpdatedServerCert(t, adminClient, ns.Name, host, 8443, string(newCertPEM))
}
// The (headless) service is reachable using the generated certificate
host := fmt.Sprintf("%s.%s.svc", testServiceName, ns.Name)
checkClientPodRcvdUpdatedServerCert(t, adminClient, ns.Name, host, 8443, string(newCertPEM))
})
// test ca bundle injection configmap
t.Run("ca-bundle-injection-configmap", func(t *testing.T) {
ns, cleanup, err := createTestNamespace(t, adminClient, "test-"+randSeq(5))
if err != nil {
t.Fatalf("could not create test namespace: %v", err)
}
defer cleanup()
testConfigMapName := "test-configmap-" + randSeq(5)
err = createAnnotatedCABundleInjectionConfigMap(adminClient, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error creating annotated configmap: %v", err)
}
err = pollForCABundleInjectionConfigMap(adminClient, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error fetching ca bundle injection configmap: %v", err)
}
err = checkConfigMapCABundleInjectionData(adminClient, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error when checking ca bundle injection configmap: %v", err)
}
})
// test updated data in ca bundle injection configmap will be stomped on
t.Run("ca-bundle-injection-configmap-update", func(t *testing.T) {
ns, cleanup, err := createTestNamespace(t, adminClient, "test-"+randSeq(5))
if err != nil {
t.Fatalf("could not create test namespace: %v", err)
}
defer cleanup()
testConfigMapName := "test-configmap-" + randSeq(5)
err = createAnnotatedCABundleInjectionConfigMap(adminClient, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error creating annotated configmap: %v", err)
}
err = pollForCABundleInjectionConfigMap(adminClient, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error fetching ca bundle injection configmap: %v", err)
}
err = checkConfigMapCABundleInjectionData(adminClient, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error when checking ca bundle injection configmap: %v", err)
}
err = editConfigMapCABundleInjectionData(adminClient, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error editing ca bundle injection configmap: %v", err)
}
err = checkConfigMapCABundleInjectionData(adminClient, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error when checking ca bundle injection configmap: %v", err)
}
})
// test vulnerable-legacy ca bundle injection configmap
t.Run("vulnerable-legacy-ca-bundle-injection-configmap", func(t *testing.T) {
ns, cleanup, err := createTestNamespace(t, adminClient, "test-"+randSeq(5))
if err != nil {
t.Fatalf("could not create test namespace: %v", err)
}
defer cleanup()
// content is never published to configmaps with names other than the one we need
neverPublished := &v1.ConfigMap{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: "test-configmap-" + randSeq(5),
Annotations: map[string]string{api.VulnerableLegacyInjectCABundleAnnotationName: "true"},
},
}
_, err = adminClient.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), neverPublished, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
// with this name, content should never be published. We wait ten seconds
err = pollForConfigMapCAInjection(adminClient, neverPublished.Name, ns.Name)
if err != wait.ErrWaitTimeout {
t.Fatal(err)
}
publishedConfigMap := &v1.ConfigMap{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: "openshift-service-ca.crt",
Annotations: map[string]string{api.VulnerableLegacyInjectCABundleAnnotationName: "true"},
},
}
publishedConfigMap, err = adminClient.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), publishedConfigMap, metav1.CreateOptions{})
// tolerate "already exists" to handle the case where we're running the e2e on a cluster that already has this
// configmap present and injected.
if err != nil && !errors.IsAlreadyExists(err) {
t.Fatal(err)
}
publishedConfigMap, err = adminClient.CoreV1().ConfigMaps(ns.Name).Get(context.TODO(), "openshift-service-ca.crt", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
// this one should be injected
err = pollForConfigMapCAInjection(adminClient, publishedConfigMap.Name, ns.Name)
if err != nil {
t.Fatal(err)
}
originalContent := publishedConfigMap.Data[api.InjectionDataKey]
_, hasNewStyleAnnotation := publishedConfigMap.Annotations[api.InjectCABundleAnnotationName]
if hasNewStyleAnnotation {
// add old injection to be sure only new is honored
publishedConfigMap.Annotations[api.VulnerableLegacyInjectCABundleAnnotationName] = "true"
publishedConfigMap, err = adminClient.CoreV1().ConfigMaps(ns.Name).Update(context.TODO(), publishedConfigMap, metav1.UpdateOptions{})
if err != nil {
t.Fatal(err)
}
} else {
// hand-off to new injector
publishedConfigMap.Annotations[api.InjectCABundleAnnotationName] = "true"
publishedConfigMap, err = adminClient.CoreV1().ConfigMaps(ns.Name).Update(context.TODO(), publishedConfigMap, metav1.UpdateOptions{})
if err != nil {
t.Fatal(err)
}
}
// the content should now change pretty quick. We sleep because it's easier than writing a new poll and I'm pressed for time
time.Sleep(5 * time.Second)
publishedConfigMap, err = adminClient.CoreV1().ConfigMaps(ns.Name).Get(context.TODO(), publishedConfigMap.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
// if we changed the injection, we should see different content
if hasNewStyleAnnotation {
if publishedConfigMap.Data[api.InjectionDataKey] != originalContent {
t.Fatal("Content switch and it should not have. The better ca bundle should win.")
}
} else {
if publishedConfigMap.Data[api.InjectionDataKey] == originalContent {
t.Fatal("Content did not update like it was supposed to. The better ca bundle should win.")
}
}
})
t.Run("metrics", func(t *testing.T) {
promClient, err := newPrometheusClientForConfig(adminConfig)
if err != nil {
t.Fatalf("error initializing prometheus client: %v", err)
}
// Test that the operator's metrics endpoint is being read by prometheus
t.Run("collection", func(t *testing.T) {
checkMetricsCollection(t, promClient, "openshift-service-ca-operator")
})
// Test that service CA metrics are collected
t.Run("service-ca-metrics", func(t *testing.T) {
checkServiceCAMetrics(t, adminClient, promClient)
})
})
t.Run("refresh-CA", func(t *testing.T) {
ns, cleanup, err := createTestNamespace(t, adminClient, "test-"+randSeq(5))
if err != nil {
t.Fatalf("could not create test namespace: %v", err)
}
defer cleanup()
// create secrets
testServiceName := "test-service-" + randSeq(5)
testSecretName := "test-secret-" + randSeq(5)
testHeadlessServiceName := "test-headless-service-" + randSeq(5)
testHeadlessSecretName := "test-headless-secret-" + randSeq(5)
err = createServingCertAnnotatedService(adminClient, testSecretName, testServiceName, ns.Name, false)
if err != nil {
t.Fatalf("error creating annotated service: %v", err)
}
if err = createServingCertAnnotatedService(adminClient, testHeadlessSecretName, testHeadlessServiceName, ns.Name, true); err != nil {
t.Fatalf("error creating annotated headless service: %v", err)
}
secret, err := pollForServiceServingSecretWithReturn(adminClient, testSecretName, ns.Name)
if err != nil {
t.Fatalf("error fetching created serving cert secret: %v", err)
}
secretCopy := secret.DeepCopy()
headlessSecret, err := pollForServiceServingSecretWithReturn(adminClient, testHeadlessSecretName, ns.Name)
if err != nil {
t.Fatalf("error fetching created serving cert secret: %v", err)
}
headlessSecretCopy := headlessSecret.DeepCopy()
// create configmap
testConfigMapName := "test-configmap-" + randSeq(5)
err = createAnnotatedCABundleInjectionConfigMap(adminClient, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error creating annotated configmap: %v", err)
}
configmap, err := pollForCABundleInjectionConfigMapWithReturn(adminClient, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error fetching ca bundle injection configmap: %v", err)
}
configmapCopy := configmap.DeepCopy()
err = checkConfigMapCABundleInjectionData(adminClient, testConfigMapName, ns.Name)
if err != nil {
t.Fatalf("error when checking ca bundle injection configmap: %v", err)
}
// delete ca secret
err = adminClient.CoreV1().Secrets(serviceCAControllerNamespace).Delete(context.TODO(), signingKeySecretName, metav1.DeleteOptions{})
if err != nil {
t.Fatalf("error deleting signing key: %v", err)
}
// make sure it's recreated
err = pollForCARecreation(adminClient)
if err != nil {
t.Fatalf("signing key was not recreated: %v", err)
}
err = pollForConfigMapChange(adminClient, configmapCopy, api.InjectionDataKey)
if err != nil {
t.Fatalf("configmap bundle did not change: %v", err)
}
err = pollForSecretChange(adminClient, secretCopy, v1.TLSCertKey, v1.TLSPrivateKeyKey)
if err != nil {
t.Fatalf("secret cert did not change: %v", err)
}
if err := pollForSecretChange(adminClient, headlessSecretCopy); err != nil {
t.Fatalf("headless secret cert did not change: %v", err)
}
})
// This test triggers rotation by updating the CA to have an
// expiry that is less than the minimum required duration and then
// validates that both refreshed and unrefreshed clients and
// servers can continue to communicate in a trusted fashion.
t.Run("time-based-ca-rotation", func(t *testing.T) {
checkCARotation(t, adminClient, adminConfig, triggerTimeBasedRotation)
})
// This test triggers rotation by updating the operator
// configuration to force rotation and then validates that both
// refreshed and unrefreshed clients and servers can continue to
// communicate in a trusted fashion.
t.Run("forced-ca-rotation", func(t *testing.T) {
checkCARotation(t, adminClient, adminConfig, triggerForcedRotation)
})
t.Run("apiservice-ca-bundle-injection", func(t *testing.T) {
client := apiserviceclient.NewForConfigOrDie(adminConfig).ApiregistrationV1().APIServices()
// Create an api service with the injection annotation
randomGroup := fmt.Sprintf("e2e-%s", randSeq(10))
version := "v1alpha1"
obj := &apiregv1.APIService{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s.%s", version, randomGroup),
},
Spec: apiregv1.APIServiceSpec{
Group: randomGroup,
Version: version,
GroupPriorityMinimum: 1,
VersionPriority: 1,
// A service must be specified for validation to
// accept a cabundle.
Service: &apiregv1.ServiceReference{
Namespace: "foo",
Name: "foo",
},
},
}
setInjectionAnnotation(&obj.ObjectMeta)
createdObj, err := client.Create(context.TODO(), obj, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating api service: %v", err)
}
defer func() {
err := client.Delete(context.TODO(), obj.Name, metav1.DeleteOptions{})
if err != nil {
t.Errorf("Failed to cleanup api service: %v", err)
}
}()
// Retrieve the expected CA bundle
expectedCABundle, err := pollForSigningCABundle(t, adminClient)
if err != nil {
t.Fatalf("error retrieving the signing ca bundle: %v", err)
}
// Wait for the expected bundle to be injected
injectedObj, err := pollForAPIService(t, client, createdObj.Name, expectedCABundle)
if err != nil {
t.Fatalf("error waiting for ca bundle to be injected: %v", err)
}
// Set an invalid ca bundle
injectedObj.Spec.CABundle = append(injectedObj.Spec.CABundle, []byte("garbage")...)
_, err = client.Update(context.TODO(), injectedObj, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("error updated api service: %v", err)
}
// Check that the expected ca bundle is restored
_, err = pollForAPIService(t, client, createdObj.Name, expectedCABundle)
if err != nil {
t.Fatalf("error waiting for ca bundle to be re-injected: %v", err)
}
})
t.Run("crd-ca-bundle-injection", func(t *testing.T) {
client := apiextclient.NewForConfigOrDie(adminConfig).CustomResourceDefinitions()
// Create a crd with the injection annotation
randomGroup := fmt.Sprintf("e2e-%s.example.com", randSeq(10))
pluralName := "cabundleinjectiontargets"
version := "v1beta1"
obj := &apiext.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s.%s", pluralName, randomGroup),
},
Spec: apiext.CustomResourceDefinitionSpec{
Group: randomGroup,
Scope: apiext.ClusterScoped,
Names: apiext.CustomResourceDefinitionNames{
Plural: pluralName,
Kind: "CABundleInjectionTarget",
},
Conversion: &apiext.CustomResourceConversion{
// CA bundle will only be injected for a webhook converter
Strategy: apiext.WebhookConverter,
Webhook: &apiext.WebhookConversion{
// CA bundle will be set on the following struct
ClientConfig: &apiext.WebhookClientConfig{
Service: &apiext.ServiceReference{
Namespace: "foo",
Name: "foo",
},
},
ConversionReviewVersions: []string{
version,
},
},
},
// At least one version must be defined for a v1 crd to be valid
Versions: []apiext.CustomResourceDefinitionVersion{
{
Name: version,
Storage: true,
Schema: &apiext.CustomResourceValidation{
OpenAPIV3Schema: &apiext.JSONSchemaProps{
Type: "object",
},
},
},
},
},
}
setInjectionAnnotation(&obj.ObjectMeta)
createdObj, err := client.Create(context.TODO(), obj, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating crd: %v", err)
}
defer func() {
err := client.Delete(context.TODO(), obj.Name, metav1.DeleteOptions{})
if err != nil {
t.Errorf("Failed to cleanup crd: %v", err)
}
}()
// Retrieve the expected CA bundle
expectedCABundle, err := pollForSigningCABundle(t, adminClient)
if err != nil {
t.Fatalf("error retrieving the signing ca bundle: %v", err)
}
// Wait for the expected bundle to be injected
injectedObj, err := pollForCRD(t, client, createdObj.Name, expectedCABundle)
if err != nil {
t.Fatalf("error waiting for ca bundle to be injected: %v", err)
}
// Set an invalid ca bundle
whClientConfig := injectedObj.Spec.Conversion.Webhook.ClientConfig
whClientConfig.CABundle = append(whClientConfig.CABundle, []byte("garbage")...)
_, err = client.Update(context.TODO(), injectedObj, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("error updated crd: %v", err)
}
// Check that the expected ca bundle is restored
_, err = pollForCRD(t, client, createdObj.Name, expectedCABundle)
if err != nil {
t.Fatalf("error waiting for ca bundle to be re-injected: %v", err)
}
})
// Common webhook config
webhookClientConfig := admissionreg.WebhookClientConfig{
// A service must be specified for validation to
// accept a cabundle.
Service: &admissionreg.ServiceReference{
Namespace: "foo",
Name: "foo",
},
}
sideEffectNone := admissionreg.SideEffectClassNone
t.Run("mutatingwebhook-ca-bundle-injection", func(t *testing.T) {
client := adminClient.AdmissionregistrationV1().MutatingWebhookConfigurations()
obj := &admissionreg.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "e2e-",
},
Webhooks: []admissionreg.MutatingWebhook{
// Specify 2 webhooks to ensure more than 1 webhook will be updated
{
Name: "e2e-1.example.com",
ClientConfig: webhookClientConfig,
SideEffects: &sideEffectNone,
AdmissionReviewVersions: []string{"v1beta1"},
},
{
Name: "e2e-2.example.com",
ClientConfig: webhookClientConfig,
SideEffects: &sideEffectNone,
AdmissionReviewVersions: []string{"v1beta1"},
},
},
}
setInjectionAnnotation(&obj.ObjectMeta)
createdObj, err := client.Create(context.TODO(), obj, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating mutating webhook configuration: %v", err)
}
defer func() {
err := client.Delete(context.TODO(), createdObj.Name, metav1.DeleteOptions{})
if err != nil {
t.Errorf("Failed to cleanup mutating webhook configuration: %v", err)
}
}()
// Retrieve the expected CA bundle
expectedCABundle, err := pollForSigningCABundle(t, adminClient)
if err != nil {
t.Fatalf("error retrieving the expected ca bundle: %v", err)
}
// Poll for the updated webhook configuration
injectedObj, err := pollForMutatingWebhookConfiguration(t, client, createdObj.Name, expectedCABundle)
if err != nil {
t.Fatalf("error waiting for ca bundle to be injected: %v", err)
}
// Set an invalid ca bundle
clientConfig := injectedObj.Webhooks[0].ClientConfig
clientConfig.CABundle = append(clientConfig.CABundle, []byte("garbage")...)
_, err = client.Update(context.TODO(), injectedObj, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("error updated mutating webhook configuration: %v", err)
}
// Check that the ca bundle is restored
_, err = pollForMutatingWebhookConfiguration(t, client, createdObj.Name, expectedCABundle)
if err != nil {
t.Fatalf("error waiting for ca bundle to be re-injected: %v", err)
}
})
t.Run("validatingwebhook-ca-bundle-injection", func(t *testing.T) {
client := adminClient.AdmissionregistrationV1().ValidatingWebhookConfigurations()
obj := &admissionreg.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "e2e-",
},
Webhooks: []admissionreg.ValidatingWebhook{
// Specify 2 webhooks to ensure more than 1 webhook will be updated
{
Name: "e2e-1.example.com",
ClientConfig: webhookClientConfig,
SideEffects: &sideEffectNone,
AdmissionReviewVersions: []string{"v1beta1"},
},
{
Name: "e2e-2.example.com",
ClientConfig: webhookClientConfig,
SideEffects: &sideEffectNone,
AdmissionReviewVersions: []string{"v1beta1"},
},
},
}
setInjectionAnnotation(&obj.ObjectMeta)
createdObj, err := client.Create(context.TODO(), obj, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating validating webhook configuration: %v", err)
}
defer func() {
err := client.Delete(context.TODO(), createdObj.Name, metav1.DeleteOptions{})
if err != nil {
t.Errorf("Failed to cleanup validating webhook configuration: %v", err)
}
}()
// Retrieve the expected CA bundle
expectedCABundle, err := pollForSigningCABundle(t, adminClient)
if err != nil {
t.Fatalf("error retrieving the expected ca bundle: %v", err)
}
// Poll for the updated webhook configuration
injectedObj, err := pollForValidatingWebhookConfiguration(t, client, createdObj.Name, expectedCABundle)
if err != nil {
t.Fatalf("error waiting for ca bundle to be injected: %v", err)
}
// Set an invalid ca bundle
clientConfig := injectedObj.Webhooks[0].ClientConfig
clientConfig.CABundle = append(clientConfig.CABundle, []byte("garbage")...)
_, err = client.Update(context.TODO(), injectedObj, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("error updated validating webhook configuration: %v", err)
}
// Check that the ca bundle is restored
_, err = pollForValidatingWebhookConfiguration(t, client, createdObj.Name, expectedCABundle)
if err != nil {
t.Fatalf("error waiting for ca bundle to be re-injected: %v", err)
}
})
}
func init() {
rand.Seed(time.Now().UnixNano())
}
var characters = []rune("abcdefghijklmnopqrstuvwxyz0123456789")
// TODO drop this and just use generate name
// used for random suffix
func randSeq(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = characters[rand.Intn(len(characters))]
}
return string(b)
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
backend/xx23_32923/wsgi.py
|
"""
WSGI config for xx23_32923 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'xx23_32923.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/route-emitter/main_suite_test.go
|
package main_test
import (
"crypto/tls"
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"path"
"testing"
"time"
bbsconfig "code.cloudfoundry.org/bbs/cmd/bbs/config"
bbstestrunner "code.cloudfoundry.org/bbs/cmd/bbs/testrunner"
"code.cloudfoundry.org/bbs/encryption"
"code.cloudfoundry.org/bbs/test_helpers"
"code.cloudfoundry.org/bbs/test_helpers/sqlrunner"
"code.cloudfoundry.org/consuladapter/consulrunner"
"code.cloudfoundry.org/diego-logging-client/testhelpers"
"code.cloudfoundry.org/durationjson"
"code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2"
"code.cloudfoundry.org/inigo/helpers/portauthority"
"code.cloudfoundry.org/lager/lagerflags"
"code.cloudfoundry.org/locket"
"code.cloudfoundry.org/route-emitter/cmd/route-emitter/config"
"code.cloudfoundry.org/route-emitter/diegonats"
"code.cloudfoundry.org/route-emitter/diegonats/gnatsdrunner"
"code.cloudfoundry.org/tlsconfig"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/onsi/gomega/ghttp"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/ginkgomon"
)
var (
cfgs []func(*config.RouteEmitterConfig)
emitterPath string
locketPath string
natsPort uint16
healthCheckAddress string
oauthServer *ghttp.Server
bbsPath string
bbsURL *url.URL
bbsConfig bbsconfig.BBSConfig
bbsRunner *ginkgomon.Runner
bbsProcess ifrit.Process
routingAPIPath string
consulRunner *consulrunner.ClusterRunner
gnatsdRunner ifrit.Process
natsClient diegonats.NATSClient
syncInterval time.Duration
consulClusterAddress string
testMetricsChan chan *loggregator_v2.Envelope
signalMetricsChan chan struct{}
sqlProcess ifrit.Process
sqlRunner sqlrunner.SQLRunner
bbsRunning = false
useLoggregatorV2 bool
testIngressServer *testhelpers.TestIngressServer
portAllocator portauthority.PortAllocator
)
func TestRouteEmitter(t *testing.T) {
RegisterFailHandler(Fail)
SetDefaultEventuallyTimeout(15 * time.Second)
RunSpecs(t, "Route Emitter Suite")
}
var _ = SynchronizedBeforeSuite(func() []byte {
emitter, err := gexec.Build("code.cloudfoundry.org/route-emitter/cmd/route-emitter", "-race")
Expect(err).NotTo(HaveOccurred())
bbs, err := gexec.Build("code.cloudfoundry.org/bbs/cmd/bbs", "-race")
Expect(err).NotTo(HaveOccurred())
locket, err := gexec.Build("code.cloudfoundry.org/locket/cmd/locket", "-race")
Expect(err).NotTo(HaveOccurred())
routingAPI, err := gexec.Build("code.cloudfoundry.org/routing-api/cmd/routing-api", "-race")
Expect(err).NotTo(HaveOccurred())
payload, err := json.Marshal(map[string]string{
"emitter": emitter,
"bbs": bbs,
"locket": locket,
"routing-api": routingAPI,
})
Expect(err).NotTo(HaveOccurred())
return payload
}, func(payload []byte) {
oauthServer = startOAuthServer()
binaries := map[string]string{}
err := json.Unmarshal(payload, &binaries)
Expect(err).NotTo(HaveOccurred())
emitterPath = string(binaries["emitter"])
dbName := fmt.Sprintf("diego_%d", GinkgoParallelNode())
sqlRunner = test_helpers.NewSQLRunner(dbName)
node := GinkgoParallelNode()
startPort := 1050 * node
portRange := 1000
endPort := startPort + portRange
portAllocator, err = portauthority.New(startPort, endPort)
Expect(err).NotTo(HaveOccurred())
port, err := portAllocator.ClaimPorts(consulrunner.PortOffsetLength)
Expect(err).NotTo(HaveOccurred())
consulRunner = consulrunner.NewClusterRunner(
consulrunner.ClusterRunnerConfig{
StartingPort: int(port),
NumNodes: 1,
Scheme: "http",
},
)
natsPort, err = portAllocator.ClaimPorts(1)
Expect(err).NotTo(HaveOccurred())
syncInterval = 200 * time.Millisecond
bbsPath = string(binaries["bbs"])
locketPath = string(binaries["locket"])
bbsPort, err := portAllocator.ClaimPorts(2)
Expect(err).NotTo(HaveOccurred())
bbsAddress := fmt.Sprintf("127.0.0.1:%d", bbsPort)
bbsHealthAddress := fmt.Sprintf("127.0.0.1:%d", bbsPort+1)
routingAPIPath = string(binaries["routing-api"])
bbsURL = &url.URL{
Scheme: "https",
Host: bbsAddress,
}
basePath := path.Join(os.Getenv("GOPATH"), "src/code.cloudfoundry.org/route-emitter/cmd/route-emitter/fixtures")
bbsConfig = bbsconfig.BBSConfig{
SessionName: "bbs",
CommunicationTimeout: durationjson.Duration(10 * time.Second),
RequireSSL: true,
DesiredLRPCreationTimeout: durationjson.Duration(1 * time.Minute),
ExpireCompletedTaskDuration: durationjson.Duration(2 * time.Minute),
ExpirePendingTaskDuration: durationjson.Duration(30 * time.Minute),
EnableConsulServiceRegistration: false,
ConvergeRepeatInterval: durationjson.Duration(30 * time.Second),
KickTaskDuration: durationjson.Duration(30 * time.Second),
LockTTL: durationjson.Duration(locket.DefaultSessionTTL),
LockRetryInterval: durationjson.Duration(locket.RetryInterval),
ReportInterval: durationjson.Duration(1 * time.Minute),
ConvergenceWorkers: 20,
UpdateWorkers: 1000,
TaskCallbackWorkers: 1000,
MaxOpenDatabaseConnections: 200,
MaxIdleDatabaseConnections: 200,
AuctioneerRequireTLS: false,
RepClientSessionCacheSize: 0,
RepRequireTLS: false,
LagerConfig: lagerflags.DefaultLagerConfig(),
ListenAddress: bbsAddress,
AdvertiseURL: bbsURL.String(),
AuctioneerAddress: "http://some-address",
DatabaseDriver: sqlRunner.DriverName(),
DatabaseConnectionString: sqlRunner.ConnectionString(),
ConsulCluster: consulRunner.ConsulCluster(),
HealthAddress: bbsHealthAddress,
EncryptionConfig: encryption.EncryptionConfig{
EncryptionKeys: map[string]string{"label": "key"},
ActiveKeyLabel: "label",
},
CaFile: path.Join(basePath, "green-certs", "server-ca.crt"),
CertFile: path.Join(basePath, "green-certs", "server.crt"),
KeyFile: path.Join(basePath, "green-certs", "server.key"),
}
})
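// startOAuthServer starts a TLS ghttp server that serves the token verification key on
// /token_key and a static bearer token on /oauth/token.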
func startOAuthServer() *ghttp.Server {
server := ghttp.NewUnstartedServer()
tlsConfig, err := tlsconfig.Build(
tlsconfig.WithInternalServiceDefaults(),
tlsconfig.WithIdentityFromFile("fixtures/server.crt", "fixtures/server.key"),
).Server()
Expect(err).NotTo(HaveOccurred())
tlsConfig.ClientAuth = tls.NoClientCert
server.HTTPTestServer.TLS = tlsConfig
server.AllowUnhandledRequests = true
server.UnhandledRequestStatusCode = http.StatusOK
server.HTTPTestServer.StartTLS()
publicKey := "-----BEGIN PUBLIC KEY-----\\n" +
"MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDHFr+KICms+tuT1OXJwhCUmR2d\\n" +
"KVy7psa8xzElSyzqx7oJyfJ1JZyOzToj9T5SfTIq396agbHJWVfYphNahvZ/7uMX\\n" +
"qHxf+ZH9BL1gk9Y6kCnbM5R60gfwjyW1/dQPjOzn9N394zd2FJoFHwdq9Qs0wBug\\n" +
"spULZVNRxq7veq/fzwIDAQAB\\n" +
"-----END PUBLIC KEY-----"
data := fmt.Sprintf("{\"alg\":\"rsa\", \"value\":\"%s\"}", publicKey)
server.RouteToHandler("GET", "/token_key",
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/token_key"),
ghttp.RespondWith(http.StatusOK, data)),
)
server.RouteToHandler("POST", "/oauth/token",
ghttp.CombineHandlers(
ghttp.VerifyBasicAuth("someclient", "somesecret"),
func(w http.ResponseWriter, req *http.Request) {
jsonBytes := []byte(`{"access_token":"some-token", "expires_in":10}`)
w.Write(jsonBytes)
}))
return server
}
var _ = BeforeEach(func() {
cfgs = nil
useLoggregatorV2 = false
consulRunner.Start()
consulRunner.WaitUntilReady()
consulClusterAddress = consulRunner.ConsulCluster()
sqlProcess = ginkgomon.Invoke(sqlRunner)
startBBS()
gnatsdRunner, natsClient = gnatsdrunner.StartGnatsd(int(natsPort))
healthCheckPort, err := portAllocator.ClaimPorts(1)
Expect(err).NotTo(HaveOccurred())
healthCheckAddress = fmt.Sprintf("127.0.0.1:%d", healthCheckPort)
})
var _ = JustBeforeEach(func() {
var err error
testIngressServer, err = testhelpers.NewTestIngressServer(
"fixtures/metron/metron.crt",
"fixtures/metron/metron.key",
"fixtures/metron/CA.crt",
)
Expect(err).NotTo(HaveOccurred())
receiversChan := testIngressServer.Receivers()
Expect(testIngressServer.Start()).To(Succeed())
port, err := testIngressServer.Port()
Expect(err).NotTo(HaveOccurred())
cfgs = append(cfgs, func(cfg *config.RouteEmitterConfig) {
cfg.LoggregatorConfig.BatchFlushInterval = 10 * time.Millisecond
cfg.LoggregatorConfig.BatchMaxSize = 1
cfg.LoggregatorConfig.UseV2API = useLoggregatorV2
cfg.LoggregatorConfig.APIPort = port
cfg.LoggregatorConfig.CACertPath = "fixtures/metron/CA.crt"
cfg.LoggregatorConfig.KeyPath = "fixtures/metron/client.key"
cfg.LoggregatorConfig.CertPath = "fixtures/metron/client.crt"
})
testMetricsChan, signalMetricsChan = testhelpers.TestMetricChan(receiversChan)
})
var _ = AfterEach(func() {
stopBBS()
consulRunner.Stop()
gnatsdRunner.Signal(os.Kill)
Eventually(gnatsdRunner.Wait(), 5).Should(Receive())
testIngressServer.Stop()
close(signalMetricsChan)
ginkgomon.Kill(sqlProcess, 5*time.Second)
})
var _ = SynchronizedAfterSuite(func() {
oauthServer.Close()
}, func() {
gexec.CleanupBuildArtifacts()
})
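// stopBBS kills the BBS process if it is currently running.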
func stopBBS() {
if !bbsRunning {
return
}
bbsRunning = false
ginkgomon.Kill(bbsProcess)
Eventually(bbsProcess.Wait()).Should(Receive())
}
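// startBBS launches the BBS process with the suite's configuration if it is not already running.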
func startBBS() {
if bbsRunning {
return
}
bbsRunner = bbstestrunner.New(bbsPath, bbsConfig)
bbsProcess = ginkgomon.Invoke(bbsRunner)
bbsRunning = true
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
build/android/pylib/utils/flakiness_dashboard_results_uploader.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Uploads the results to the flakiness dashboard server."""
# pylint: disable=E1002,R0201
import logging
import os
import shutil
import tempfile
import xml
#TODO(craigdh): pylib/utils/ should not depend on pylib/.
from pylib import cmd_helper
from pylib import constants
from pylib.utils import json_results_generator
from pylib.utils import repo_utils
class JSONResultsGenerator(json_results_generator.JSONResultsGeneratorBase):
"""Writes test results to a JSON file and handles uploading that file to
the test results server.
"""
def __init__(self, builder_name, build_name, build_number, tmp_folder,
test_results_map, test_results_server, test_type, master_name):
super(JSONResultsGenerator, self).__init__(
builder_name=builder_name,
build_name=build_name,
build_number=build_number,
results_file_base_path=tmp_folder,
builder_base_url=None,
test_results_map=test_results_map,
svn_repositories=(('webkit', 'third_party/WebKit'),
('chrome', '.')),
test_results_server=test_results_server,
test_type=test_type,
master_name=master_name)
#override
def _GetModifierChar(self, test_name):
if test_name not in self._test_results_map:
return self.__class__.NO_DATA_RESULT
return self._test_results_map[test_name].modifier
#override
def _GetSVNRevision(self, in_directory):
"""Returns the git/svn revision for the given directory.
Args:
in_directory: The directory relative to src.
"""
def _is_git_directory(in_directory):
"""Returns true if the given directory is in a git repository.
Args:
in_directory: The directory path to be tested.
"""
if os.path.exists(os.path.join(in_directory, '.git')):
return True
parent = os.path.dirname(in_directory)
if parent == constants.DIR_SOURCE_ROOT or parent == in_directory:
return False
return _is_git_directory(parent)
in_directory = os.path.join(constants.DIR_SOURCE_ROOT, in_directory)
if not os.path.exists(os.path.join(in_directory, '.svn')):
if _is_git_directory(in_directory):
return repo_utils.GetGitHeadSHA1(in_directory)
else:
return ''
output = cmd_helper.GetCmdOutput(['svn', 'info', '--xml'], cwd=in_directory)
try:
dom = xml.dom.minidom.parseString(output)
return dom.getElementsByTagName('entry')[0].getAttribute('revision')
except xml.parsers.expat.ExpatError:
return ''
return ''
class ResultsUploader(object):
"""Handles uploading buildbot tests results to the flakiness dashboard."""
def __init__(self, tests_type):
self._build_number = os.environ.get('BUILDBOT_BUILDNUMBER')
self._builder_name = os.environ.get('BUILDBOT_BUILDERNAME')
self._tests_type = tests_type
if not self._build_number or not self._builder_name:
raise Exception('You should not be uploading test results to the server '
'from your local machine.')
upstream = (tests_type != 'Chromium_Android_Instrumentation')
if upstream:
# TODO(frankf): Use factory properties (see buildbot/bb_device_steps.py)
# This requires passing the actual master name (e.g. 'ChromiumFYI' not
# 'chromium.fyi').
from slave import slave_utils # pylint: disable=F0401
self._build_name = slave_utils.SlaveBuildName(constants.DIR_SOURCE_ROOT)
self._master_name = slave_utils.GetActiveMaster()
else:
self._build_name = 'chromium-android'
buildbot_branch = os.environ.get('BUILDBOT_BRANCH')
if not buildbot_branch:
buildbot_branch = 'master'
self._master_name = '%s-%s' % (self._build_name, buildbot_branch)
self._test_results_map = {}
def AddResults(self, test_results):
# TODO(frankf): Differentiate between fail/crash/timeouts.
conversion_map = [
(test_results.GetPass(), False,
json_results_generator.JSONResultsGeneratorBase.PASS_RESULT),
(test_results.GetFail(), True,
json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
(test_results.GetCrash(), True,
json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
(test_results.GetTimeout(), True,
json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
(test_results.GetUnknown(), True,
json_results_generator.JSONResultsGeneratorBase.NO_DATA_RESULT),
]
for results_list, failed, modifier in conversion_map:
for single_test_result in results_list:
test_result = json_results_generator.TestResult(
test=single_test_result.GetName(),
failed=failed,
elapsed_time=single_test_result.GetDuration() / 1000)
# The WebKit TestResult object sets the modifier based on the test name.
# Since we don't use the same test naming convention as WebKit the
# modifier will be wrong, so we need to overwrite it.
test_result.modifier = modifier
self._test_results_map[single_test_result.GetName()] = test_result
def Upload(self, test_results_server):
if not self._test_results_map:
return
tmp_folder = tempfile.mkdtemp()
try:
results_generator = JSONResultsGenerator(
builder_name=self._builder_name,
build_name=self._build_name,
build_number=self._build_number,
tmp_folder=tmp_folder,
test_results_map=self._test_results_map,
test_results_server=test_results_server,
test_type=self._tests_type,
master_name=self._master_name)
json_files = ["incremental_results.json", "times_ms.json"]
results_generator.GenerateJSONOutput()
results_generator.GenerateTimesMSFile()
results_generator.UploadJSONFiles(json_files)
except Exception as e:
logging.error("Uploading results to test server failed: %s." % e)
finally:
shutil.rmtree(tmp_folder)
def Upload(results, flakiness_dashboard_server, test_type):
"""Reports test results to the flakiness dashboard for Chrome for Android.
Args:
results: test results.
flakiness_dashboard_server: the server to upload the results to.
test_type: the type of the tests (as displayed by the flakiness dashboard).
"""
uploader = ResultsUploader(test_type)
uploader.AddResults(results)
uploader.Upload(flakiness_dashboard_server)
|
[] |
[] |
[
"BUILDBOT_BUILDNUMBER",
"BUILDBOT_BUILDERNAME",
"BUILDBOT_BRANCH"
] |
[]
|
["BUILDBOT_BUILDNUMBER", "BUILDBOT_BUILDERNAME", "BUILDBOT_BRANCH"]
|
python
| 3 | 0 | |
src/app.go
|
package main
import (
"io"
"net/http"
"log"
"os"
"strconv"
"io/ioutil"
"encoding/json"
"net"
"strings"
)
const LISTEN_ADDRESS = ":9201"
var apiUrl string
var minerId string
var testMode string
type EthminerStatistics struct {
ID int64 `json:"id"`
JSONRPC string `json:"jsonrpc"`
Result []string `json:"result"`
}
func stringToInteger(value string) int64 {
if value == "" {
return 0
}
result, err := strconv.ParseInt(value, 10, 64)
if err != nil {
log.Fatal(err)
}
return result
}
func integerToString(value int64) string {
return strconv.FormatInt(value, 10)
}
func floatToString(value float64, precision int64) string {
return strconv.FormatFloat(value, 'f', int(precision), 64)
}
func stringToFloat(value string) float64 {
if value == "" {
return 0
}
result, err := strconv.ParseFloat(value, 64)
if err != nil {
log.Fatal(err)
}
return result
}
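// formatValue renders a single Prometheus exposition line of the form key{meta} value, e.g.
// formatValue("ethminer_up", "miner=\"rig0\"", "1") yields "ethminer_up{miner=\"rig0\"} 1\n"
// (the miner label "rig0" is just an illustrative value).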
func formatValue(key string, meta string, value string) string {
result := key
if meta != "" {
result += "{" + meta + "}"
}
result += " "
result += value
result += "\n"
return result
}
const StopCharacter = "\r\n\r\n"
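// queryData opens a raw TCP connection to the ethminer/claymore API and issues the
// miner_getstat1 JSON-RPC request, returning the raw JSON response.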
func queryData() (string, error) {
var err error
message := "{\"method\":\"miner_getstat1\",\"jsonrpc\":\"2.0\",\"id\":5}"
conn, err := net.Dial("tcp", apiUrl)
if err != nil {
return "", err
}
defer conn.Close()
conn.Write([]byte(message))
conn.Write([]byte(StopCharacter))
buff := make([]byte, 1024)
n, _ := conn.Read(buff)
return string(buff[:n]), nil
}
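// getTestData reads canned statistics from test.json in the working directory (test mode only).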
func getTestData() (string, error) {
dir, err := os.Getwd()
if err != nil {
return "", err
}
body, err := ioutil.ReadFile(dir + "/test.json")
if err != nil {
return "", err
}
return string(body), nil
}
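// metrics is the /metrics HTTP handler: it queries the miner (or test data), parses the
// hashrate from Result[2], and emits the ethminer_up and ethminer_hashrate metrics.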
func metrics(w http.ResponseWriter, r *http.Request) {
log.Print("Serving /metrics")
var up int64 = 1
var hashRate float64 = 0
var jsonString string
var err error
if (testMode == "1") {
jsonString, err = getTestData()
} else {
jsonString, err = queryData()
}
if err != nil {
log.Print(err)
up = 0
} else {
// Parse JSON
jsonData := EthminerStatistics{}
json.Unmarshal([]byte(jsonString), &jsonData)
s := strings.Split(jsonData.Result[2], ";")
hashRate = stringToFloat(s[0]) / 1000
}
// Output
io.WriteString(w, formatValue("ethminer_up", "miner=\"" + minerId + "\"", integerToString(up)))
io.WriteString(w, formatValue("ethminer_hashrate", "miner=\"" + minerId + "\"", floatToString(hashRate, 6)))
}
func index(w http.ResponseWriter, r *http.Request) {
log.Print("Serving /index")
html := string(`<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>Ethminer Exporter</title>
</head>
<body>
<h1>Ethminer Exporter</h1>
<p><a href="/metrics">Metrics</a></p>
</body>
</html>
`)
io.WriteString(w, html)
}
func main() {
testMode = os.Getenv("TEST_MODE")
if (testMode == "1") {
log.Print("Test mode is enabled")
}
apiUrl = os.Getenv("API_URL")
log.Print("API URL: " + apiUrl)
minerId = os.Getenv("MINER_ID")
log.Print("Miner ID: " + minerId)
log.Print("Ethminer exporter listening on " + LISTEN_ADDRESS)
http.HandleFunc("/", index)
http.HandleFunc("/metrics", metrics)
http.ListenAndServe(LISTEN_ADDRESS, nil)
}
|
[
"\"TEST_MODE\"",
"\"API_URL\"",
"\"MINER_ID\""
] |
[] |
[
"TEST_MODE",
"MINER_ID",
"API_URL"
] |
[]
|
["TEST_MODE", "MINER_ID", "API_URL"]
|
go
| 3 | 0 | |
pylearn2/dataset_get/dataset-get.py
|
#!/usr/bin/env python
# -*- coding: utf-8
########################################
#
#
# This file is intentionally monolithic.
# It also intentionally restricts itself
# to standard library modules, with no
# extra dependencies.
#
from __future__ import print_function
__authors__ = "Steven Pigeon"
__copyright__ = "(c) 2012, Université de Montréal"
__contact__ = "Steven Pigeon: [email protected]"
__version__ = "dataset-get 0.1"
__licence__ = "BSD 3-Clause http://www.opensource.org/licenses/BSD-3-Clause "
import logging
import re,os,sys,shutil,time
import warnings
import urllib,urllib2
import tarfile
import subprocess
from theano.compat.six.moves import input
logger = logging.getLogger(__name__)
########################################
class package_info:
"""
A simple class to structure
the package's information
"""
def __init__(self, cf, name,ts,rs,src,whr):
self.configuration_file=cf # in which configuration file was it found?
self.name=name # the short name, e.g., "mnist"
self.timestamp=int(ts) # a unix ctime
self.readable_size=rs # a human-readable size, e.g., "401.3MB"
self.source=src # the web source
self.where=whr # where on this machine
########################################
#
# Global variables for the whole module.
#
dataset_sources="sources.lst"
dataset_web="http://www.stevenpigeon.org/secret"
dataset_conf_path=""
dataset_data_path=""
root_conf_path=None
root_data_path=None
user_conf_path=None
user_data_path=None
super_powers=False
# both dictionaries for fast search
# (but are semantically lists)
packages_sources={}
installed_packages_list={}
########################################
def local_path_as_url( filename ):
"""
Takes a local, OS-specific path or
filename and transforms it into an
url starting with file:// (it
simplifies a lot of things).
:param filename: a relative or absolute pathname
:returns: the urlified absolute path
"""
return "file://"+urllib.pathname2url(os.path.abspath(filename))
########################################
def has_super_powers():
"""
Determines whether or not the program
is run as root.
:returns: true if run as root, false otherwise
"""
return os.geteuid()==0
########################################
def corename( filename ):
"""
returns the 'corename' of a file. For
example, corename("thingie.tar.bz2")
returns "thingie" (a totally correct
way of doing this would be to use
MIME-approved standard extensions, in
order to distinguish from, say a file
"thingie.tar.bz2" and another file
"my.data.tar.bz2"---for which we would
have only "my" as corename)
:param filename: a (base) filename
:returns: the "core" filename
"""
f1=None
f2=os.path.basename(filename)
# repeatedly remove the right-most
# extension, until none is found
#
while f1 != f2:
f1=f2
(f2,ext)=os.path.splitext(f1)
return f2
########################################
def get_timestamp_from_url( url ):
"""
Gets the Last-Modified field from the
http header associated with the file
pointed to by the url. Raises whatever
exception urllib2.urlopen raises.
It can't look up local files, unless they
are presented as a file:/// url.
:param url: a filename or an url
:returns: the last-modified timestamp
"""
obj = urllib2.urlopen( url )
return time.strptime(
obj.info()["Last-Modified"],
"%a, %d %b %Y %H:%M:%S GMT") # RFC 2822 date format
########################################
def download_from_url( url, filename=None, progress_hook=None ):
"""
Downloads a file from a URL to the
specified filename (or, if filename
is None, to a temporary location).
Returns the location of the downloaded
file.
:param url: url of the file to download
:param filename: filename to download to (None means a temp file is created)
:param progress_hook: a download hook to display progress
:returns: the filename where the file was downloaded
"""
(temp_filename, headers)=urllib.urlretrieve( url,filename,progress_hook )
return temp_filename
########################################
def file_access_rights( filename, rights, check_above=False ):
"""
Determines if a file has given rights.
If the file exists, it tests for its
rights. If it does not exist, and
check_above=True, then it checks for
the directory's rights, to test for
write/creation rights.
:param filename: filename of the file to assess
:param rights: rights to be tested
:param check_above: Check directory rights if file does not exist.
:returns: boolean, whether 'rights' rights are OK
"""
if os.path.exists(filename):
return os.access(filename, rights)
else:
if check_above:
return os.access(os.path.dirname(os.path.abspath(filename)), rights)
else:
return False
########################################
def atomic_replace( src_filename, dst_filename ):
"""
Does an "atomic" replace of a file by another.
If both files reside on the same FS
device, atomic_replace does a regular
move. If not, the source file is first
copied to a temporary location on the
same FS as the destination, then a
regular move is performed.
caveat: the destination FS must have
enough storage left for the temporary
file.
:param src_filename: The file to replace from
:param dst_filename: The file to be replaced
:raises: whatever shutil raises
"""
####################################
def same_fs( filename_a, filename_b):
"""
Checks if both files reside on the
same FS device
"""
stats_a = os.stat(filename_a)
stats_b = os.stat(filename_b)
return stats_a.st_dev == stats_b.st_dev;
if os.path.exists(dst_filename) and not same_fs(src_filename,dst_filename):
# deals with non-atomic move
#
dst_path = os.path.dirname(os.path.abspath(dst_filename))
dst_temp_filename=os.tempnam(dst_path);
shutil.copy(src_filename, dst_temp_filename) # non-atomic
shutil.move(dst_temp_filename,dst_filename) # atomic
else:
# an atomic move is performed
# (both files are on the same device,
# or the destination doesn't exist)
#
shutil.move(src_filename, dst_filename)
########################################
def set_defaults():
"""
Detects whether the program is run
as an ordinary user or as root, and
then sets default directories for
packages, configurations, and sources.
caveat: this is an FreeDesktop-friendly
version, and we will need eventually
to have Windows- and OSX-friendly
versions.
See: http://freedesktop.org/wiki/Home
and: http://www.linuxfoundation.org/collaborate/workgroups/lsb/fhs
"""
global dataset_conf_path, \
dataset_data_path, \
root_conf_path, \
root_data_path, \
user_conf_path, \
super_powers
# a conspicuously LINUX version
# (on windows, if we ever do a
# windows version, these would
# be different, and we may even
# not have 'root' per se.)
#
root_conf_path="/etc/pylearn/"
root_data_path="/usr/share/pylearn/dataset/"
user_conf_path=os.path.join(os.environ["HOME"],".local/share/pylearn/")
user_data_path=os.path.join(os.environ["HOME"],".local/share/pylearn/dataset/")
if has_super_powers():
dataset_conf_path=root_conf_path
dataset_data_path=root_data_path
super_powers=True
else:
dataset_conf_path=user_conf_path
dataset_data_path=user_data_path
super_powers=False
# check if directories exist, and if not,
# create them, and then fetch source.lst
#
if not os.path.exists(dataset_conf_path):
os.makedirs(dataset_conf_path)
if not os.path.exists(os.path.join(dataset_conf_path,dataset_sources)):
atomic_update(os.path.join(dataset_web,dataset_sources),
os.path.join(dataset_conf_path,dataset_sources),
progress_bar)
if not os.path.exists(dataset_data_path):
os.makedirs(dataset_data_path)
read_packages_sources()
read_installed_packages_list();
########################################
def read_packages_sources():
"""
Reads the sources.lst file and
populates the available packages
list.
caveat: parsing of the sources.lst
is pathetic
Assigns: packages_sources
:raises: RuntimeError if sources.lst cannot be read
"""
def read_from_file(config_filename):
"""
Reads a sources.lst file from a given location
:param config_filename: the configuration file to read
"""
global packages_sources
try:
f=open(config_filename,"r")
except Exception as e:
# not a problem if not found in a given location
pass
else:
# file opened
for line in f:
t=line.rstrip().split(' ') # rstrips strips whitespaces at the end (\n)
packages_sources[t[0]]=\
this_package=package_info(
config_filename,
t[0], # name
t[1], # timestamp
t[2], # human-readable size
urllib.unquote(t[3]), # source on the web
None) # None as not installed (from source) (may be overridden later)
if super_powers:
read_from_file(os.path.join(dataset_conf_path,dataset_sources))
else:
# read root, user, then paths.
paths=[ os.path.join(root_conf_path,dataset_sources),
os.path.join(user_conf_path,dataset_sources) ]
try:
paths+=[ os.path.join(x,dataset_sources) for x in re.split(":|;",os.environ["PYLEARN2_DATA_PATH"]) ]
except Exception:
# PYLEARN2_DATA_PATH may or mayn't be defined
pass
for path in paths:
read_from_file(path)
if len(packages_sources)==0:
raise RuntimeError( "[cf] fatal: could not find/read sources.lst (unexpected!)" )
########################################
def read_installed_packages_list():
"""
Reads the various installed.lst files
found on the system. First it searches
for the root-installed installed.lst,
then the user's, then searches the
locations specified by the environment
variable PYLEARN2_DATA_PATH (which is
a standard :-separated list of locations)
Assigns: installed_packages_list
"""
# note: we add and overwrite rather
# than clearing and filling (so we can
# read many installed.lst, but the last
# ones read overrides the earlier ones)
#
def read_from_file(config_filename):
"""
Reads an installed.lst file from a given location
:param config_filename: the configuration file to read
"""
global installed_packages_list
try:
installed_list_file=open(config_filename)
except IOError:
# not a problem if not found in a location
pass
else:
# read from file and
# create a dictionary
#
for line in installed_list_file:
l=line.rstrip().split(' ') # removes trailing whitespaces (\n)
if l:
installed_packages_list[l[0]]=\
this_package=package_info(
config_filename,
l[0], # name
l[1], # timestamp
l[2], # human-readable size
urllib.unquote(l[3]), # source on the web
urllib.unquote(l[4])) # where installed
else:
pass # skip blank lines (there shouldn't be any)
if super_powers:
# then read only root
read_from_file(os.path.join(dataset_conf_path,"installed.lst"))
else:
# read root, user, then paths.
paths=[ os.path.join(root_conf_path,"installed.lst"),
os.path.join(user_conf_path,"installed.lst") ]
try:
paths+=[ os.path.join(x,"installed.lst") for x in re.split(":|;",os.environ["PYLEARN2_DATA_PATH"]) ]
except Exception:
# PYLEARN2_DATA_PATH may or mayn't be defined
pass
for path in paths:
read_from_file(path)
if len(installed_packages_list)==0:
logger.warning("[cf] no install.lst found "
"(will be created on install/upgrade)")
########################################
def write_installed_packages_list():
"""
Saves the installed package list and
their location (which file is over-written depends
on whether it is run as root or as a normal user)
"""
global installed_packages_list
try:
tmp=open(os.path.join(dataset_conf_path,"installed.lst.2"),"w")
except IOError:
raise RuntimeError("[cf] fatal: cannot create temp file")
else:
# ok, probably worked?
for package in installed_packages_list.values():
# adds only packages that are readable for
# this user (maybe some site-installed datasets
# are out of his reach)
#
if package.where!=None and \
file_access_rights(os.path.join(package.where,package.name),
os.F_OK | os.R_OK):
print(
" ".join(map(str,[ package.name,
package.timestamp,
package.readable_size,
urllib.quote(package.source,"/:~"),
urllib.quote(package.where,"/:~") ] )),
file=tmp)
# replace the installed.lst in
# a safe way
atomic_replace(os.path.join(dataset_conf_path,"installed.lst.2"),
os.path.join(dataset_conf_path,"installed.lst"))
########################################
def atomic_update( remote_src, local_dst, hook=None ):
"""
Takes a (possibly) remote file and checks
if it is newer than an (obligatorily)
local file. If the source is newer, an
"atomic update" is performed.
Atomic here means that the source is
downloaded in a distinct location, and
only if download is successful is the
destination file replaced atomically.
:param remote_src: Url to a (possibly) remote file
:param local_dst: file to update
:param hook: download progress hook
:raises: various IOErrors
"""
global hook_download_filename # hook-related
try:
remote_date = get_timestamp_from_url(remote_src);
except IOError as e:
raise IOError("[ts] %s %s" % (str(e),remote_src))
else:
if os.path.exists(local_dst):
# it exists, test for update
try:
local_date = get_timestamp_from_url(local_path_as_url(local_dst))
except Exception as e:
raise IOError("[ts] %s %s" % (str(e),local_dst))
else:
if (local_date<remote_date):
# OK, the file seems to be out-of-date
# let's update it
#
if file_access_rights(local_dst,os.W_OK,check_above=True):
# we have write access to the file, or if it doesn't
# exist, to the directory where we want to write it.
#
try:
hook_download_filename=remote_src # hook-related
temp_filename=download_from_url(remote_src, filename=None, progress_hook=hook)
except Exception as e:
raise IOError("[dl] %s %s" % (str(e),remote_src))
else:
# download to temporary was successful,
# let's (try to) perform the atomic replace
#
try:
atomic_replace(temp_filename,local_dst)
except Exception as e:
raise IOError("[ac] %s %s --> %s" % (str(e),temp_filename,local_dst))
else:
raise IOError("[rw] no write access to %s " % local_dst )
else:
# file's up to date, everything's fine
# and there's nothing else to do
#
pass
else:
# file does not exist, just download!
#
if file_access_rights(local_dst,os.W_OK,check_above=True):
try:
hook_download_filename=remote_src # hook-related
temp_filename=download_from_url(remote_src, filename=None, progress_hook=hook)
except Exception as e:
raise IOError("[dl] %s %s" % (str(e),remote_src))
else:
# yay! download successful!
#
try:
atomic_replace(temp_filename,local_dst)
except Exception as e:
raise IOError("[ac] %s %s --> %s" % (str(e),temp_filename,local_dst))
else:
raise IOError("[rw] no right access to %s" % local_dst)
########################################
def unpack_tarball( tar_filename, dest_path ):
"""
Unpacks a (bzipped2) tarball to a destination
directory
:param tar_filename: the bzipped2 tar file
:param dest_path: a path to where expand the tarball
:raises: various IOErrors
"""
if os.path.exists(tar_filename):
if file_access_rights(dest_path,os.W_OK,check_above=False):
try:
# open the tarball as read, bz2
#
this_tar_file=tarfile.open(tar_filename,"r:bz2")
except Exception as e:
raise IOError("[tar] cannot open '%s'" % tar_filename)
else:
# ok, it's openable, let's expand it
#
try:
this_tar_file.extractall(dest_path)
except Exception as e:
raise IOError("[tar] error while extracting '%s'" %tar_filename)
else:
# yay! success!
pass
else:
raise IOError("[tar] no right access to '%s'" % dest_path)
else:
raise IOError("'%s' not found" % tar_filename)
########################################
def run_scripts( package_location, scripts ):
"""
Searches for installation scripts specified
by the scripts list
:param package_location: "root" path for the package
:param scripts: list of scripts to look for (and execute)
:raises: subprocess exceptions
"""
path=os.path.join(package_location,"scripts/")
cwd=os.getcwd()
os.chdir(path)
for script in scripts:
if os.path.exists( script ):
# throws CalledProcessError if return
# return code is not zero.
#
try:
subprocess.check_call( script, stdout=sys.stdout, stderr=sys.stderr )
except Exception:
os.chdir(cwd)
raise
# ok, success (or not), let's unstack
os.chdir(cwd)
########################################
def install_package( package, src, dst ):
"""
Unpacks a (bzipped2) tarball and
expands it to the given location.
If unpacking is successful, installation
scripts are run.
:param package: package information
:param src: the source tarball
:param dst: the destination directory
:raises: IOErrors and subprocess exceptions
"""
#FIXME: change creation flags to group-public
# readable when invoked with super-powers
#
unpack_tarball(src,dst)
run_scripts(dst+package.name, scripts=["getscript","postinst"] )
########################################
def remove_package(package,dst):
"""
Removes a script by running the
various removal scripts, then by
deleting files and directories.
:param package: package information
:param dst: packages root (where packages are installed)
"""
#path=os.path.join(dst,package.name)
path=os.path.join(package.where,package.name)
#print path
run_scripts(path,scripts=["prerm"])
shutil.rmtree(os.path.join(path,"data/"))
shutil.rmtree(os.path.join(path,"docs/"))
run_scripts(os.path.join(dst,package.name),scripts=["postrm"])
shutil.rmtree(os.path.join(path,"scripts/"))
shutil.rmtree(path)
update_installed_list("r",package)
########################################
def update_installed_list( op, package ):
"""
Updates the internal list of installed
packages. The operation is either "i"
for install and update, or "r" for removal
:param op: the operation performed
:param package: the package information
:param dst: where the package was installed
"""
if op=="i":
installed_packages_list[package.name]=package;
elif op=="r":
# remove the package from the list
del installed_packages_list[package.name]
else:
raise RuntimeError("[cf] fatal: invalid configuration op '%s'." % op)
write_installed_packages_list()
########################################
def show_packages():
"""
List all available packages, both
installed or from remote sources
"""
logger.info("These packages are available:")
for this_package in packages_sources.values():
if this_package.name in installed_packages_list:
state="u" if installed_packages_list[this_package.name].timestamp<this_package.timestamp else 'i';
else:
state="-"
package_time = time.strftime("%a, %d %b %Y %H:%M:%S",
time.gmtime(this_package.timestamp))
logger.info("{0} {1:<20} {2:<8} "
"{3:<30} {4}".format(state,
this_package.name,
this_package.readable_size,
package_time,
this_package.source))
########################################
def install_upgrade( package, upgrade=False,progress_hook=None ):
"""
This function installs or upgrades a package.
:param package: package information
:param upgrade: If True, performs an upgrade, installs otherwise
:param progress_hook: a download progress hook
"""
global hook_download_filename # hook-related
if upgrade:
operation = "[up] upgrading"
else:
operation = "[in] installing"
logger.info("{0} '{1}' to {2}".format(operation,
package.name, dataset_data_path))
remote_src=package.source
# install location is determined by super-powers
# (so a root package can be upgraded locally!)
package.where=dataset_data_path;
# TODO: to add caching, first lookup the
# tarball in the package cache (but there isn't
# one for now)
#
cached=False;
if not cached:
hook_download_filename=remote_src # hook-related
temp_filename=download_from_url(remote_src,filename=None,progress_hook=progress_hook)
else:
# assign filename to cached package
pass
logger.info("[in] running install scripts "
"for package '{0}'".format(package.name))
# runs through the .../package_name/scripts/
# directory and executes the scripts in a
# specific order (which shouldn't display
# much unless they fail)
#
install_package(package,temp_filename,dataset_data_path)
update_installed_list("i",package)
########################################
def upgrade_packages(packages_to_upgrade, hook=None ):
"""
Upgrades packages.
If no packages are supplied, it will perform
an "update-all" operation, finding all packages
that are out of date.
If packages names are supplied, only those
are checked for upgrade (and upgraded if out
of date)
:param packages_to_upgrade: list of package names.
:raises: IOErrors (from downloads/rights)
"""
# get names only
if packages_to_upgrade==[]:
packages_to_upgrade=installed_packages_list.keys() # all installed!
all_packages=True
else:
all_packages=False
# check what packages are in the list,
# and really to be upgraded.
#
packages_really_to_upgrade=[]
for this_package in packages_to_upgrade:
if this_package in installed_packages_list:
# check if there's a date
installed_date=installed_packages_list[this_package].timestamp
if this_package in packages_sources:
repo_date=packages_sources[this_package].timestamp
if installed_date < repo_date:
# ok, there's a newer version
logger.info(this_package)
packages_really_to_upgrade.append(this_package)
else:
# no newer version, nothing to update
pass
else:
logger.warning("[up] '{0}' is unknown "
"(installed from file?).".format(this_package))
else:
# not installed?
if not all_packages:
logger.warning("[up] '{0}' is not installed, "
"cannot upgrade.".format(this_package))
pass
# once we have determined which packages
# are to be updated, we show them to the
# user for him to confirm
#
if packages_really_to_upgrade!=[]:
logger.info("[up] the following package(s) will be upgraded:")
for this_package in packages_really_to_upgrade:
readable_size = packages_sources[this_package].readable_size
logger.info("{0} ({1})".format(this_package, readable_size))
r = input("Proceed? [yes/N] ")
if r=='y' or r=='yes':
for this_package in packages_really_to_upgrade:
install_upgrade( packages_sources[this_package], upgrade=True, progress_hook=hook )
else:
logger.info("[up] Taking '{0}' for no, so there.".format(r))
else:
# ok, nothing to upgrade,
# move along.
pass
########################################
#
# installs the packages, and forces if
# they already exist
#
# packages must be supplied as argument.
#
#
def install_packages( packages_to_install, force_install=False, hook=None ):
"""
Installs the packages, possibly forcing installs.
:param packages_to_install: list of package names
:param force_install: if True, re-installs even if installed.
:param hook: download progress hook
:raises: IOErrors
"""
if packages_to_install==[]:
raise RuntimeError("[in] fatal: need packages names to install.")
if force_install:
logger.warning("[in] using the force")
packages_really_to_install=[]
for this_package in packages_to_install:
if this_package in packages_sources:
if force_install or not this_package in installed_packages_list:
packages_really_to_install.append(this_package)
else:
logger.warning("[in] package '{0}' "
"is already installed".format(this_package))
else:
logger.warning("[in] unknown package '{0}'".format(this_package))
if packages_really_to_install!=[]:
logger.info("[in] The following package(s) will be installed:")
for this_package in packages_really_to_install:
readable_size = packages_sources[this_package].readable_size
logger.info("{0} ({1})".format(this_package, readable_size))
r = input("Proceed? [yes/N] ")
if r=='y' or r=='yes':
for this_package in packages_really_to_install:
install_upgrade( packages_sources[this_package], upgrade=False, progress_hook=hook )
else:
logger.info("[in] Taking '{0}' for no, so there.".format(r))
else:
# ok, nothing to upgrade,
# move along.
pass
########################################
def install_packages_from_file( packages_to_install ):
"""
(Force)Installs packages from files, but does
not update installed.lst files.
caveat: not as tested as everything else.
:param packages_to_install: list of files to install
:raises: IOErrors
"""
if packages_to_install==[]:
raise RuntimeError("[in] fatal: need packages names to install.")
packages_really_to_install=[]
for this_package in packages_to_install:
if os.path.exists(this_package):
packages_really_to_install.append(this_package)
else:
logger.warning("[in] package '{0}' not found".format(this_package))
if packages_really_to_install!=[]:
logger.info("[in] The following package(s) will be installed:")
packages = []
for this_package in packages_really_to_install:
packages.append(corename(this_package))
logger.info(' '.join(packages))
r = input("Proceed? [yes/N] ")
if r=='y' or r=='yes':
for this_package in packages_really_to_install:
#install_upgrade( this_package, upgrade=False, progress_hook=hook )
if os.path.exists(dataset_data_path+corename(this_package)):
r = input("[in] '%s' already installed, overwrite? [yes/N] " % corename(this_package))
if r!='y' and r!='yes':
logger.info("[in] skipping package "
"'{0}'".format(corename(this_package)))
continue
install_package( corename(this_package), this_package, dataset_data_path)
#update_installed_list("i",(make a package object here),dataset_data_path)
else:
logger.info("[in] Taking '{0}' for no, so there.".format(r))
########################################
#
# uninstall packages, whether or not they
# are found in the sources.lst file (to
# account for the packages installed from
# file)
#
# like install, it expects a list, if there's
# no list, nothing happens. It will test
# whether or not the packages are installed, and
# will ask the user for a confirmation.
#
def remove_packages( packages_to_remove ):
"""
Uninstall packages, whether or not they
are found in the source.lst (so it can
remove datasets installed from file).
:param packages_to_remove: list of package names
:raises: IOErrors
"""
if packages_to_remove==[]:
raise RuntimeError("[rm] fatal: need packages names to remove.")
packages_really_to_remove=[]
for this_package in packages_to_remove:
if this_package in packages_sources:
#this_data_set_location=os.path.join(dataset_data_path,this_package)
# check if in the installed.lst
# then if directory actually exists
# then if you have rights to remove it
if this_package in installed_packages_list:
this_data_set_location=os.path.join( installed_packages_list[this_package].where,
installed_packages_list[this_package].name )
if os.path.exists(this_data_set_location):
if (file_access_rights(this_data_set_location,os.W_OK)):
# ok, you may have rights to delete it
packages_really_to_remove.append(this_package)
else:
logger.warning("[rm] insufficient rights "
"to remove '{0}'".format(this_package))
else:
logger.warning("[rm] package '{0}' found in config file "
"but not installed".format(this_package))
else:
logger.warning("[rm] package '{0}' "
"not installed".format(this_package))
else:
logger.warning("[rm] unknown package '{0}'".format(this_package))
if packages_really_to_remove!=[]:
logger.info("[rm] the following packages will be removed permanently:")
packages = []
for this_package in packages_really_to_remove:
packages.append(this_package)
logger.info(' '.join(packages))
r = input("Proceed? [yes/N] ")
if r=='y' or r=='yes':
for this_package in packages_really_to_remove:
remove_package( installed_packages_list[this_package], dataset_data_path )
else:
logger.info("[up] Taking '{0}' for no, so there.".format(r))
else:
# ok, nothing to remove, filenames were bad.
pass
########################################
hook_download_filename=""
def progress_bar( blocks, blocksize, totalsize ):
"""
Simple hook to show download progress.
caveat: not that great-looking, fix later to
a cooler progress bar or something.
"""
print("\r[dl] %6.2f%% %s" % (min(totalsize,blocks*blocksize)*100.0/totalsize, hook_download_filename), end='')
sys.stdout.flush()
########################################
def process_arguments():
"""
Processes the installation arguments (from
the command line)
The possible arguments are:
list
lists available datasets from
sources.lst
update
updates sources.lst
upgrade
upgrades datasets that are out
of date
install <dataset1> <dataset2> ... <datasetn>
uses sources.lst to locate the
package and perform the installation
force-install <dataset1> ... <datasetn>
performs an install even if the data
sets seem to be there.
remove <dataset1> <dataset2> ... <datasetn>
removes the dataset
clean
empties package cache (does nothing
for now, because no cache.)
"""
if len(sys.argv)>1:
# due to the relative simplicity of the
# arguments, we won't use optparse (2.3-2.6)
# nor argparse (2.7+), although in the future
# it may pose problems
if sys.argv[1]=="list":
show_packages()
elif sys.argv[1]=="update":
atomic_update( os.path.join(dataset_web,dataset_sources),
os.path.join(dataset_conf_path,dataset_sources),
hook=progress_bar)
elif sys.argv[1]=="upgrade":
upgrade_packages(sys.argv[2:],
hook=progress_bar)
elif sys.argv[1]=="install":
install_packages(sys.argv[2:],
hook=progress_bar)
elif sys.argv[1]=="install-from-file":
install_packages_from_file(sys.argv[2:])
elif sys.argv[1]=="force-install":
install_packages(sys.argv[2:],
force_install=True,
hook=progress_bar)
elif sys.argv[1]=="remove":
remove_packages(sys.argv[2:])
elif sys.argv[1]=="clean":
# does nothing, no cache implemented
# yet.
pass
elif sys.argv[1]=="version":
logger.info(__version__)
else:
raise RuntimeError("[cl] unknown command '%s'" % sys.argv[1])
else:
raise RuntimeError("[cl] missing command")
########################################
if __name__ == "__main__":
# to remove RuntimeWarnings about how
# tempfilename is unsafe.
#
warnings.simplefilter("ignore", RuntimeWarning)
# OK, let's construct the environment
# needed by dataset-get
try:
set_defaults()
except Exception as e:
logger.exception(e)
exit(1) # fail!
try:
process_arguments()
except Exception as e:
logger.exception(e)
exit(1)
|
[] |
[] |
[
"HOME",
"PYLEARN2_DATA_PATH"
] |
[]
|
["HOME", "PYLEARN2_DATA_PATH"]
|
python
| 2 | 0 | |
examples/object_storage/object_storage_bulk_rename.py
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
##########################################################################
# object_storage_bulk_rename.py
#
# @author: Adi Z
#
# Supports Python 3
#
# DISCLAIMER – This is not an official Oracle application, it is not supported by Oracle Support, and it should NOT be used for utilization calculation purposes
##########################################################################
# Info:
# Bulk rename with parallel threads
#
##########################################################################
# Application Command line parameters
#
# -c config - Config file (default=~/.oci/config)
# -t profile - Config file section to use (tenancy profile), DEFAULT as default
# -p proxy - Set Proxy (i.e. www-proxy-server.com:80)
# -ip - Use Instance Principals for Authentication
# -dt - Use Instance Principals with delegation token for cloud shell
# -sb source_bucket
# -sp source_prefix_include
# -sr source_region
# -textrem - text remove prefix (can be used to remove folder)
# -textadd - text append prefix (can be used to add folder)
##########################################################################
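# Illustrative example (bucket and folder names are placeholders):
#   python object_storage_bulk_rename.py -t DEFAULT -sb my_bucket \
#       -textrem "old_folder/" -textadd "new_folder/"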
import threading
import time
import queue
import oci
import argparse
import datetime
import sys
import os
##########################################################################
# Pre Main
##########################################################################
# Get Command Line Parser
parser = argparse.ArgumentParser()
parser.add_argument('-t', default="", dest='config_profile', help='Config file section to use (tenancy profile)')
parser.add_argument('-p', default="", dest='proxy', help='Set Proxy (i.e. www-proxy-server.com:80) ')
parser.add_argument('-ip', action='store_true', default=False, dest='is_instance_principals', help='Use Instance Principals for Authentication')
parser.add_argument('-dt', action='store_true', default=False, dest='is_delegation_token', help='Use Delegation Token for Authentication')
parser.add_argument('-c', default="", dest='config_file', help="Config File (default=~/.oci/config)")
parser.add_argument('-sb', default="", dest='source_bucket', help='Source Bucket Name')
parser.add_argument('-sp', default="", dest='source_prefix_include', help='Source Prefix Include')
parser.add_argument('-sr', default="", dest='source_region', help='Source Region')
parser.add_argument('-textrem', default="", dest='text_remove', help='text remove prefix (can be used to remove folder)')
parser.add_argument('-textadd', default="", dest='text_append', help='text append prefix (can be used to add folder)')
cmd = parser.parse_args()
if len(sys.argv) < 1:
parser.print_help()
raise SystemExit
if not cmd.source_bucket:
print("Source bucket parameter is required !!!\n")
parser.print_help()
raise SystemExit
if not cmd.text_remove and not cmd.text_append:
print("Text Remove Prefix or Text Append Prefix required !!!\n")
parser.print_help()
raise SystemExit
# Parameters
worker_count = 40
status_interval = 60
base_retry_timeout = 2
max_retry_timeout = 16**2
# global queue
q = queue.Queue()
# Update Variables based on the parameters
config_file = (cmd.config_file if cmd.config_file else oci.config.DEFAULT_LOCATION)
config_profile = (cmd.config_profile if cmd.config_profile else oci.config.DEFAULT_PROFILE)
# Global Variables
object_storage_client = None
source_namespace = ""
source_bucket = cmd.source_bucket
source_prefix = cmd.source_prefix_include
source_region = cmd.source_region
source_text_remove = cmd.text_remove
source_text_append = cmd.text_append
if source_text_remove:
source_prefix = source_text_remove
##########################################################################
# Create signer for Authentication
# Input - config_file, config_profile and is_instance_principals and is_delegation_token
# Output - config and signer objects
##########################################################################
def create_signer(config_file, config_profile, is_instance_principals, is_delegation_token):
# if instance principals authentications
if is_instance_principals:
try:
signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
config = {'region': signer.region, 'tenancy': signer.tenancy_id}
return config, signer
except Exception:
print_header("Error obtaining instance principals certificate, aborting")
raise SystemExit
# -----------------------------
# Delegation Token
# -----------------------------
elif is_delegation_token:
try:
# check if env variables OCI_CONFIG_FILE, OCI_CONFIG_PROFILE exist and use them
env_config_file = os.environ.get('OCI_CONFIG_FILE')
env_config_section = os.environ.get('OCI_CONFIG_PROFILE')
# check if file exist
if env_config_file is None or env_config_section is None:
print("*** OCI_CONFIG_FILE and OCI_CONFIG_PROFILE env variables not found, abort. ***")
print("")
raise SystemExit
config = oci.config.from_file(env_config_file, env_config_section)
delegation_token_location = config["delegation_token_file"]
with open(delegation_token_location, 'r') as delegation_token_file:
delegation_token = delegation_token_file.read().strip()
# get signer from delegation token
signer = oci.auth.signers.InstancePrincipalsDelegationTokenSigner(delegation_token=delegation_token)
return config, signer
except KeyError:
print("* Key Error obtaining delegation_token_file")
raise SystemExit
except Exception:
raise
# -----------------------------
# config file authentication
# -----------------------------
else:
config = oci.config.from_file(
(config_file if config_file else oci.config.DEFAULT_LOCATION),
(config_profile if config_profile else oci.config.DEFAULT_PROFILE)
)
signer = oci.signer.Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=oci.config.get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
return config, signer
##############################################################################
# get time
##############################################################################
def get_time(full=False):
if full:
return str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
else:
return str(datetime.datetime.now().strftime("%H:%M:%S"))
##########################################################################
# Print header centered
##########################################################################
def print_header(name):
chars = int(90)
print("")
print('#' * chars)
print("#" + name.center(chars - 2, " ") + "#")
print('#' * chars)
##########################################################################
# Print Info
##########################################################################
def print_command_info():
print_header("Running Object Storage Bulk Rename")
print("Written by Adi Z, March 2021")
print("Starts at : " + get_time(full=True))
print("Command Line : " + ' '.join(x for x in sys.argv[1:]))
print("Source Namespace : " + source_namespace)
print("Source Bucket : " + source_bucket)
print("Source Prefix Include : " + source_prefix)
print("Text Remove Prefix : " + source_text_remove)
print("Text Append Prefix : " + source_text_append)
##############################################################################
# Worker
##############################################################################
def worker():
while True:
object_ = q.get()
interval_exp = base_retry_timeout
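# Retry loop: on each failure (other than a 400) sleep interval_exp seconds,
# then square it (2, 4, 16, ...) until it exceeds max_retry_timeout, at which
# point the error is re-raised.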
while True:
response = None
try:
response = rename_object(source_namespace, source_bucket, object_)
break
except Exception as e:
if e.status == 400:
break
if interval_exp > max_retry_timeout:
print(" ERROR: Failed to request rename of %s" % (object_))
raise
if response:
print(" Received %s from API for object %s, will wait %s seconds before retrying." % (response.status, object_, interval_exp))
else:
print(" Received error from API for object %s, will wait %s seconds before retrying." % (object_, interval_exp))
time.sleep(interval_exp)
interval_exp **= 2
continue
q.task_done()
##############################################################################
# Add object to Q
##############################################################################
def add_objects_to_queue(ns, source_bucket):
global q
count = 0
next_starts_with = None
while True:
response = object_storage_client.list_objects(ns, source_bucket, start=next_starts_with, prefix=source_prefix, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
next_starts_with = response.data.next_start_with
for object_ in response.data.objects:
q.put(object_.name)
count += 1
if count % 100000 == 0:
print(get_time() + " - Added " + str(count) + " files to queue...")
if not next_starts_with:
break
return count
##############################################################################
# rename object function
##############################################################################
def rename_object(ns, bucket, object_name):
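# Builds the new object name by stripping the -textrem prefix (when present)
# and prepending the -textadd prefix, e.g. "old_folder/file.txt" ->
# "new_folder/file.txt" (the folder names here are only an illustration).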
new_name = object_name
# if remove prefix name
if source_text_remove:
if object_name.startswith(source_text_remove):
new_name = object_name[len(source_text_remove):]
# if append prefix name
if source_text_append:
new_name = source_text_append + new_name
rename_request = oci.object_storage.models.RenameObjectDetails()
rename_request.source_name = object_name
rename_request.new_name = new_name
return object_storage_client.rename_object(ns, bucket, rename_request)
##############################################################################
# connect to object storage
##############################################################################
def connect_to_object_storage():
global source_namespace
global object_storage_client
# get signer
config, signer = create_signer(cmd.config_file, cmd.config_profile, cmd.is_instance_principals, cmd.is_delegation_token)
# if region is specified
if source_region:
config['region'] = source_region
try:
# connect and fetch namespace
print("\nConnecting to Object Storage Service...")
object_storage_client = oci.object_storage.ObjectStorageClient(config, signer=signer)
if cmd.proxy:
object_storage_client.base_client.session.proxies = {'https': cmd.proxy}
# retrieve namespace from object storage
source_namespace = object_storage_client.get_namespace(retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
print("Succeed.")
except Exception as e:
print("\nError connecting to Object Storage - " + str(e))
raise SystemExit
##############################################################################
# Main
##############################################################################
def main():
# connect to object storage
connect_to_object_storage()
# global parameters
global source_namespace
global object_storage_client
# get signer
config, signer = create_signer(cmd.config_file, cmd.config_profile, cmd.is_instance_principals, cmd.is_delegation_token)
try:
# connect and fetch namespace
print("\nConnecting to Object Storage Service...")
object_storage_client = oci.object_storage.ObjectStorageClient(config, signer=signer)
if cmd.proxy:
object_storage_client.base_client.session.proxies = {'https': cmd.proxy}
# retrieve namespace from object storage
source_namespace = object_storage_client.get_namespace(retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
print("Succeed.")
except Exception as e:
raise RuntimeError("\nError extracting namespace - " + str(e))
# command info
print_command_info()
print_header("Start Processing")
print(get_time() + " - Creating %s workers." % (worker_count))
for i in range(worker_count):
w = threading.Thread(target=worker)
w.daemon = True
w.start()
print(get_time() + " - Getting list of objects from source source_bucket (%s). Rename will start immediately." % (source_bucket))
count = add_objects_to_queue(source_namespace, source_bucket)
print(get_time() + " - Enqueued %s objects to be Renamed" % (count))
while count > 0:
print(get_time() + " - Waiting %s seconds before checking status." % (status_interval))
time.sleep(status_interval)
if q.qsize() == 0:
print(get_time() + " - Rename of all objects has been requested.")
break
else:
print(get_time() + " - %s object renames remaining to requested." % (q.qsize()))
q.join()
print_header("Completed")
print("Completed at : " + get_time(True))
##############################################################################
# Execute
##############################################################################
if __name__ == '__main__':
main()
|
[] |
[] |
[
"OCI_CONFIG_FILE",
"OCI_CONFIG_PROFILE"
] |
[]
|
["OCI_CONFIG_FILE", "OCI_CONFIG_PROFILE"]
|
python
| 2 | 0 | |
tests/test_bigquery.py
|
import unittest
import os
import threading
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from http.server import BaseHTTPRequestHandler, HTTPServer
from google.cloud import bigquery
from google.auth.exceptions import DefaultCredentialsError
from kaggle_gcp import KaggleKernelCredentials, PublicBigqueryClient
class TestBigQuery(unittest.TestCase):
def _test_proxy(self, client, should_use_proxy):
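# Spins up a throw-away HTTP server on the host/port taken from
# KAGGLE_DATA_PROXY_URL and records whether the BigQuery client sent it a
# request carrying the X-KAGGLE-PROXY-DATA header.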
class HTTPHandler(BaseHTTPRequestHandler):
called = False
header_found = False
def do_HEAD(s):
s.send_response(200)
def do_GET(s):
HTTPHandler.called = True
HTTPHandler.header_found = any(k for k in s.headers if k == "X-KAGGLE-PROXY-DATA" and s.headers[k] == "test-key")
s.send_response(200)
server_address = urlparse(os.getenv('KAGGLE_DATA_PROXY_URL'))
with HTTPServer((server_address.hostname, server_address.port), HTTPHandler) as httpd:
threading.Thread(target=httpd.serve_forever).start()
try:
for ds in client.list_datasets(): pass
except:
pass
httpd.shutdown()
if should_use_proxy:
self.assertTrue(HTTPHandler.called, msg="Fake server did not receive a request from the BQ client.")
self.assertTrue(HTTPHandler.header_found, msg="X-KAGGLE-PROXY-DATA header was missing from the BQ request.")
else:
self.assertFalse(HTTPHandler.called, msg="Fake server was called from the BQ client, but should not have been.")
def test_proxy_using_library(self):
env = EnvironmentVarGuard()
env.unset('KAGGLE_BQ_USER_JWT')
with env:
client = PublicBigqueryClient()
self._test_proxy(client, should_use_proxy=True)
def test_proxy_no_project(self):
env = EnvironmentVarGuard()
env.unset('KAGGLE_BQ_USER_JWT')
with env:
client = bigquery.Client()
self._test_proxy(client, should_use_proxy=True)
def test_project_with_connected_account(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_BQ_USER_JWT', 'foobar')
with env:
client = bigquery.Client(project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
self._test_proxy(client, should_use_proxy=False)
def test_simultaneous_clients(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_BQ_USER_JWT', 'foobar')
with env:
proxy_client = bigquery.Client()
self._test_proxy(proxy_client, should_use_proxy=True)
bq_client = bigquery.Client(project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
self._test_proxy(bq_client, should_use_proxy=False)
# Verify that proxy client is still going to proxy to ensure global Connection
# isn't being modified.
self._test_proxy(proxy_client, should_use_proxy=True)
def test_no_project_with_connected_account(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_BQ_USER_JWT', 'foobar')
with env:
with self.assertRaises(DefaultCredentialsError):
# TODO(vimota): Handle this case, either default to Kaggle Proxy or use some default project
# by the user or throw a custom exception.
client = bigquery.Client(credentials=KaggleKernelCredentials())
self._test_proxy(client, should_use_proxy=False)
|
[] |
[] |
[
"KAGGLE_DATA_PROXY_URL"
] |
[]
|
["KAGGLE_DATA_PROXY_URL"]
|
python
| 1 | 0 | |
docker/dcrwallet/main.go
|
package main
import (
"context"
"fmt"
"io/ioutil"
"os"
pb "decred.org/dcrwallet/rpc/walletrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
var certificateFile = "/rpc/rpc.cert"
func main() {
// Load credentials
creds, err := credentials.NewClientTLSFromFile(certificateFile, "localhost")
if err != nil {
fmt.Println(err)
return
}
// Create the gRPC connection
conn, err := grpc.Dial("localhost:19558", grpc.WithTransportCredentials(creds))
if err != nil {
fmt.Println(err)
return
}
defer conn.Close()
// Init the loader service client used to
// create a new wallet
lc := pb.NewWalletLoaderServiceClient(conn)
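	// The passphrase and seed are read from the WALLET_PASS and WALLET_SEED
	// environment variables, so both must be set before this program runs.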
createWalletRequest := &pb.CreateWalletRequest{
PrivatePassphrase: []byte(os.Getenv("WALLET_PASS")),
Seed: []byte(os.Getenv("WALLET_SEED")),
}
// Create/import a wallet
_, err = lc.CreateWallet(context.Background(), createWalletRequest)
if err != nil {
fmt.Println(err)
return
}
fmt.Println("\033[1;35mWallet created!\033[0m")
// Init the wallet service client to request
// a new address for the wallet imported above.
c := pb.NewWalletServiceClient(conn)
nextAddressRequest := &pb.NextAddressRequest{
Account: 0,
Kind: pb.NextAddressRequest_BIP0044_EXTERNAL,
}
nextAddressResponse, err := c.NextAddress(context.Background(), nextAddressRequest)
if err != nil {
fmt.Println(err)
return
}
miningAddress := nextAddressResponse.GetAddress()
fmt.Printf("\033[1;34mNew address generated: %v\n\033[0m", miningAddress)
// Create the dcrd config file with new mining address
data := []byte(fmt.Sprintf("miningaddr=%v", miningAddress))
err = ioutil.WriteFile("/data/dcrd.conf", data, 0644)
if err != nil {
fmt.Println(err)
return
}
fmt.Println("\033[1;35mdcrd.conf created!\033[0m")
return
}
|
[
"\"WALLET_PASS\"",
"\"WALLET_SEED\""
] |
[] |
[
"WALLET_SEED",
"WALLET_PASS"
] |
[]
|
["WALLET_SEED", "WALLET_PASS"]
|
go
| 2 | 0 | |
vendor/github.com/openshift/library-go/pkg/operator/v1alpha1staticpod/controller/installer/installer_controller.go
|
package installer
import (
"fmt"
"os"
"reflect"
"strconv"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
"github.com/openshift/library-go/pkg/operator/resource/resourceread"
"github.com/openshift/library-go/pkg/operator/v1alpha1helpers"
"github.com/openshift/library-go/pkg/operator/v1alpha1staticpod/controller/common"
)
const installerControllerWorkQueueKey = "key"
type InstallerController struct {
targetNamespace, staticPodName string
// configMaps is the list of configmaps that are directly copied. A different actor/controller modifies these.
// the first element should be the configmap that contains the static pod manifest
configMaps []string
// secrets is a list of secrets that are directly copied for the current values. A different actor/controller modifies these.
secrets []string
// command is the string to use for the installer pod command
command []string
operatorConfigClient common.OperatorClient
kubeClient kubernetes.Interface
// queue only ever has one item, but it has nice error handling backoff/retry semantics
queue workqueue.RateLimitingInterface
// installerPodImageFn returns the image name for the installer pod
installerPodImageFn func() string
}
// staticPodState is the status of a static pod that has been installed to a node.
type staticPodState int
const (
// staticPodStatePending means that the installed static pod is not up yet.
staticPodStatePending = staticPodState(iota)
// staticPodStateReady means that the installed static pod is ready.
staticPodStateReady
// staticPodStateFailed means that the static pod installation of a node has failed.
staticPodStateFailed
)
const deploymentIDLabel = "deployment-id"
func NewInstallerController(
targetNamespace, staticPodName string,
configMaps []string,
secrets []string,
command []string,
kubeInformersForTargetNamespace informers.SharedInformerFactory,
operatorConfigClient common.OperatorClient,
kubeClient kubernetes.Interface,
) *InstallerController {
c := &InstallerController{
targetNamespace: targetNamespace,
staticPodName: staticPodName,
configMaps: configMaps,
secrets: secrets,
command: command,
operatorConfigClient: operatorConfigClient,
kubeClient: kubeClient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "InstallerController"),
installerPodImageFn: getInstallerPodImageFromEnv,
}
operatorConfigClient.Informer().AddEventHandler(c.eventHandler())
kubeInformersForTargetNamespace.Core().V1().Pods().Informer().AddEventHandler(c.eventHandler())
return c
}
func (c *InstallerController) getStaticPodState(nodeName string) (state staticPodState, deploymentID string, errors []string, err error) {
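	// Maps the node's mirror pod (named via mirrorPodNameForNode) to one of
	// pending/ready/failed and reports the deployment-id label it carries.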
pod, err := c.kubeClient.CoreV1().Pods(c.targetNamespace).Get(mirrorPodNameForNode(c.staticPodName, nodeName), metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return staticPodStatePending, "", nil, nil
}
return staticPodStatePending, "", nil, err
}
switch pod.Status.Phase {
case corev1.PodRunning, corev1.PodSucceeded:
for _, c := range pod.Status.Conditions {
if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
return staticPodStateReady, pod.Labels[deploymentIDLabel], nil, nil
}
}
case corev1.PodFailed:
return staticPodStateFailed, pod.Labels[deploymentIDLabel], []string{pod.Status.Message}, nil
}
return staticPodStatePending, "", nil, nil
}
func (c *InstallerController) nodeToStartDeploymentWith(nodes []operatorv1alpha1.NodeStatus) (int, error) {
// find upgrading node as this will be the first to start new deployment (to minimize number of down nodes)
startNode := 0
foundUpgradingNode := false
for i := range nodes {
if nodes[i].TargetDeploymentGeneration != 0 {
startNode = i
foundUpgradingNode = true
break
}
}
// otherwise try to find a node that is not ready regarding its currently reported deployment id
if !foundUpgradingNode {
for i := range nodes {
currNodeState := &nodes[i]
state, deploymentID, _, err := c.getStaticPodState(currNodeState.NodeName)
if err != nil {
return 0, err
}
if state != staticPodStateReady || deploymentID != strconv.Itoa(int(currNodeState.CurrentDeploymentGeneration)) {
startNode = i
break
}
}
}
return startNode, nil
}
// createInstallerController takes care of creating content for the static pods to deploy.
// returns whether or not requeue and if an error happened when updating status. Normally it updates status itself.
func (c *InstallerController) createInstallerController(operatorSpec *operatorv1alpha1.OperatorSpec, originalOperatorStatus *operatorv1alpha1.StaticPodOperatorStatus, resourceVersion string) (bool, error) {
operatorStatus := originalOperatorStatus.DeepCopy()
if len(operatorStatus.NodeStatuses) == 0 {
return false, nil
}
// start with node which is in worst state (instead of terminating healthy pods first)
startNode, err := c.nodeToStartDeploymentWith(operatorStatus.NodeStatuses)
if err != nil {
return true, err
}
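	// walk all nodes in a ring, beginning with the node chosen above, so that
	// the unhealthiest/in-transition node is handled first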
for l := 0; l < len(operatorStatus.NodeStatuses); l++ {
i := (startNode + l) % len(operatorStatus.NodeStatuses)
var currNodeState *operatorv1alpha1.NodeStatus
var prevNodeState *operatorv1alpha1.NodeStatus
currNodeState = &operatorStatus.NodeStatuses[i]
if l > 0 {
prev := (startNode + l - 1) % len(operatorStatus.NodeStatuses)
prevNodeState = &operatorStatus.NodeStatuses[prev]
}
// if we are in a transition, check to see if our installer pod completed
if currNodeState.TargetDeploymentGeneration > currNodeState.CurrentDeploymentGeneration {
if err := c.ensureInstallerPod(currNodeState.NodeName, operatorSpec, currNodeState.TargetDeploymentGeneration); err != nil {
return true, err
}
pendingNewDeployment := operatorStatus.LatestAvailableDeploymentGeneration > currNodeState.TargetDeploymentGeneration
newCurrNodeState, err := c.newNodeStateForInstallInProgress(currNodeState, pendingNewDeployment)
if err != nil {
return true, err
}
// if we make a change to this status, we want to write it out to the API before we commence work on the next node.
// it's an extra write/read, but it makes the state debuggable from outside this process
if !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) {
glog.Infof("%q moving to %v", currNodeState.NodeName, spew.Sdump(*newCurrNodeState))
operatorStatus.NodeStatuses[i] = *newCurrNodeState
if !reflect.DeepEqual(originalOperatorStatus, operatorStatus) {
_, updateError := c.operatorConfigClient.UpdateStatus(resourceVersion, operatorStatus)
return false, updateError
}
} else {
glog.V(2).Infof("%q is in transition to %d, but has not made progress", currNodeState.NodeName, currNodeState.TargetDeploymentGeneration)
}
break
}
deploymentIDToStart := c.getDeploymentIDToStart(currNodeState, prevNodeState, operatorStatus)
if deploymentIDToStart == 0 {
glog.V(4).Infof("%q does not need update", currNodeState.NodeName)
continue
}
glog.Infof("%q needs to deploy to %d", currNodeState.NodeName, deploymentIDToStart)
newCurrNodeState := currNodeState.DeepCopy()
newCurrNodeState.TargetDeploymentGeneration = deploymentIDToStart
newCurrNodeState.LastFailedDeploymentErrors = nil
// if we make a change to this status, we want to write it out to the API before we commence work on the next node.
// it's an extra write/read, but it makes the state debuggable from outside this process
if !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) {
glog.Infof("%q moving to %v", currNodeState.NodeName, spew.Sdump(*newCurrNodeState))
operatorStatus.NodeStatuses[i] = *newCurrNodeState
if !reflect.DeepEqual(originalOperatorStatus, operatorStatus) {
_, updateError := c.operatorConfigClient.UpdateStatus(resourceVersion, operatorStatus)
return false, updateError
}
}
break
}
v1alpha1helpers.SetOperatorCondition(&operatorStatus.Conditions, operatorv1alpha1.OperatorCondition{
Type: "InstallerControllerFailing",
Status: operatorv1alpha1.ConditionFalse,
})
if !reflect.DeepEqual(originalOperatorStatus, operatorStatus) {
_, updateError := c.operatorConfigClient.UpdateStatus(resourceVersion, operatorStatus)
if updateError != nil {
return true, updateError
}
}
return false, nil
}
// newNodeStateForInstallInProgress returns the new NodeState or error
func (c *InstallerController) newNodeStateForInstallInProgress(currNodeState *operatorv1alpha1.NodeStatus, newDeploymentPending bool) (*operatorv1alpha1.NodeStatus, error) {
ret := currNodeState.DeepCopy()
installerPod, err := c.kubeClient.CoreV1().Pods(c.targetNamespace).Get(getInstallerPodName(currNodeState.TargetDeploymentGeneration, currNodeState.NodeName), metav1.GetOptions{})
if apierrors.IsNotFound(err) {
ret.LastFailedDeploymentGeneration = currNodeState.TargetDeploymentGeneration
ret.TargetDeploymentGeneration = currNodeState.CurrentDeploymentGeneration
ret.LastFailedDeploymentErrors = []string{err.Error()}
return ret, nil
}
if err != nil {
return nil, err
}
failed := false
errors := []string{}
switch installerPod.Status.Phase {
case corev1.PodSucceeded:
if newDeploymentPending {
// stop early, don't wait for ready static pod because a new deployment is waiting
failed = true
errors = append(errors, "static pod has been installed, but is not ready while new deployment is pending")
break
}
state, deploymentID, failedErrors, err := c.getStaticPodState(currNodeState.NodeName)
if err != nil {
return nil, err
}
if deploymentID != strconv.Itoa(int(currNodeState.TargetDeploymentGeneration)) {
// new updated pod to be launched
break
}
switch state {
case staticPodStateFailed:
failed = true
errors = failedErrors
case staticPodStateReady:
ret.CurrentDeploymentGeneration = currNodeState.TargetDeploymentGeneration
ret.TargetDeploymentGeneration = 0
ret.LastFailedDeploymentGeneration = 0
ret.LastFailedDeploymentErrors = nil
return ret, nil
}
case corev1.PodFailed:
failed = true
for _, containerStatus := range installerPod.Status.ContainerStatuses {
if containerStatus.State.Terminated != nil && len(containerStatus.State.Terminated.Message) > 0 {
errors = append(errors, fmt.Sprintf("%s: %s", containerStatus.Name, containerStatus.State.Terminated.Message))
}
}
}
if failed {
ret.LastFailedDeploymentGeneration = currNodeState.TargetDeploymentGeneration
ret.TargetDeploymentGeneration = 0
if len(errors) == 0 {
errors = append(errors, "no detailed termination message, see `oc get -n %q pods/%q -oyaml`", installerPod.Namespace, installerPod.Name)
}
ret.LastFailedDeploymentErrors = errors
return ret, nil
}
return ret, nil
}
// getDeploymentIDToStart returns the deploymentID we need to start or zero if none
func (c *InstallerController) getDeploymentIDToStart(currNodeState, prevNodeState *operatorv1alpha1.NodeStatus, operatorStatus *operatorv1alpha1.StaticPodOperatorStatus) int32 {
if prevNodeState == nil {
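		// first node in the walk: deploy the latest available generation unless
		// this node already runs it or already failed it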
currentAtLatest := currNodeState.CurrentDeploymentGeneration == operatorStatus.LatestAvailableDeploymentGeneration
failedAtLatest := currNodeState.LastFailedDeploymentGeneration == operatorStatus.LatestAvailableDeploymentGeneration
if !currentAtLatest && !failedAtLatest {
return operatorStatus.LatestAvailableDeploymentGeneration
}
return 0
}
prevFinished := prevNodeState.TargetDeploymentGeneration == 0
prevInTransition := prevNodeState.CurrentDeploymentGeneration != prevNodeState.TargetDeploymentGeneration
if prevInTransition && !prevFinished {
return 0
}
prevAhead := prevNodeState.CurrentDeploymentGeneration > currNodeState.CurrentDeploymentGeneration
failedAtPrev := currNodeState.LastFailedDeploymentGeneration == prevNodeState.CurrentDeploymentGeneration
if prevAhead && !failedAtPrev {
return prevNodeState.CurrentDeploymentGeneration
}
return 0
}
func getInstallerPodName(deploymentID int32, nodeName string) string {
return fmt.Sprintf("installer-%d-%s", deploymentID, nodeName)
}
// ensureInstallerPod creates the installer pod with the required secrets if it does not exist already
func (c *InstallerController) ensureInstallerPod(nodeName string, operatorSpec *operatorv1alpha1.OperatorSpec, deploymentID int32) error {
required := resourceread.ReadPodV1OrDie([]byte(installerPod))
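	// Apply the image pull policy from the operator spec; an empty value keeps the manifest
	// default, and anything unrecognised is rejected.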
switch corev1.PullPolicy(operatorSpec.ImagePullPolicy) {
case corev1.PullAlways, corev1.PullIfNotPresent, corev1.PullNever:
required.Spec.Containers[0].ImagePullPolicy = corev1.PullPolicy(operatorSpec.ImagePullPolicy)
case "":
default:
return fmt.Errorf("invalid imagePullPolicy specified: %v", operatorSpec.ImagePullPolicy)
}
required.Name = getInstallerPodName(deploymentID, nodeName)
required.Namespace = c.targetNamespace
required.Spec.NodeName = nodeName
required.Spec.Containers[0].Image = c.installerPodImageFn()
required.Spec.Containers[0].Command = c.command
required.Spec.Containers[0].Args = append(required.Spec.Containers[0].Args,
fmt.Sprintf("-v=%d", operatorSpec.Logging.Level),
fmt.Sprintf("--deployment-id=%d", deploymentID),
fmt.Sprintf("--namespace=%s", c.targetNamespace),
fmt.Sprintf("--pod=%s", c.configMaps[0]),
fmt.Sprintf("--resource-dir=%s", "/etc/kubernetes/static-pod-resources"),
fmt.Sprintf("--pod-manifest-dir=%s", "/etc/kubernetes/manifests"),
)
for _, name := range c.configMaps {
required.Spec.Containers[0].Args = append(required.Spec.Containers[0].Args, fmt.Sprintf("--configmaps=%s", name))
}
for _, name := range c.secrets {
required.Spec.Containers[0].Args = append(required.Spec.Containers[0].Args, fmt.Sprintf("--secrets=%s", name))
}
if _, err := c.kubeClient.CoreV1().Pods(c.targetNamespace).Create(required); err != nil && !apierrors.IsAlreadyExists(err) {
glog.Errorf("failed to create pod on node %q for %s: %v", nodeName, resourceread.WritePodV1OrDie(required), err)
return err
}
return nil
}
func getInstallerPodImageFromEnv() string {
return os.Getenv("OPERATOR_IMAGE")
}
func (c InstallerController) sync() error {
operatorSpec, originalOperatorStatus, resourceVersion, err := c.operatorConfigClient.Get()
if err != nil {
return err
}
operatorStatus := originalOperatorStatus.DeepCopy()
switch operatorSpec.ManagementState {
case operatorv1alpha1.Unmanaged:
return nil
case operatorv1alpha1.Removed:
// TODO probably just fail. Static pod managers can't be removed.
return nil
}
requeue, syncErr := c.createInstallerController(operatorSpec, operatorStatus, resourceVersion)
if requeue && syncErr == nil {
return fmt.Errorf("synthetic requeue request")
}
err = syncErr
if err != nil {
v1alpha1helpers.SetOperatorCondition(&operatorStatus.Conditions, operatorv1alpha1.OperatorCondition{
Type: operatorv1alpha1.OperatorStatusTypeFailing,
Status: operatorv1alpha1.ConditionTrue,
Reason: "StatusUpdateError",
Message: err.Error(),
})
if !reflect.DeepEqual(originalOperatorStatus, operatorStatus) {
if _, updateError := c.operatorConfigClient.UpdateStatus(resourceVersion, operatorStatus); updateError != nil {
glog.Error(updateError)
}
}
return err
}
return nil
}
// Run starts the InstallerController and blocks until stopCh is closed.
func (c *InstallerController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
glog.Infof("Starting InstallerController")
defer glog.Infof("Shutting down InstallerController")
// doesn't matter what workers say, only start one.
go wait.Until(c.runWorker, time.Second, stopCh)
<-stopCh
}
func (c *InstallerController) runWorker() {
for c.processNextWorkItem() {
}
}
func (c *InstallerController) processNextWorkItem() bool {
dsKey, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(dsKey)
err := c.sync()
if err == nil {
c.queue.Forget(dsKey)
return true
}
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
c.queue.AddRateLimited(dsKey)
return true
}
// eventHandler queues the operator to check spec and status
func (c *InstallerController) eventHandler() cache.ResourceEventHandler {
return cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { c.queue.Add(installerControllerWorkQueueKey) },
UpdateFunc: func(old, new interface{}) { c.queue.Add(installerControllerWorkQueueKey) },
DeleteFunc: func(obj interface{}) { c.queue.Add(installerControllerWorkQueueKey) },
}
}
func mirrorPodNameForNode(staticPodName, nodeName string) string {
return staticPodName + "-" + nodeName
}
const installerPod = `apiVersion: v1
kind: Pod
metadata:
namespace: <namespace>
name: installer-<deployment-id>-<nodeName>
labels:
app: installer
spec:
serviceAccountName: installer-sa
containers:
- name: installer
image: ${IMAGE}
imagePullPolicy: Always
securityContext:
privileged: true
runAsUser: 0
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/kubernetes/
name: kubelet-dir
restartPolicy: Never
securityContext:
runAsUser: 0
volumes:
- hostPath:
path: /etc/kubernetes/
name: kubelet-dir
`
|
[
"\"OPERATOR_IMAGE\""
] |
[] |
[
"OPERATOR_IMAGE"
] |
[]
|
["OPERATOR_IMAGE"]
|
go
| 1 | 0 | |
data/build_ravdess.py
|
import os
import glob
import math
import numpy as np
import pickle
import ipdb
import librosa
import librosa.display
import itertools
import scipy
import urllib.request
import zipfile
def build():
data_dir = os.getenv('DATA_PATH')
assert data_dir is not None
data_path = os.path.join(data_dir, 'ravdess_speech')
# urllib.request.urlretrieve("https://github.com/brendenlake/omniglot/archive/refs/heads/master.zip", os.path.join(data_dir, 'Audio_Speech_Actors_01-24.zip'))
with zipfile.ZipFile(os.path.join(data_dir, 'Audio_Speech_Actors_01-24.zip'), 'r') as zip_ref:
os.mkdir(os.path.join(data_dir, 'ravdess_speech'))
zip_ref.extractall(os.path.join(data_dir, 'ravdess_speech'))
_n_mels = 128
_n_fft = 2048
_hop_len = 512
_duration = 3
_mel_len = 128
_zoom = 0.5
emotions = {2: (0, 'calm'),
5: (1, 'angry'),
6: (2, 'fearful'),
8: (3, 'surprised')}
relation_set = {}
relation_map = np.ones((len(emotions), len(emotions)))*-1
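    # Every ordered pair of emotions becomes one relation class; relation_map stores the pair id by emotion ids.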
for i, (a,b) in enumerate(itertools.product(list(emotions.keys()), list(emotions.keys()))):
relation_map[emotions[a][0], emotions[b][0]] = i
relation_set[i] = f'{emotions[a][1]}-{emotions[b][1]}'
data = np.zeros((24*2*2,
int(_n_mels*_zoom),
int(_mel_len*_zoom),
len(emotions)))
eps = 1e-30
amp_bias = -1*np.log(eps)
amp_scale = 5./amp_bias
for fn in glob.glob(data_path+'/*/*.wav'):
_, _, _emotion, _intensity, _stmt, _rep, _id = [int(t) for t in fn.split('/')[-1].split('.')[0].split('-')]
if _emotion in emotions and _intensity==2:
_y, _sr = librosa.load(fn)
_y, _idx = librosa.effects.trim(_y, top_db=25)
if _y.shape[0] >= _duration*_sr:
_y = _y[:_duration*_sr]
else:
_y = np.pad(_y, (0, _duration*_sr - _y.shape[0]), "constant")
_s = librosa.feature.melspectrogram(_y, _sr,
n_mels=_n_mels,
n_fft=_n_fft,
hop_length=_hop_len,
power=2.0)
_s_db = librosa.amplitude_to_db(_s + 1e-8, ref=np.max)
_s_db = _s_db[:,:_mel_len]
_s_db = scipy.ndimage.zoom(_s_db, _zoom)
_s_db = np.clip((_s_db + amp_bias)*amp_scale, 0, None)
idx = (_stmt-1)*24*2 + (_id-1)*2 + (_rep-1)
data[idx, ..., emotions[_emotion][0]] = _s_db[::-1, :]
data = {'relation_set':relation_set,
'relation_map':relation_map,
'data':data}
pickle.dump(data, open(os.path.join(data_dir,'ravdess.pkl'), 'wb'))
if __name__ == '__main__':
build()
|
[] |
[] |
[
"DATA_PATH"
] |
[]
|
["DATA_PATH"]
|
python
| 1 | 0 | |
vendor/github.com/heroku/x/hmetrics/examples_test.go
|
/* Copyright (c) 2018 Salesforce
* All rights reserved.
* Licensed under the BSD 3-Clause license.
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
package hmetrics_test
import (
"context"
"log"
"net/http"
"os"
"github.com/heroku/x/hmetrics"
)
func ExampleReport_basic() {
// Don't care about canceling or errors
go hmetrics.Report(context.Background(), hmetrics.DefaultEndpoint, nil)
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
http.ListenAndServe(":"+port, nil)
}
func ExampleReport_logging() {
go func() {
if err := hmetrics.Report(context.Background(), hmetrics.DefaultEndpoint, func(err error) error {
log.Println("Error reporting metrics to heroku:", err)
return nil
}); err != nil {
log.Fatal("Error starting hmetrics reporting:", err)
}
}()
}
func ExampleReport_advanced() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
type fataler interface {
Fatal() bool
}
for { // try again and again on non fatal errors
if err := hmetrics.Report(ctx, hmetrics.DefaultEndpoint, func(err error) error {
log.Println("Error reporting metrics to heroku:", err)
return nil
}); err != nil {
if f, ok := err.(fataler); ok && f.Fatal() {
log.Fatal(err)
}
log.Println(err)
}
}
}()
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
http.ListenAndServe(":"+port, nil)
}
|
[
"\"PORT\"",
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
go-time/main.go
|
package main
import (
"encoding/json"
"net/http"
"os"
"time"
)
func handler(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/json")
t := time.Now()
enc := json.NewEncoder(w)
err := enc.Encode(map[string]interface{}{
"now": t,
})
if err != nil {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
}
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
http.HandleFunc("/", handler)
http.ListenAndServe(":"+port, nil)
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
sl2ipmiview.py
|
import argparse
import os
import sys
import itertools
import SoftLayer
import Crypto.Cipher.AES
def encrypt_password(hostname, password):
"""IPMIView stores its passwords encrypted with AES-128-CBC using
an all-zeros IV and the hostname as the key.
SO SECURE!"""
iv = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if len(hostname) < 16:
key = hostname + ('\x00' * (16 - len(hostname)))
else:
key = hostname[:16]
cipher = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC, iv)
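    # AES works on 16-byte blocks, so zero-pad the password up to a block boundary before encrypting.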
if len(password) % 16 != 0:
password += ('\x00' * (16 - (len(password) % 16)))
return cipher.encrypt(password).encode('hex')
def hostname_frags(s):
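    """Split a hostname into alternating text and integer fragments so hosts sort naturally
    (e.g. 'host2' before 'host10')."""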
return tuple(
(int(''.join(chrs)) if is_digits else ''.join(chrs))
for (is_digits, chrs) in
itertools.groupby(s, str.isdigit)
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--username', default=os.environ.get('SL_USERNAME', None),
required='SL_USERNAME' not in os.environ, help='SoftLayer username (default $SL_USERNAME)')
parser.add_argument('--api-key', default=os.environ.get('SL_API_KEY', None),
required='SL_API_KEY' not in os.environ, help='SoftLayer API key (default $SL_API_KEY)')
parser.add_argument('-A', '--account-file', default='account.properties', type=argparse.FileType('w'),
help='Path to write account.properties file to')
parser.add_argument('-I', '--ipmiview-file', default='IPMIView.properties', type=argparse.FileType('w'),
help='Path to write IPMIView.properties file to')
args = parser.parse_args()
client = SoftLayer.create_client_from_env(args.username, args.api_key)
hardware = SoftLayer.managers.hardware.HardwareManager(client)
for host in sorted(hardware.list_hardware(), key=lambda d: hostname_frags(d.get('hostname', None))):
if 'globalIdentifier' not in host:
continue
hwinfo = hardware.get_hardware(host['globalIdentifier'])
args.ipmiview_file.write('{hostname}={mgmt_ip}:{hostname}.{domain}\n'.format(
hostname=hwinfo['hostname'],
mgmt_ip=hwinfo['networkManagementIpAddress'],
domain=hwinfo['domain']
))
if len(hwinfo['remoteManagementAccounts']) > 0:
acct = hwinfo['remoteManagementAccounts'][0]
args.account_file.write('{hostname}={username},{password}\n'.format(
hostname=hwinfo['hostname'],
username=acct['username'],
password=encrypt_password(hwinfo['hostname'], acct['password'])
))
sys.exit(main())
|
[] |
[] |
[
"SL_API_KEY",
"SL_USERNAME"
] |
[]
|
["SL_API_KEY", "SL_USERNAME"]
|
python
| 2 | 0 | |
cmd/integrationArtifactGetMplStatus_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type integrationArtifactGetMplStatusOptions struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
IntegrationFlowID string `json:"integrationFlowId,omitempty"`
Platform string `json:"platform,omitempty"`
Host string `json:"host,omitempty"`
OAuthTokenProviderURL string `json:"oAuthTokenProviderUrl,omitempty"`
}
type integrationArtifactGetMplStatusCommonPipelineEnvironment struct {
custom struct {
iFlowMplStatus string
}
}
func (p *integrationArtifactGetMplStatusCommonPipelineEnvironment) persist(path, resourceName string) {
content := []struct {
category string
name string
value interface{}
}{
{category: "custom", name: "iFlowMplStatus", value: p.custom.iFlowMplStatus},
}
errCount := 0
for _, param := range content {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(param.category, param.name), param.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting piper environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Piper environment")
}
}
// IntegrationArtifactGetMplStatusCommand Get the MPL status of an integration flow
func IntegrationArtifactGetMplStatusCommand() *cobra.Command {
const STEP_NAME = "integrationArtifactGetMplStatus"
metadata := integrationArtifactGetMplStatusMetadata()
var stepConfig integrationArtifactGetMplStatusOptions
var startTime time.Time
var commonPipelineEnvironment integrationArtifactGetMplStatusCommonPipelineEnvironment
var createIntegrationArtifactGetMplStatusCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Get the MPL status of an integration flow",
Long: `With this step you can obtain information about the Message Processing Log (MPL) status of integration flow using OData API. Learn more about the SAP Cloud Integration remote API for getting MPL status messages processed of an deployed integration artifact [here](https://help.sap.com/viewer/368c481cd6954bdfa5d0435479fd4eaf/Cloud/en-US/d1679a80543f46509a7329243b595bdb.html).`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
commonPipelineEnvironment.persist(GeneralConfig.EnvRootPath, "commonPipelineEnvironment")
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
integrationArtifactGetMplStatus(stepConfig, &telemetryData, &commonPipelineEnvironment)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addIntegrationArtifactGetMplStatusFlags(createIntegrationArtifactGetMplStatusCmd, &stepConfig)
return createIntegrationArtifactGetMplStatusCmd
}
func addIntegrationArtifactGetMplStatusFlags(cmd *cobra.Command, stepConfig *integrationArtifactGetMplStatusOptions) {
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User to authenticate to the SAP Cloud Platform Integration Service")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password to authenticate to the SAP Cloud Platform Integration Service")
cmd.Flags().StringVar(&stepConfig.IntegrationFlowID, "integrationFlowId", os.Getenv("PIPER_integrationFlowId"), "Specifies the ID of the Integration Flow artifact")
cmd.Flags().StringVar(&stepConfig.Platform, "platform", os.Getenv("PIPER_platform"), "Specifies the running platform of the SAP Cloud platform integraion service")
cmd.Flags().StringVar(&stepConfig.Host, "host", os.Getenv("PIPER_host"), "Specifies the protocol and host address, including the port. Please provide in the format `<protocol>://<host>:<port>`. Supported protocols are `http` and `https`.")
cmd.Flags().StringVar(&stepConfig.OAuthTokenProviderURL, "oAuthTokenProviderUrl", os.Getenv("PIPER_oAuthTokenProviderUrl"), "Specifies the oAuth Provider protocol and host address, including the port. Please provide in the format `<protocol>://<host>:<port>`. Supported protocols are `http` and `https`.")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("integrationFlowId")
cmd.MarkFlagRequired("host")
cmd.MarkFlagRequired("oAuthTokenProviderUrl")
}
// retrieve step metadata
func integrationArtifactGetMplStatusMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "integrationArtifactGetMplStatus",
Aliases: []config.Alias{},
Description: "Get the MPL status of an integration flow",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "username",
ResourceRef: []config.ResourceReference{
{
Name: "cpiCredentialsId",
Param: "username",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "password",
ResourceRef: []config.ResourceReference{
{
Name: "cpiCredentialsId",
Param: "password",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "integrationFlowId",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "platform",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "host",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "oAuthTokenProviderUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
},
},
Outputs: config.StepOutputs{
Resources: []config.StepResources{
{
Name: "commonPipelineEnvironment",
Type: "piperEnvironment",
Parameters: []map[string]interface{}{
{"Name": "custom/iFlowMplStatus"},
},
},
},
},
},
}
return theMetaData
}
|
[
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_integrationFlowId\"",
"\"PIPER_platform\"",
"\"PIPER_host\"",
"\"PIPER_oAuthTokenProviderUrl\""
] |
[] |
[
"PIPER_oAuthTokenProviderUrl",
"PIPER_host",
"PIPER_integrationFlowId",
"PIPER_password",
"PIPER_username",
"PIPER_platform"
] |
[]
|
["PIPER_oAuthTokenProviderUrl", "PIPER_host", "PIPER_integrationFlowId", "PIPER_password", "PIPER_username", "PIPER_platform"]
|
go
| 6 | 0 | |
Houston_code/SSRN_Houston_stack_all.py
|
# Written by Chiru Ge, contact: [email protected]
# HSI ++ Resnet
# use CPU only
#import os
#import sys
#os.environ["CUDA_DEVICE_ORDER"]="PCA_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
#import tensorflow as tf
#sess = tf.Session(config=tf.ConfigProto(device_count={'gpu':-1}))
#use GPU
import os
import tensorflow as tf
#tf.device('gpu:1')
#os.environ["CUDA_DEVICE_ORDER"]="PCA_BUS_ID"
##CUDA_VISIBLE_DEVICES=0,1 ./cuda_executable
os.environ['CUDA_VISIBLE_DEVICES']='0'
config=tf.ConfigProto()
config.gpu_options.allow_growth= True
sess=tf.Session(config=config)
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from keras.models import Sequential, Model
from keras.layers import Convolution2D, MaxPooling2D, Conv3D, MaxPooling3D, ZeroPadding3D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization, Input
from keras.utils.np_utils import to_categorical
from sklearn.decomposition import PCA
from keras.optimizers import Adam, SGD, Adadelta, RMSprop, Nadam
import keras.callbacks as kcallbacks
from keras.regularizers import l2
import time
import collections
from sklearn import metrics, preprocessing
from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ssrn_SS_Houston_1_nbfilter
from keras.utils.vis_utils import plot_model
import h5py
from keras.models import load_model
def indexToAssignment(index_, Row, Col, pad_length):
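    # Map flat ground-truth indices to (row, col) positions in the zero-padded image.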
new_assign = {}
for counter, value in enumerate(index_):
assign_0 = value // Col + pad_length
assign_1 = value % Col + pad_length
new_assign[counter] = [assign_0, assign_1]
return new_assign
def assignmentToIndex( assign_0, assign_1, Row, Col):
new_index = assign_0 * Col + assign_1
return new_index
def selectNeighboringPatch(matrix, pos_row, pos_col, ex_len):
selected_rows = matrix[range(pos_row-ex_len,pos_row+ex_len+1), :]
selected_patch = selected_rows[:, range(pos_col-ex_len, pos_col+ex_len+1)]
return selected_patch
def sampling(proptionVal, groundTruth): #divide dataset into train and test datasets
labels_loc = {}
train = {}
test = {}
m = max(groundTruth)
for i in range(m):
indices = [j for j, x in enumerate(groundTruth.ravel().tolist()) if x == i + 1]
np.random.shuffle(indices)
labels_loc[i] = indices
nb_val = int(proptionVal * len(indices))
train[i] = indices[:-nb_val]
test[i] = indices[-nb_val:]
# whole_indices = []
train_indices = []
test_indices = []
for i in range(m):
# whole_indices += labels_loc[i]
train_indices += train[i]
test_indices += test[i]
np.random.shuffle(train_indices)
np.random.shuffle(test_indices)
return train_indices, test_indices
def sampling1(trainlabels, testlabels, verification_set):
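    # Collect the flat pixel indices of every class in the train, test and verification label maps.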
labels_loc = {}
train_indices=[]
test_indices=[]
verification_indices=[]
m={}
m=np.max(trainlabels[:])
for i in range(m):
indices = [j for j, x in enumerate(trainlabels.ravel().tolist()) if x == i + 1]
labels_loc[i] = indices
train_indices += labels_loc[i]
for i in range(m):
indices = [j for j, x in enumerate(testlabels.ravel().tolist()) if x == i + 1]
labels_loc[i] = indices
test_indices += labels_loc[i]
for i in range(m):
indices = [j for j, x in enumerate(verification_set.ravel().tolist()) if x == i + 1]
labels_loc[i] = indices
verification_indices += labels_loc[i]
return train_indices, test_indices, verification_indices
def res4_model_ss():
model_res4 = ssrn_SS_Houston_1_nbfilter.ResnetBuilder.build_resnet_1_1((1, img_rows, img_cols, img_channels), nb_classes)
RMS = RMSprop(lr=0.0003)
# Let's train the model using RMSprop
model_res4.compile(loss='categorical_crossentropy', optimizer=RMS, metrics=['accuracy'])
return model_res4
######### save the best validated model ##########
best_weights_RES_path_ss4 = ('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/models/Houston/stack/Houston_stackall_2-2_32_11x11_0.0003.hdf5')
######### Load data ########
file=h5py.File('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/stack_EPLBP.mat','r')
file.keys()
data = file['stack_EPLBP'][:]
data_Houston=data.transpose(2,1,0);
file.close()
mat_data = sio.loadmat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_labels.mat')
trainlabels = mat_data['trainlabels']
testlabels = mat_data['testlabels']
verification_set = mat_data['verification_set']
del mat_data
######### Show the image ########
plt.imshow(data_Houston[:,:,10])
plt.show()
plt.imshow(trainlabels[:,], cmap="jet")
plt.show()
plt.imshow(testlabels[:,],cmap="jet")
plt.show()
np.max(testlabels[:])#find the max value of the groundtruth
######### Training parameter setting ##########
batch_size = 16 #sample number of each batch
nb_classes = 15 #class number
nb_epoch = 200 #epoch
img_rows, img_cols = 11, 11
PATCH_LENGTH = 5 #Patch_size
INPUT_DIMENSION_CONV = 1093
INPUT_DIMENSION = 1093
img_channels = 1093
patience = 200
TEST_SIZE = 12197
TRAIN_SIZE = 2832
VERIFICATION_SIZE = 1500
TOTAL_SIZE = TRAIN_SIZE+TEST_SIZE
CATEGORY = 15
######### Data normalization ########
data = data_Houston.reshape(np.prod(data_Houston.shape[:2]),np.prod(data_Houston.shape[2:]))# 3D to 2D
data = preprocessing.scale(data) #normalization
whole_data = data.reshape(data_Houston.shape[0], data_Houston.shape[1],data_Houston.shape[2])
padded_data = zeroPadding.zeroPadding_3D(whole_data, PATCH_LENGTH)
del data,data_Houston
######### Make the labels of the training samples and testing samples ########
## 3D to 2D
trainl = trainlabels.reshape(np.prod(trainlabels.shape[:2]),)
testl = testlabels.reshape(np.prod(testlabels.shape[:2]),)
verificationl = verification_set.reshape(np.prod(verification_set.shape[:2]),)
## Defining data space
train_data = np.zeros((TRAIN_SIZE, 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, INPUT_DIMENSION_CONV))
test_data = np.zeros((TEST_SIZE, 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, INPUT_DIMENSION_CONV))
verification_data = np.zeros((VERIFICATION_SIZE, 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, INPUT_DIMENSION_CONV))
## Find the index of training samples and testing samples
train_indices, test_indices, verification_indices = sampling1(trainlabels, testlabels, verification_set)
## Training sample label
y_train = trainl[train_indices] - 1
y_train = to_categorical(np.asarray(y_train))#to_categorical convert the category number to binary
## Testing sample label
y_test = testl[test_indices] - 1
y_test = to_categorical(np.asarray(y_test))
## Validation sample label
y_verification = verificationl[verification_indices] - 1
y_verification = to_categorical(np.asarray(y_verification))
## training samples
train_assign = indexToAssignment(train_indices, whole_data.shape[0], whole_data.shape[1], PATCH_LENGTH)
for i in range(len(train_assign)):
train_data[i] = selectNeighboringPatch(padded_data, train_assign[i][0], train_assign[i][1], PATCH_LENGTH)
## testing samples
test_assign = indexToAssignment(test_indices, whole_data.shape[0], whole_data.shape[1], PATCH_LENGTH)
for i in range(len(test_assign)):
test_data[i] = selectNeighboringPatch(padded_data, test_assign[i][0], test_assign[i][1], PATCH_LENGTH)
## validation samples
verification_assign = indexToAssignment(verification_indices, whole_data.shape[0], whole_data.shape[1], PATCH_LENGTH)
for i in range(len(verification_assign)):
verification_data[i] = selectNeighboringPatch(padded_data, verification_assign[i][0], verification_assign[i][1], PATCH_LENGTH)
x_train = train_data.reshape(train_data.shape[0], train_data.shape[1], train_data.shape[2], INPUT_DIMENSION_CONV)
x_test = test_data.reshape(test_data.shape[0], test_data.shape[1], test_data.shape[2], INPUT_DIMENSION_CONV)
x_verification = verification_data.reshape(verification_data.shape[0], verification_data.shape[1], verification_data.shape[2], INPUT_DIMENSION_CONV)
############ Evaluation index ############
KAPPA_RES_SS4 = []
OA_RES_SS4 = []
AA_RES_SS4 = []
TRAINING_TIME_RES_SS4 = []
TESTING_TIME_RES_SS4 = []
#ELEMENT_ACC_RES_SS4 = np.zeros((ITER, CATEGORY))
############ Model training and result evaluation ############
model_res4_SS_BN = res4_model_ss()
plot_model(model_res4_SS_BN, to_file='/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/model_show/test1.png', show_shapes=True, show_layer_names=True) # visualize the residual network (with BN) architecture
earlyStopping6 = kcallbacks.EarlyStopping(monitor='val_acc', patience=patience, verbose=1, mode='max')
saveBestModel6 = kcallbacks.ModelCheckpoint(best_weights_RES_path_ss4, monitor='val_acc', verbose=1,
save_best_only=True,
mode='max')
tic6 = time.clock()
print(x_train.shape, x_test.shape)
history_res4_SS_BN = model_res4_SS_BN.fit(x=x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], x_train.shape[3], 1), y=y_train,
batch_size=batch_size, epochs=nb_epoch, verbose=1, callbacks=[earlyStopping6, saveBestModel6],
validation_data=(x_verification.reshape(x_verification.shape[0], x_verification.shape[1], x_verification.shape[2], x_verification.shape[3], 1), y_verification),
shuffle=True)
toc6 = time.clock()
# load best model
model=load_model(best_weights_RES_path_ss4)
tic7 = time.clock()
loss_and_metrics = model.evaluate(
x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3], 1), y_test,
batch_size=batch_size)
toc7 = time.clock()
print('3D RES_SS4 without BN Training Time: ', toc6 - tic6)
print('3D RES_SS4 without BN Test time:', toc7 - tic7)
print('3D RES_SS4 without BN Test score:', loss_and_metrics[0])
print('3D RES_SS4 without BN Test accuracy:', loss_and_metrics[1])
Probability = model.predict(
x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3], 1))
pred_test_res4=Probability.argmax(axis=1)
collections.Counter(pred_test_res4)
gt_test = testl[test_indices] - 1
overall_acc_res4 = metrics.accuracy_score(pred_test_res4, gt_test)
confusion_matrix_res4 = metrics.confusion_matrix(pred_test_res4, gt_test)
each_acc_res4, average_acc_res4 = averageAccuracy.AA_andEachClassAccuracy(confusion_matrix_res4)
kappa = metrics.cohen_kappa_score(pred_test_res4, gt_test)
testing_time=toc7 - tic7
training_time=toc6 - tic6
# Save the data to "**.mat" format
adict={}
adict['OA']=overall_acc_res4
adict['AA']=average_acc_res4
adict['testing_time']=testing_time
adict['training_time']=training_time
adict['kappa']=kappa
adict['each_acc']=each_acc_res4
adict['confusion_matrix']=confusion_matrix_res4
adict['Probability_HSI']=Probability
adict['maxPro_HSI']=pred_test_res4
adict['testlabel']=y_test
sio.savemat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/records/Houston/stack/Houston_stackall_2-2_32_11x11_0.0003.mat',adict)
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
bot.py
|
import re
import os
import random
import logging
from telegram import Update
from telegram.ext import (
Updater,
MessageHandler,
Filters,
CallbackContext,
)
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
audio_numbers = []
token = os.environ.get("RAELAOVIVOBOT_TELEGRAM_TOKEN")
assert token is not None, "RAELAOVIVOBOT_TELEGRAM_TOKEN envvar required, idiot."
def _get_random_audio():
global audio_numbers
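    # Refill and reshuffle the pool once it is exhausted so each clip is used once per cycle.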
if len(audio_numbers) == 0:
audio_numbers = list(range(1, 17))
random.shuffle(audio_numbers)
random_number = audio_numbers.pop()
return open(f"./audios/{random_number}.ogg", "rb")
def echo(update: Update, context: CallbackContext) -> None:
"""Echo the user message."""
audio = _get_random_audio()
update.message.reply_audio(audio)
def main() -> None:
"""Start the bot."""
# Create the Updater and pass it your bot's token.
updater = Updater(token)
# Get the dispatcher to register handlers
dispatcher = updater.dispatcher
# on non command i.e message - echo the message on Telegram
dispatcher.add_handler(
MessageHandler(Filters.regex(re.compile(r"rael", re.IGNORECASE)), echo)
)
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == "__main__":
main()
|
[] |
[] |
[
"RAELAOVIVOBOT_TELEGRAM_TOKEN"
] |
[]
|
["RAELAOVIVOBOT_TELEGRAM_TOKEN"]
|
python
| 1 | 0 | |
src/device-manager/python/build-openweave-wheel.py
|
#
# Copyright (c) 2019 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Description:
# Builds a Python wheel package for OpenWeave.
#
from __future__ import absolute_import
import sys
import os
import stat
from datetime import datetime
import shutil
import getpass
from setuptools import setup
from wheel.bdist_wheel import bdist_wheel
import platform
owDLLName = '_WeaveDeviceMgr.so'
deviceManagerShellName = 'weave-device-mgr.py'
deviceManagerShellInstalledName = os.path.splitext(deviceManagerShellName)[0]
# Record the current directory at the start of execution.
curDir = os.curdir
# Expect to find the source files for the python modules in the same directory
# as the build script.
srcDir = os.path.dirname(os.path.abspath(__file__))
# Presume that the current directory is the build directory.
buildDir = os.path.abspath(os.curdir)
# Use a temporary directory within the build directory to assemble the components
# for the installable package.
tmpDir = os.path.join(buildDir, 'openweave-wheel-components')
try:
#
# Perform a series of setup steps prior to creating the openweave package...
#
# Create the temporary components directory.
if os.path.isdir(tmpDir):
shutil.rmtree(tmpDir)
os.mkdir(tmpDir)
# Switch to the temporary directory. (Foolishly, setuptools relies on the current directory
# for many of its features.)
os.chdir(tmpDir)
# Make a copy of the openweave package in the tmp directory and ensure the copied
# directory is writable.
owPackageDir = os.path.join(tmpDir, 'openweave')
if os.path.isdir(owPackageDir):
shutil.rmtree(owPackageDir)
shutil.copytree(os.path.join(srcDir, 'openweave'), owPackageDir)
os.chmod(owPackageDir, os.stat(owPackageDir).st_mode|stat.S_IWUSR)
# Copy the openweave wrapper DLL from where libtool places it (.libs) into
# the root of the openweave package directory. This is necessary because
# setuptools will only add package data files that are relative to the
# associated package source root.
shutil.copy2(os.path.join(buildDir, '.libs', owDLLName),
os.path.join(owPackageDir, owDLLName))
# Make a copy of the Weave Device Manager Shell script in the tmp directory,
# but without the .py suffix. This is how we want it to appear when installed
# by the wheel.
shutil.copy2(os.path.join(srcDir, deviceManagerShellName),
os.path.join(tmpDir, deviceManagerShellInstalledName))
# Search for the OpenWeave LICENSE file in the parents of the source
# directory and make a copy of the file called LICENSE.txt in the tmp
# directory.
def _AllDirsToRoot(dir):
dir = os.path.abspath(dir)
while True:
yield dir
parent = os.path.dirname(dir)
if parent == '' or parent == dir:
break
dir = parent
for dir in _AllDirsToRoot(srcDir):
licFileName = os.path.join(dir, 'LICENSE')
if os.path.isfile(licFileName):
shutil.copy2(licFileName,
os.path.join(tmpDir, 'LICENSE.txt'))
break
else:
        raise IOError('Unable to find OpenWeave LICENSE file')  # IOError: FileNotFoundError does not exist on Python 2
# Define a custom version of the bdist_wheel command that configures the
# resultant wheel as platform-specific (i.e. not "pure").
class bdist_wheel_override(bdist_wheel):
def finalize_options(self):
bdist_wheel.finalize_options(self)
self.root_is_pure = False
# Construct the package version string. If building under Travis use the Travis
# build number as the package version. Otherwise use a dummy version of '0.0'.
# (See PEP-440 for the syntax rules for python package versions).
if 'TRAVIS_BUILD_NUMBER' in os.environ:
owPackageVer = os.environ['TRAVIS_BUILD_NUMBER']
else:
owPackageVer = os.environ.get('OPENWEAVE_PYTHON_VERSION', '0.0')
# Generate a description string with information on how/when the package
# was built.
if 'TRAVIS_BUILD_NUMBER' in os.environ:
buildDescription = 'Built by Travis CI on %s\n- Build: %s/#%s\n- Build URL: %s\n- Branch: %s\n- Commit: %s\n' % (
datetime.now().strftime('%Y/%m/%d %H:%M:%S'),
os.environ['TRAVIS_REPO_SLUG'],
os.environ['TRAVIS_BUILD_NUMBER'],
os.environ['TRAVIS_BUILD_WEB_URL'],
os.environ['TRAVIS_BRANCH'],
os.environ['TRAVIS_COMMIT'])
else:
buildDescription = 'Build by %s on %s\n' % (
getpass.getuser(),
datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
# Select required packages based on the target system.
if platform.system() == 'Linux':
requiredPackages = [
'dbus-python',
'pgi'
]
else:
requiredPackages = []
#
# Build the openweave package...
#
# Invoke the setuptools 'bdist_wheel' command to generate a wheel containing
# the OpenWeave python packages, shared libraries and scripts.
setup(
name='openweave',
version=owPackageVer,
description='Python-base APIs and tools for OpenWeave.',
long_description=buildDescription,
url='https://github.com/openweave/openweave-core',
license='Apache',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
python_requires='>=2.7, <3',
packages=[
'openweave' # Arrange to install a package named "openweave"
],
package_dir={
'':tmpDir, # By default, look in the tmp directory for packages/modules to be included.
},
package_data={
'openweave':[
owDLLName # Include the wrapper DLL as package data in the "openweave" package.
]
},
scripts=[ # Install the Device Manager Shell as an executable script in the 'bin' directory.
os.path.join(tmpDir, deviceManagerShellInstalledName)
],
install_requires=requiredPackages,
options={
'bdist_wheel':{
'universal':False,
'dist_dir':buildDir # Place the generated .whl in the build directory.
},
'egg_info':{
'egg_base':tmpDir # Place the .egg-info subdirectory in the tmp directory.
}
},
cmdclass={
'bdist_wheel':bdist_wheel_override
},
script_args=[ 'clean', '--all', 'bdist_wheel' ]
)
finally:
# Switch back to the initial current directory.
os.chdir(curDir)
# Remove the temporary directory.
if os.path.isdir(tmpDir):
shutil.rmtree(tmpDir)
|
[] |
[] |
[
"TRAVIS_BUILD_WEB_URL",
"TRAVIS_BUILD_NUMBER",
"TRAVIS_BRANCH",
"OPENWEAVE_PYTHON_VERSION",
"TRAVIS_REPO_SLUG",
"TRAVIS_COMMIT"
] |
[]
|
["TRAVIS_BUILD_WEB_URL", "TRAVIS_BUILD_NUMBER", "TRAVIS_BRANCH", "OPENWEAVE_PYTHON_VERSION", "TRAVIS_REPO_SLUG", "TRAVIS_COMMIT"]
|
python
| 6 | 0 | |
rllib/examples/remote_vector_env_with_custom_api.py
|
"""
This script demonstrates how one can specify custom env APIs in
combination with RLlib's `remote_worker_envs` setting, which
parallelizes individual sub-envs within a vector env by making each
one a ray Actor.
You can access your Env's API via a custom callback as shown below.
"""
import argparse
import gym
import os
import ray
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env.apis.task_settable_env import TaskSettableEnv
from ray.rllib.utils.test_utils import check_learning_achieved
from ray import tune
parser = argparse.ArgumentParser()
parser.add_argument(
"--run",
type=str,
default="PPO",
help="The RLlib-registered algorithm to use.")
parser.add_argument(
"--framework",
choices=["tf", "tf2", "tfe", "torch"],
default="tf",
help="The DL framework specifier.")
parser.add_argument("--num-workers", type=int, default=1)
# This should be >1, otherwise, remote envs make no sense.
parser.add_argument("--num-envs-per-worker", type=int, default=4)
parser.add_argument(
"--as-test",
action="store_true",
help="Whether this script should be run as a test: --stop-reward must "
"be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
"--stop-iters",
type=int,
default=50,
help="Number of iterations to train.")
parser.add_argument(
"--stop-timesteps",
type=int,
default=100000,
help="Number of timesteps to train.")
parser.add_argument(
"--stop-reward",
type=float,
default=180.0,
help="Reward at which we stop training.")
class NonVectorizedEnvToBeVectorizedIntoRemoteVectorEnv(TaskSettableEnv):
"""Class for a single sub-env to be vectorized into RemoteVectorEnv.
    If you specify this class directly under the "env" config key, RLlib
    will auto-wrap it into a vectorized env; with `remote_worker_envs=True`
    each sub-env copy then runs as its own ray Actor.
Note that you may implement your own custom APIs. Here, we demonstrate
using RLlib's TaskSettableEnv API (which is a simple sub-class
of gym.Env).
"""
def __init__(self, config):
self.action_space = gym.spaces.Box(0, 1, shape=(1, ))
self.observation_space = gym.spaces.Box(0, 1, shape=(2, ))
self.task = 1
def reset(self):
self.steps = 0
return self.observation_space.sample()
def step(self, action):
self.steps += 1
return self.observation_space.sample(), 0, self.steps > 10, {}
def set_task(self, task) -> None:
"""We can set the task of each sub-env (ray actor)"""
print("Task set to {}".format(task))
self.task = task
class TaskSettingCallback(DefaultCallbacks):
"""Custom callback to verify, we can set the task on each remote sub-env.
"""
def on_train_result(self, *, trainer, result: dict, **kwargs) -> None:
""" Curriculum learning as seen in Ray docs """
if result["episode_reward_mean"] > 0.0:
phase = 0
else:
phase = 1
# Sub-envs are now ray.actor.ActorHandles, so we have to add
# `remote()` here.
trainer.workers.foreach_env(lambda env: env.set_task.remote(phase))
if __name__ == "__main__":
args = parser.parse_args()
ray.init(num_cpus=6, local_mode=True)
config = {
# Specify your custom (single, non-vectorized) env directly as a
# class. This way, RLlib can auto-create Actors from this class
# and handle everything correctly.
# TODO: Test for multi-agent case.
"env": NonVectorizedEnvToBeVectorizedIntoRemoteVectorEnv,
# Set up our own callbacks.
"callbacks": TaskSettingCallback,
# Force sub-envs to be ray.actor.ActorHandles, so we can step
# through them in parallel.
"remote_worker_envs": True,
# How many RolloutWorkers (each with n environment copies:
# `num_envs_per_worker`)?
"num_workers": args.num_workers,
# This setting should not really matter as it does not affect the
# number of GPUs reserved for each worker.
"num_envs_per_worker": args.num_envs_per_worker,
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
"framework": args.framework,
}
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
results = tune.run(args.run, config=config, stop=stop, verbose=1)
if args.as_test:
check_learning_achieved(results, args.stop_reward)
ray.shutdown()
|
[] |
[] |
[
"RLLIB_NUM_GPUS"
] |
[]
|
["RLLIB_NUM_GPUS"]
|
python
| 1 | 0 | |
datasciencebox/tests/salt/utils.py
|
import pytest
import os
import sys
import json
from click.testing import CliRunner
from ...cli.main import cli
from ...core.project import Project
remotetest = pytest.mark.skipif('TEST_DSBFILE' not in os.environ,
reason="Environment variable 'TEST_DSBFILE' is required")
def get_test_project():
dsbfile = os.environ['TEST_DSBFILE']
return Project.from_file(dsbfile)
def invoke(*args):
dsbfile = os.environ['TEST_DSBFILE']
args = list(args)
args.extend(['--file', dsbfile])
runner = CliRunner()
return runner.invoke(cli, args, catch_exceptions=False, input=sys.stdin)
def check_all_true(salt_output, none_is_ok=False):
minions = []
for minion_output in salt_output.split('\n'):
minions.append(json.loads(minion_output))
for minion in minions:
minion_values = minion.values()[0]
for id_, value in minion_values.items():
if none_is_ok:
assert value['result'] is not False, (id_, value)
else:
assert value['result'] is True, (id_, value)
def check_all_cmd_retcode0(salt_output):
minions = []
for minion_output in salt_output.split('\n'):
minions.append(json.loads(minion_output))
for minion in minions:
minion_output = minion.values()[0]
assert minion_output['retcode'] == 0, (minion_output)
|
[] |
[] |
[
"TEST_DSBFILE"
] |
[]
|
["TEST_DSBFILE"]
|
python
| 1 | 0 | |
signer/keydbstore/rethink_realkeydbstore_test.go
|
// +build rethinkdb
// Uses a real RethinkDB connection testing purposes
package keydbstore
import (
"crypto/rand"
"os"
"testing"
"time"
"github.com/autonomic-ai/notary/storage/rethinkdb"
"github.com/autonomic-ai/notary/trustmanager"
"github.com/autonomic-ai/notary/tuf/data"
"github.com/autonomic-ai/notary/tuf/signed"
"github.com/docker/go-connections/tlsconfig"
jose "github.com/dvsekhvalnov/jose2go"
"github.com/stretchr/testify/require"
gorethink "gopkg.in/rethinkdb/rethinkdb-go.v6"
)
var tlsOpts = tlsconfig.Options{InsecureSkipVerify: true, ExclusiveRootPools: true}
var rdbNow = time.Date(2016, 12, 31, 1, 1, 1, 0, time.UTC)
func rethinkSessionSetup(t *testing.T) (*gorethink.Session, string) {
// Get the Rethink connection string from an environment variable
rethinkSource := os.Getenv("DBURL")
require.NotEqual(t, "", rethinkSource)
sess, err := rethinkdb.AdminConnection(tlsOpts, rethinkSource)
require.NoError(t, err)
return sess, rethinkSource
}
func rethinkDBSetup(t *testing.T, dbName string) (*RethinkDBKeyStore, func()) {
session, _ := rethinkSessionSetup(t)
var cleanup = func() { gorethink.DBDrop(dbName).Exec(session) }
cleanup()
err := rethinkdb.SetupDB(session, dbName, []rethinkdb.Table{PrivateKeysRethinkTable})
require.NoError(t, err)
dbStore := NewRethinkDBKeyStore(dbName, "", "", multiAliasRetriever, validAliases[0], session)
require.Equal(t, "RethinkDB", dbStore.Name())
dbStore.nowFunc = func() time.Time { return rdbNow }
return dbStore, cleanup
}
func TestRethinkBootstrapSetsUsernamePassword(t *testing.T) {
adminSession, source := rethinkSessionSetup(t)
dbname, username, password := "signertestdb", "testuser", "testpassword"
otherDB, otherUser, otherPass := "othersignertestdb", "otheruser", "otherpassword"
// create a separate user with access to a different DB
require.NoError(t, rethinkdb.SetupDB(adminSession, otherDB, nil))
defer gorethink.DBDrop(otherDB).Exec(adminSession)
require.NoError(t, rethinkdb.CreateAndGrantDBUser(adminSession, otherDB, otherUser, otherPass))
// Bootstrap
s := NewRethinkDBKeyStore(dbname, username, password, constRetriever, "ignored", adminSession)
require.NoError(t, s.Bootstrap())
defer gorethink.DBDrop(dbname).Exec(adminSession)
// A user with an invalid password cannot connect to rethink DB at all
_, err := rethinkdb.UserConnection(tlsOpts, source, username, "wrongpass")
require.Error(t, err)
// the other user cannot access rethink, causing health checks to fail
userSession, err := rethinkdb.UserConnection(tlsOpts, source, otherUser, otherPass)
require.NoError(t, err)
s = NewRethinkDBKeyStore(dbname, otherUser, otherPass, constRetriever, "ignored", userSession)
_, _, err = s.GetPrivateKey("nonexistent")
require.Error(t, err)
require.IsType(t, gorethink.RQLRuntimeError{}, err)
key := s.GetKey("nonexistent")
require.Nil(t, key)
require.Error(t, s.CheckHealth())
// our user can access the DB though
userSession, err = rethinkdb.UserConnection(tlsOpts, source, username, password)
require.NoError(t, err)
s = NewRethinkDBKeyStore(dbname, username, password, constRetriever, "ignored", userSession)
_, _, err = s.GetPrivateKey("nonexistent")
require.Error(t, err)
require.IsType(t, trustmanager.ErrKeyNotFound{}, err)
require.NoError(t, s.CheckHealth())
}
// Checks that the DB contains the expected keys, and returns a map of the RDBPrivateKey objects by key ID
func requireExpectedRDBKeys(t *testing.T, dbStore *RethinkDBKeyStore, expectedKeys []data.PrivateKey) map[string]RDBPrivateKey {
res, err := gorethink.DB(dbStore.dbName).Table(PrivateKeysRethinkTable.Name).Run(dbStore.sess)
require.NoError(t, err)
var rows []RDBPrivateKey
require.NoError(t, res.All(&rows))
require.Len(t, rows, len(expectedKeys))
result := make(map[string]RDBPrivateKey)
for _, rdbKey := range rows {
result[rdbKey.KeyID] = rdbKey
}
for _, key := range expectedKeys {
rdbKey, ok := result[key.ID()]
require.True(t, ok)
require.NotNil(t, rdbKey)
require.Equal(t, key.Public(), rdbKey.Public)
require.Equal(t, key.Algorithm(), rdbKey.Algorithm)
// because we have to manually set the created and modified times
require.True(t, rdbKey.CreatedAt.Equal(rdbNow))
require.True(t, rdbKey.UpdatedAt.Equal(rdbNow))
require.True(t, rdbKey.DeletedAt.Equal(time.Time{}))
}
return result
}
func TestRethinkKeyCanOnlyBeAddedOnce(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t, "signerAddTests")
defer cleanup()
expectedKeys := testKeyCanOnlyBeAddedOnce(t, dbStore)
rdbKeys := requireExpectedRDBKeys(t, dbStore, expectedKeys)
// none of these keys are active, since they have not been activated
for _, rdbKey := range rdbKeys {
require.True(t, rdbKey.LastUsed.Equal(time.Time{}))
}
}
func TestRethinkCreateDelete(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t, "signerDeleteTests")
defer cleanup()
expectedKeys := testCreateDelete(t, dbStore)
rdbKeys := requireExpectedRDBKeys(t, dbStore, expectedKeys)
// none of these keys are active, since they have not been activated
for _, rdbKey := range rdbKeys {
require.True(t, rdbKey.LastUsed.Equal(time.Time{}))
}
}
func TestRethinkKeyRotation(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t, "signerRotationTests")
defer cleanup()
rotatedKey, nonRotatedKey := testKeyRotation(t, dbStore, validAliases[1])
rdbKeys := requireExpectedRDBKeys(t, dbStore, []data.PrivateKey{rotatedKey, nonRotatedKey})
// none of these keys are active, since they have not been activated
for _, rdbKey := range rdbKeys {
require.True(t, rdbKey.LastUsed.Equal(time.Time{}))
}
// require that the rotated key is encrypted with the new passphrase
rotatedRDBKey := rdbKeys[rotatedKey.ID()]
require.Equal(t, validAliases[1], rotatedRDBKey.PassphraseAlias)
decryptedKey, _, err := jose.Decode(string(rotatedRDBKey.Private), validAliasesAndPasswds[validAliases[1]])
require.NoError(t, err)
require.Equal(t, string(rotatedKey.Private()), decryptedKey)
// require that the nonrotated key is encrypted with the old passphrase
nonRotatedRDBKey := rdbKeys[nonRotatedKey.ID()]
require.Equal(t, validAliases[0], nonRotatedRDBKey.PassphraseAlias)
decryptedKey, _, err = jose.Decode(string(nonRotatedRDBKey.Private), validAliasesAndPasswds[validAliases[0]])
require.NoError(t, err)
require.Equal(t, string(nonRotatedKey.Private()), decryptedKey)
}
func TestRethinkCheckHealth(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t, "signerHealthcheckTests")
defer cleanup()
// sanity check - all tables present - health check passes
require.NoError(t, dbStore.CheckHealth())
// if the DB is unreachable, health check fails
require.NoError(t, dbStore.sess.Close())
require.Error(t, dbStore.CheckHealth())
// if the connection is reopened, health check succeeds
require.NoError(t, dbStore.sess.Reconnect())
require.NoError(t, dbStore.CheckHealth())
// No tables, health check fails
require.NoError(t, gorethink.DB(dbStore.dbName).TableDrop(PrivateKeysRethinkTable.Name).Exec(dbStore.sess))
require.Error(t, dbStore.CheckHealth())
// No DB, health check fails
cleanup()
require.Error(t, dbStore.CheckHealth())
}
func TestRethinkSigningMarksKeyActive(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t, "signerActivationTests")
defer cleanup()
activeKey, nonActiveKey := testSigningWithKeyMarksAsActive(t, dbStore)
rdbKeys := requireExpectedRDBKeys(t, dbStore, []data.PrivateKey{activeKey, nonActiveKey})
// check that activation updates the activated key but not the unactivated key
require.True(t, rdbKeys[activeKey.ID()].LastUsed.Equal(rdbNow))
require.True(t, rdbKeys[nonActiveKey.ID()].LastUsed.Equal(time.Time{}))
	// check that signing succeeds even if the DB connection is closed (and hence
	// marking the key as active fails)
dbStore.sess.Close()
msg := []byte("successful, db closed")
sig, err := nonActiveKey.Sign(rand.Reader, msg, nil)
require.NoError(t, err)
require.NoError(t, signed.Verifiers[data.ECDSASignature].Verify(
data.PublicKeyFromPrivate(nonActiveKey), sig, msg))
}
func TestRethinkCreateKey(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t, "signerCreationTests")
defer cleanup()
activeED25519Key, pendingED25519Key, pendingECDSAKey := testCreateKey(t, dbStore)
rdbKeys := requireExpectedRDBKeys(t, dbStore, []data.PrivateKey{activeED25519Key, pendingED25519Key, pendingECDSAKey})
// check that activation updates the activated key but not the unactivated keys
require.True(t, rdbKeys[activeED25519Key.ID()].LastUsed.Equal(rdbNow))
require.True(t, rdbKeys[pendingED25519Key.ID()].LastUsed.Equal(time.Time{}))
require.True(t, rdbKeys[pendingECDSAKey.ID()].LastUsed.Equal(time.Time{}))
}
func TestRethinkUnimplementedInterfaceBehavior(t *testing.T) {
dbStore, cleanup := rethinkDBSetup(t, "signerInterfaceTests")
defer cleanup()
testUnimplementedInterfaceMethods(t, dbStore)
}
|
[
"\"DBURL\""
] |
[] |
[
"DBURL"
] |
[]
|
["DBURL"]
|
go
| 1 | 0 | |
src/test/java/org/nrg/containers/CommandLaunchIntegrationTest.java
|
package org.nrg.containers;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.jayway.jsonpath.Configuration;
import com.jayway.jsonpath.Option;
import com.jayway.jsonpath.spi.json.JacksonJsonProvider;
import com.jayway.jsonpath.spi.json.JsonProvider;
import com.jayway.jsonpath.spi.mapper.JacksonMappingProvider;
import com.jayway.jsonpath.spi.mapper.MappingProvider;
import com.spotify.docker.client.DockerClient;
import com.spotify.docker.client.LoggingBuildHandler;
import com.spotify.docker.client.exceptions.ContainerNotFoundException;
import com.spotify.docker.client.exceptions.DockerException;
import com.spotify.docker.client.messages.ContainerInfo;
import com.spotify.docker.client.messages.swarm.Service;
import com.spotify.docker.client.messages.swarm.Task;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.SystemUtils;
import org.hamcrest.CustomTypeSafeMatcher;
import org.hamcrest.Matchers;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.junit.runner.RunWith;
import org.nrg.containers.api.DockerControlApi;
import org.nrg.containers.config.EventPullingIntegrationTestConfig;
import org.nrg.containers.model.command.auto.Command;
import org.nrg.containers.model.command.auto.Command.CommandWrapper;
import org.nrg.containers.model.container.auto.Container;
import org.nrg.containers.model.container.auto.Container.ContainerMount;
import org.nrg.containers.model.container.auto.ServiceTask;
import org.nrg.containers.model.server.docker.DockerServerBase.DockerServer;
import org.nrg.containers.model.xnat.Project;
import org.nrg.containers.model.xnat.Resource;
import org.nrg.containers.model.xnat.Scan;
import org.nrg.containers.model.xnat.Session;
import org.nrg.containers.services.CommandService;
import org.nrg.containers.services.ContainerService;
import org.nrg.containers.services.DockerServerService;
import org.nrg.containers.services.DockerService;
import org.nrg.framework.exceptions.NotFoundException;
import org.nrg.xdat.entities.AliasToken;
import org.nrg.xdat.preferences.SiteConfigPreferences;
import org.nrg.xdat.security.helpers.Users;
import org.nrg.xdat.security.services.PermissionsServiceI;
import org.nrg.xdat.security.services.UserManagementServiceI;
import org.nrg.xdat.services.AliasTokenService;
import org.nrg.xft.ItemI;
import org.nrg.xft.schema.XFTManager;
import org.nrg.xft.security.UserI;
import org.nrg.xnat.helpers.uri.UriParserUtils;
import org.nrg.xnat.helpers.uri.archive.impl.ExptURI;
import org.nrg.xnat.helpers.uri.archive.impl.ProjURI;
import org.nrg.xnat.turbine.utils.ArchivableItem;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.powermock.modules.junit4.PowerMockRunnerDelegate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.test.context.transaction.TestTransaction;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.regex.Pattern;
import static org.awaitility.Awaitility.await;
import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.hasItemInArray;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.isEmptyOrNullString;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeThat;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.powermock.api.mockito.PowerMockito.mockStatic;
import static org.powermock.api.mockito.PowerMockito.when;
@Slf4j
@RunWith(PowerMockRunner.class)
@PowerMockRunnerDelegate(SpringJUnit4ClassRunner.class)
@PrepareForTest({UriParserUtils.class, XFTManager.class, Users.class})
@PowerMockIgnore({"org.apache.*", "java.*", "javax.*", "org.w3c.*", "com.sun.*"})
@ContextConfiguration(classes = EventPullingIntegrationTestConfig.class)
@Transactional
public class CommandLaunchIntegrationTest {
private UserI mockUser;
private String buildDir;
private String archiveDir;
private final String FAKE_USER = "mockUser";
private final String FAKE_ALIAS = "alias";
private final String FAKE_SECRET = "secret";
private final String FAKE_HOST = "mock://url";
private final boolean swarmMode = false;
private boolean testIsOnCircleCi;
private final List<String> containersToCleanUp = new ArrayList<>();
private final List<String> imagesToCleanUp = new ArrayList<>();
private static DockerClient CLIENT;
@Autowired private ObjectMapper mapper;
@Autowired private CommandService commandService;
@Autowired private ContainerService containerService;
@Autowired private DockerControlApi controlApi;
@Autowired private DockerService dockerService;
@Autowired private AliasTokenService mockAliasTokenService;
@Autowired private DockerServerService dockerServerService;
@Autowired private SiteConfigPreferences mockSiteConfigPreferences;
@Autowired private UserManagementServiceI mockUserManagementServiceI;
@Autowired private PermissionsServiceI mockPermissionsServiceI;
@Rule public TemporaryFolder folder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/build"));
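// Sets JsonPath defaults, configures the Docker server from environment variables, and mocks the XNAT services used by every test.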
@Before
public void setup() throws Exception {
Configuration.setDefaults(new Configuration.Defaults() {
private final JsonProvider jsonProvider = new JacksonJsonProvider();
private final MappingProvider mappingProvider = new JacksonMappingProvider();
@Override
public JsonProvider jsonProvider() {
return jsonProvider;
}
@Override
public MappingProvider mappingProvider() {
return mappingProvider;
}
@Override
public Set<Option> options() {
return Sets.newHashSet(Option.DEFAULT_PATH_LEAF_TO_NULL);
}
});
// Mock out the prefs bean
final String defaultHost = "unix:///var/run/docker.sock";
final String hostEnv = System.getenv("DOCKER_HOST");
final String certPathEnv = System.getenv("DOCKER_CERT_PATH");
final String tlsVerify = System.getenv("DOCKER_TLS_VERIFY");
final String circleCiEnv = System.getenv("CIRCLECI");
testIsOnCircleCi = StringUtils.isNotBlank(circleCiEnv) && Boolean.parseBoolean(circleCiEnv);
final boolean useTls = tlsVerify != null && tlsVerify.equals("1");
final String certPath;
if (useTls) {
if (StringUtils.isBlank(certPathEnv)) {
throw new Exception("Must set DOCKER_CERT_PATH if DOCKER_TLS_VERIFY=1.");
}
certPath = certPathEnv;
} else {
certPath = "";
}
final String containerHost;
if (StringUtils.isBlank(hostEnv)) {
containerHost = defaultHost;
} else {
final Pattern tcpShouldBeHttpRe = Pattern.compile("tcp://.*");
final java.util.regex.Matcher tcpShouldBeHttpMatch = tcpShouldBeHttpRe.matcher(hostEnv);
if (tcpShouldBeHttpMatch.matches()) {
// Must switch out tcp:// for either http:// or https://
containerHost = hostEnv.replace("tcp://", "http" + (useTls ? "s" : "") + "://");
} else {
containerHost = hostEnv;
}
}
dockerServerService.setServer(DockerServer.create(0L, "Test server", containerHost, certPath, false, null, null, null, false, null));
// Mock the userI
mockUser = mock(UserI.class);
when(mockUser.getLogin()).thenReturn(FAKE_USER);
// Permissions
when(mockPermissionsServiceI.canEdit(any(UserI.class), any(ItemI.class))).thenReturn(Boolean.TRUE);
// Mock the user management service
when(mockUserManagementServiceI.getUser(FAKE_USER)).thenReturn(mockUser);
// Mock UriParserUtils using PowerMock. This allows us to mock out
// the responses to its static method parseURI().
mockStatic(UriParserUtils.class);
// Mock the aliasTokenService
final AliasToken mockAliasToken = new AliasToken();
mockAliasToken.setAlias(FAKE_ALIAS);
mockAliasToken.setSecret(FAKE_SECRET);
when(mockAliasTokenService.issueTokenForUser(mockUser)).thenReturn(mockAliasToken);
mockStatic(Users.class);
when(Users.getUser(FAKE_USER)).thenReturn(mockUser);
// Mock the site config preferences
buildDir = folder.newFolder().getAbsolutePath();
archiveDir = folder.newFolder().getAbsolutePath();
when(mockSiteConfigPreferences.getSiteUrl()).thenReturn(FAKE_HOST);
when(mockSiteConfigPreferences.getBuildPath()).thenReturn(buildDir); // transporter makes a directory under build
when(mockSiteConfigPreferences.getArchivePath()).thenReturn(archiveDir); // container logs get stored under archive
when(mockSiteConfigPreferences.getProperty("processingUrl", FAKE_HOST)).thenReturn(FAKE_HOST);
// Use powermock to mock out the static method XFTManager.isInitialized()
mockStatic(XFTManager.class);
when(XFTManager.isInitialized()).thenReturn(true);
CLIENT = controlApi.getClient();
CLIENT.pull("busybox:latest");
}
@After
public void cleanup() throws Exception {
for (final String containerToCleanUp : containersToCleanUp) {
if (swarmMode) {
CLIENT.removeService(containerToCleanUp);
} else {
CLIENT.removeContainer(containerToCleanUp, DockerClient.RemoveContainerParam.forceKill());
}
}
containersToCleanUp.clear();
for (final String imageToCleanUp : imagesToCleanUp) {
CLIENT.removeImage(imageToCleanUp, true, false);
}
imagesToCleanUp.clear();
CLIENT.close();
}
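// Pings the Docker daemon; tests use this with assumeThat() to skip themselves when Docker is unreachable.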
private boolean canConnectToDocker() {
try {
return CLIENT.ping().equals("OK");
} catch (InterruptedException | DockerException e) {
log.warn("Could not connect to docker.", e);
}
return false;
}
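// Launches a fake recon-all command against a mock session and verifies the resolved inputs, outputs, environment variables, and mounts.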
@Test
@DirtiesContext
public void testFakeReconAll() throws Exception {
assumeThat(SystemUtils.IS_OS_WINDOWS_7, is(false));
assumeThat(canConnectToDocker(), is(true));
CLIENT.pull("busybox:latest");
final String dir = Paths.get(ClassLoader.getSystemResource("commandLaunchTest").toURI()).toString().replace("%20", " ");
final String commandJsonFile = Paths.get(dir, "/fakeReconAllCommand.json").toString();
final String sessionJsonFile = Paths.get(dir, "/session.json").toString();
final String fakeResourceDir = Paths.get(dir, "/fakeResource").toString();
final String commandWrapperName = "recon-all-session";
final Command fakeReconAll = mapper.readValue(new File(commandJsonFile), Command.class);
final Command fakeReconAllCreated = commandService.create(fakeReconAll);
CommandWrapper commandWrapper = null;
for (final CommandWrapper commandWrapperLoop : fakeReconAllCreated.xnatCommandWrappers()) {
if (commandWrapperName.equals(commandWrapperLoop.name())) {
commandWrapper = commandWrapperLoop;
break;
}
}
assertThat(commandWrapper, is(not(nullValue())));
final Session session = mapper.readValue(new File(sessionJsonFile), Session.class);
final Scan scan = session.getScans().get(0);
final Resource resource = scan.getResources().get(0);
resource.setDirectory(fakeResourceDir);
final String sessionJson = mapper.writeValueAsString(session);
final ArchivableItem mockProjectItem = mock(ArchivableItem.class);
final ProjURI mockUriObject = mock(ProjURI.class);
when(UriParserUtils.parseURI("/archive" + session.getUri())).thenReturn(mockUriObject);
when(mockUriObject.getSecurityItem()).thenReturn(mockProjectItem);
final String t1Scantype = "T1_TEST_SCANTYPE";
final Map<String, String> runtimeValues = Maps.newHashMap();
runtimeValues.put("session", sessionJson);
runtimeValues.put("T1-scantype", t1Scantype);
final Container execution = containerService.resolveCommandAndLaunchContainer(commandWrapper.id(), runtimeValues, mockUser);
containersToCleanUp.add(swarmMode ? execution.serviceId() : execution.containerId());
await().until(containerIsRunning(execution), is(false));
// Raw inputs
assertThat(execution.getRawInputs(), is(runtimeValues));
// xnat wrapper inputs
final Map<String, String> expectedXnatInputValues = Maps.newHashMap();
expectedXnatInputValues.put("session", session.getExternalWrapperInputValue());
expectedXnatInputValues.put("T1-scantype", t1Scantype);
expectedXnatInputValues.put("label", session.getLabel());
expectedXnatInputValues.put("T1", session.getScans().get(0).getDerivedWrapperInputValue());
expectedXnatInputValues.put("resource", session.getScans().get(0).getResources().get(0).getDerivedWrapperInputValue());
assertThat(execution.getWrapperInputs(), is(expectedXnatInputValues));
// command inputs
final Map<String, String> expectedCommandInputValues = Maps.newHashMap();
expectedCommandInputValues.put("subject-id", session.getLabel());
expectedCommandInputValues.put("other-recon-all-args", "-all");
assertThat(execution.getCommandInputs(), is(expectedCommandInputValues));
// Outputs
// assertTrue(resolvedCommand.getOutputs().isEmpty());
final List<String> outputNames = Lists.transform(execution.outputs(), new Function<Container.ContainerOutput, String>() {
@Nullable
@Override
public String apply(@Nullable final Container.ContainerOutput output) {
return output == null ? "" : output.name();
}
});
assertThat(outputNames, contains("data:data-output", "text-file:text-file-output"));
// Environment variables
final Map<String, String> expectedEnvironmentVariables = Maps.newHashMap();
expectedEnvironmentVariables.put("XNAT_USER", FAKE_ALIAS);
expectedEnvironmentVariables.put("XNAT_PASS", FAKE_SECRET);
expectedEnvironmentVariables.put("XNAT_HOST", FAKE_HOST);
assertThat(execution.environmentVariables(), is(expectedEnvironmentVariables));
final List<ContainerMount> mounts = execution.mounts();
assertThat(mounts, hasSize(2));
ContainerMount inputMount = null;
ContainerMount outputMount = null;
for (final ContainerMount mount : mounts) {
if (mount.name().equals("input")) {
inputMount = mount;
} else if (mount.name().equals("output")) {
outputMount = mount;
} else {
fail("We should not have a mount with name " + mount.name());
}
}
assertThat(inputMount, is(not(nullValue())));
assertThat(inputMount.containerPath(), is("/input"));
assertThat(inputMount.xnatHostPath(), is(fakeResourceDir));
assertThat(outputMount, is(not(nullValue())));
assertThat(outputMount.containerPath(), is("/output"));
final String outputPath = outputMount.xnatHostPath();
printContainerLogs(execution);
try {
final String[] outputFileContents = readFile(outputPath + "/out.txt");
assertThat(outputFileContents.length, greaterThanOrEqualTo(2));
assertThat(outputFileContents[0], is("recon-all -s session1 -all"));
final File fakeResourceDirFile = new File(fakeResourceDir);
assertThat(fakeResourceDirFile, is(not(nullValue())));
assertThat(fakeResourceDirFile.listFiles(), is(not(nullValue())));
final List<String> fakeResourceDirFileNames = Lists.newArrayList();
for (final File file : fakeResourceDirFile.listFiles()) {
fakeResourceDirFileNames.add(file.getName());
}
assertThat(Lists.newArrayList(outputFileContents[1].split(" ")), is(fakeResourceDirFileNames));
} catch (IOException e) {
log.warn("Failed to read output files. This is not a problem if you are using docker-machine and cannot mount host directories.", e);
}
}
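// Mounts a project directory into a container and verifies the file and directory listings written to the output mount.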
@Test
@DirtiesContext
public void testProjectMount() throws Exception {
assumeThat(canConnectToDocker(), is(true));
final String dir = Paths.get(ClassLoader.getSystemResource("commandLaunchTest").toURI()).toString().replace("%20", " ");
final String commandJsonFile = dir + "/project-mount-command.json";
final String projectJsonFile = dir + "/project.json";
final String projectDir = dir + "/project";
// final String commandWrapperName = "find-in-project";
final Command command = mapper.readValue(new File(commandJsonFile), Command.class);
final Command commandCreated = commandService.create(command);
final CommandWrapper commandWrapper = commandCreated.xnatCommandWrappers().get(0);
assertThat(commandWrapper, is(not(nullValue())));
final Project project = mapper.readValue(new File(projectJsonFile), Project.class);
project.setDirectory(projectDir);
final String projectJson = mapper.writeValueAsString(project);
// Create the mock objects we will need in order to verify permissions
final ArchivableItem mockProjectItem = mock(ArchivableItem.class);
final ExptURI mockUriObject = mock(ExptURI.class);
when(UriParserUtils.parseURI("/archive" + project.getUri())).thenReturn(mockUriObject);
when(mockUriObject.getSecurityItem()).thenReturn(mockProjectItem);
final Map<String, String> runtimeValues = Maps.newHashMap();
runtimeValues.put("project", projectJson);
final Container execution = containerService.resolveCommandAndLaunchContainer(commandWrapper.id(), runtimeValues, mockUser);
containersToCleanUp.add(swarmMode ? execution.serviceId() : execution.containerId());
await().until(containerIsRunning(execution), is(false));
// Raw inputs
assertThat(execution.getRawInputs(), is(runtimeValues));
// xnat wrapper inputs
final Map<String, String> expectedXnatInputValues = Maps.newHashMap();
expectedXnatInputValues.put("project", project.getUri());
assertThat(execution.getWrapperInputs(), is(expectedXnatInputValues));
// command inputs
final Map<String, String> expectedCommandInputValues = Maps.newHashMap();
assertThat(execution.getCommandInputs(), is(expectedCommandInputValues));
// Outputs by name. We will check the files later.
final List<String> outputNames = Lists.transform(execution.outputs(), new Function<Container.ContainerOutput, String>() {
@Override
public String apply(final Container.ContainerOutput output) {
return output.name();
}
});
assertThat(outputNames, contains("outputs:file-and-dir-lists"));
// Environment variables
final Map<String, String> expectedEnvironmentVariables = Maps.newHashMap();
expectedEnvironmentVariables.put("XNAT_USER", FAKE_ALIAS);
expectedEnvironmentVariables.put("XNAT_PASS", FAKE_SECRET);
expectedEnvironmentVariables.put("XNAT_HOST", FAKE_HOST);
assertThat(execution.environmentVariables(), is(expectedEnvironmentVariables));
// mounts
final List<ContainerMount> mounts = execution.mounts();
assertThat(mounts, hasSize(2));
ContainerMount inputMount = null;
ContainerMount outputMount = null;
for (final ContainerMount mount : mounts) {
if (mount.name().equals("input")) {
inputMount = mount;
} else if (mount.name().equals("output")) {
outputMount = mount;
} else {
fail("We should not have a mount with name " + mount.name());
}
}
assertThat(inputMount, is(not(nullValue())));
assertThat(inputMount.containerPath(), is("/input"));
assertThat(inputMount.xnatHostPath(), is(projectDir));
assertThat(outputMount, is(not(nullValue())));
assertThat(outputMount.containerPath(), is("/output"));
final String outputPath = outputMount.xnatHostPath();
printContainerLogs(execution);
try {
// Read two output files: files.txt and dirs.txt
final String[] expectedFilesFileContents = {
"/input/project-file.txt",
"/input/resource/project-resource-file.txt",
"/input/session/resource/session-resource-file.txt",
"/input/session/scan/resource/scan-resource-file.txt",
"/input/session/scan/scan-file.txt",
"/input/session/session-file.txt"
};
final List<String> filesFileContents = Lists.newArrayList(readFile(outputPath + "/files.txt"));
assertThat(filesFileContents, containsInAnyOrder(expectedFilesFileContents));
final String[] expectedDirsFileContents = {
"/input",
"/input/resource",
"/input/session",
"/input/session/resource",
"/input/session/scan",
"/input/session/scan/resource"
};
final List<String> dirsFileContents = Lists.newArrayList(readFile(outputPath + "/dirs.txt"));
assertThat(dirsFileContents, containsInAnyOrder(expectedDirsFileContents));
} catch (IOException e) {
log.warn("Failed to read output files. This is not a problem if you are using docker-machine and cannot mount host directories.", e);
}
}
@Test
@DirtiesContext
public void testLaunchCommandWithSetupCommand() throws Exception {
assumeThat(SystemUtils.IS_OS_WINDOWS_7, is(false));
assumeThat(canConnectToDocker(), is(true));
// This test fails on Circle CI because we cannot mount local directories into containers
assumeThat(testIsOnCircleCi, is(false));
CLIENT.pull("busybox:latest");
final Path setupCommandDirPath = Paths.get(ClassLoader.getSystemResource("setupCommand").toURI());
final String setupCommandDir = setupCommandDirPath.toString().replace("%20", " ");
final String commandWithSetupCommandJsonFile = Paths.get(setupCommandDir, "/command-with-setup-command.json").toString();
final Command commandWithSetupCommandToCreate = mapper.readValue(new File(commandWithSetupCommandJsonFile), Command.class);
final Command commandWithSetupCommand = commandService.create(commandWithSetupCommandToCreate);
// We could hard-code the name of the image we referenced in the "via-setup-command" property, or we could pull it out.
// Let's do the latter, so in case we change it later this will not fail.
assertThat(commandWithSetupCommand.xnatCommandWrappers(), hasSize(1));
final CommandWrapper commandWithSetupCommandWrapper = commandWithSetupCommand.xnatCommandWrappers().get(0);
assertThat(commandWithSetupCommandWrapper.externalInputs(), hasSize(1));
assertThat(commandWithSetupCommandWrapper.externalInputs().get(0).viaSetupCommand(), not(isEmptyOrNullString()));
final String setupCommandImageAndCommandName = commandWithSetupCommandWrapper.externalInputs().get(0).viaSetupCommand();
final String[] setupCommandSplitOnColon = setupCommandImageAndCommandName.split(":");
assertThat(setupCommandSplitOnColon, arrayWithSize(3));
final String setupCommandImageName = setupCommandSplitOnColon[0] + ":" + setupCommandSplitOnColon[1];
final String setupCommandName = setupCommandSplitOnColon[2];
CLIENT.build(setupCommandDirPath, setupCommandImageName);
imagesToCleanUp.add(setupCommandImageName);
// Make the setup command from the json file.
// Assert that its name and image are the same ones referred to in the "via-setup-command" property
final String setupCommandJsonFile = Paths.get(setupCommandDir, "/setup-command.json").toString();
final Command setupCommandToCreate = mapper.readValue(new File(setupCommandJsonFile), Command.class);
final Command setupCommand = commandService.create(setupCommandToCreate);
assertThat(setupCommand.name(), is(setupCommandName));
assertThat(setupCommand.image(), is(setupCommandImageName));
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
final String resourceInputJsonPath = setupCommandDir + "/resource.json";
// I need to set the resource directory to a temp directory
final String resourceDir = folder.newFolder("resource").getAbsolutePath();
final Resource resourceInput = mapper.readValue(new File(resourceInputJsonPath), Resource.class);
resourceInput.setDirectory(resourceDir);
final Map<String, String> runtimeValues = Collections.singletonMap("resource", mapper.writeValueAsString(resourceInput));
// Write a test file to the resource
final String testFileContents = "contents of the file";
Files.write(Paths.get(resourceDir, "test.txt"), testFileContents.getBytes());
// I don't know if I need this, but I copied it from another test
final ArchivableItem mockProjectItem = mock(ArchivableItem.class);
final ProjURI mockUriObject = mock(ProjURI.class);
when(UriParserUtils.parseURI("/archive" + resourceInput.getUri())).thenReturn(mockUriObject);
when(mockUriObject.getSecurityItem()).thenReturn(mockProjectItem);
// Time to launch this thing
final Container mainContainerRightAfterLaunch = containerService.resolveCommandAndLaunchContainer(commandWithSetupCommandWrapper.id(), runtimeValues, mockUser);
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
Thread.sleep(5000); // Wait for container to finish
final Container mainContainerAWhileAfterLaunch = containerService.get(mainContainerRightAfterLaunch.databaseId());
final List<Container> setupContainers = containerService.retrieveSetupContainersForParent(mainContainerAWhileAfterLaunch.databaseId());
assertThat(setupContainers, hasSize(1));
final Container setupContainer = setupContainers.get(0);
// Print the logs for debugging in case weird stuff happened
printContainerLogs(setupContainer, "setup");
printContainerLogs(mainContainerAWhileAfterLaunch, "main");
// Sanity Checks
assertThat(setupContainer.parent(), is(mainContainerAWhileAfterLaunch));
assertThat(setupContainer.status(), is(not("Failed")));
// Check main container's input mount for contents
final ContainerMount mainContainerMount = mainContainerAWhileAfterLaunch.mounts().get(0);
final File mainContainerMountDir = new File(mainContainerMount.xnatHostPath());
final File[] contentsOfMainContainerMountDir = mainContainerMountDir.listFiles();
// This is what we will be testing, and why it validates that the setup container worked.
// We wrote "test.txt" to the resource's directory.
// The main container is set to mount an initially empty directory. Call this "main mount".
// The setup container is set to mount the resource's directory as its input and the main mount as its output.
// When the setup container runs, it copies "test.txt" from its input to its output. It also creates a new
// file "another-file" in its output, which we did not explicitly create in this test.
// By verifying that the main container's mount sees both files, we have verified that the setup container
// put the files where they needed to go, and that all the mounts were hooked up correctly.
assertThat(contentsOfMainContainerMountDir, hasItemInArray(pathEndsWith("test.txt")));
assertThat(contentsOfMainContainerMountDir, hasItemInArray(pathEndsWith("another-file")));
}
@Test
@DirtiesContext
public void testLaunchCommandWithWrapupCommand() throws Exception {
assumeThat(SystemUtils.IS_OS_WINDOWS_7, is(false));
assumeThat(canConnectToDocker(), is(true));
// This test fails on Circle CI because we cannot mount local directories into containers
assumeThat(testIsOnCircleCi, is(false));
CLIENT.pull("busybox:latest");
final Path wrapupCommandDirPath = Paths.get(ClassLoader.getSystemResource("wrapupCommand").toURI());
final String wrapupCommandDir = wrapupCommandDirPath.toString().replace("%20", " ");
final String commandWithWrapupCommandJsonFile = Paths.get(wrapupCommandDir, "/command-with-wrapup-command.json").toString();
final Command commandWithWrapupCommandToCreate = mapper.readValue(new File(commandWithWrapupCommandJsonFile), Command.class);
final Command commandWithWrapupCommand = commandService.create(commandWithWrapupCommandToCreate);
// We could hard-code the name of the image we referenced in the "via-wrapup-command" property, or we could pull it out.
// Let's do the latter, so in case we change it later this will not fail.
assertThat(commandWithWrapupCommand.xnatCommandWrappers(), hasSize(1));
final CommandWrapper commandWithWrapupCommandWrapper = commandWithWrapupCommand.xnatCommandWrappers().get(0);
assertThat(commandWithWrapupCommandWrapper.outputHandlers(), hasSize(1));
assertThat(commandWithWrapupCommandWrapper.outputHandlers().get(0).viaWrapupCommand(), not(isEmptyOrNullString()));
final String wrapupCommandImageAndCommandName = commandWithWrapupCommandWrapper.outputHandlers().get(0).viaWrapupCommand();
final String[] wrapupCommandSplitOnColon = wrapupCommandImageAndCommandName.split(":");
assertThat(wrapupCommandSplitOnColon, arrayWithSize(3));
final String wrapupCommandImageName = wrapupCommandSplitOnColon[0] + ":" + wrapupCommandSplitOnColon[1];
final String wrapupCommandName = wrapupCommandSplitOnColon[2];
final String commandWithWrapupCommandImageName = commandWithWrapupCommand.image();
// Build two images: the wrapup image and the main image
CLIENT.build(wrapupCommandDirPath, wrapupCommandImageName, "Dockerfile.wrapup", new LoggingBuildHandler());
CLIENT.build(wrapupCommandDirPath, commandWithWrapupCommandImageName, "Dockerfile.main", new LoggingBuildHandler());
imagesToCleanUp.add(wrapupCommandImageName);
imagesToCleanUp.add(commandWithWrapupCommandImageName);
// Make the wrapup command from the json file.
// Assert that its name and image are the same ones referred to in the "via-wrapup-command" property
final String wrapupCommandJsonFile = Paths.get(wrapupCommandDir, "/wrapup-command.json").toString();
final Command wrapupCommandToCreate = mapper.readValue(new File(wrapupCommandJsonFile), Command.class);
final Command wrapupCommand = commandService.create(wrapupCommandToCreate);
assertThat(wrapupCommand.name(), is(wrapupCommandName));
assertThat(wrapupCommand.image(), is(wrapupCommandImageName));
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
// Set up input object(s)
final String sessionInputJsonPath = wrapupCommandDir + "/session.json";
// I need to set the resource directory to a temp directory
final String resourceDir = folder.newFolder("resource").getAbsolutePath();
final Session sessionInput = mapper.readValue(new File(sessionInputJsonPath), Session.class);
assertThat(sessionInput.getResources(), Matchers.<Resource>hasSize(1));
final Resource resource = sessionInput.getResources().get(0);
resource.setDirectory(resourceDir);
final Map<String, String> runtimeValues = Collections.singletonMap("session", mapper.writeValueAsString(sessionInput));
// Write a few test files to the resource
final byte[] testFileContents = "contents of the file".getBytes();
final String[] fileNames = new String[] {"a", "b", "c", "d", "e", "f", "g"};
for (final String filename : fileNames) {
Files.write(Paths.get(resourceDir, filename), testFileContents);
}
// Ensure the session XNAT object will be returned by the call to UriParserUtils.parseURI
final ArchivableItem mockSessionItem = mock(ArchivableItem.class);
final ExptURI mockUriObject = mock(ExptURI.class);
when(UriParserUtils.parseURI("/archive" + sessionInput.getUri())).thenReturn(mockUriObject);
when(mockUriObject.getSecurityItem()).thenReturn(mockSessionItem);
// Time to launch this thing
final Container mainContainerRightAfterLaunch = containerService.resolveCommandAndLaunchContainer(commandWithWrapupCommandWrapper.id(), runtimeValues, mockUser);
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
log.debug("Waiting for ten seconds. Peace!");
Thread.sleep(10000); // Wait for container to finish
final Container mainContainerAWhileAfterLaunch = containerService.get(mainContainerRightAfterLaunch.databaseId());
final List<Container> wrapupContainers = containerService.retrieveWrapupContainersForParent(mainContainerAWhileAfterLaunch.databaseId());
assertThat(wrapupContainers, hasSize(1));
final Container wrapupContainer = wrapupContainers.get(0);
// Print the logs for debugging in case weird stuff happened
printContainerLogs(wrapupContainer, "wrapup");
printContainerLogs(mainContainerAWhileAfterLaunch, "main");
// Sanity Checks
assertThat(wrapupContainer.parent(), is(mainContainerAWhileAfterLaunch));
assertThat(wrapupContainer.status(), is(not("Failed")));
// This is what we will be testing, and why it validates that the wrapup container worked.
// The wrapup container wrote "found-files.txt" to the output mount. The contents of the file
// will be the locations (from find) of all the files in the input mount.
final String[] expectedFileContentsByLine = new String[fileNames.length + 1];
expectedFileContentsByLine[0] = "/input";
for (int i = 0; i < fileNames.length; i++) {
expectedFileContentsByLine[i+1] = "/input/" + fileNames[i];
}
// Check wrapup container's output mount for contents
ContainerMount wrapupContainerOutputMount = null;
for (final ContainerMount wrapupMount : wrapupContainer.mounts()) {
if (wrapupMount.name().equals("output")) {
wrapupContainerOutputMount = wrapupMount;
}
}
assertThat(wrapupContainerOutputMount, notNullValue(ContainerMount.class));
final File wrapupContainerOutputMountDir = new File(wrapupContainerOutputMount.xnatHostPath());
final File[] contentsOfWrapupContainerOutputMountDir = wrapupContainerOutputMountDir.listFiles();
assertThat(contentsOfWrapupContainerOutputMountDir, Matchers.<File>arrayWithSize(1));
assertThat(contentsOfWrapupContainerOutputMountDir, hasItemInArray(pathEndsWith("found-files.txt")));
final File foundFilesDotTxt = contentsOfWrapupContainerOutputMountDir[0];
final String[] foundFilesDotTxtContentByLine = readFile(foundFilesDotTxt);
assertThat(foundFilesDotTxtContentByLine, arrayContainingInAnyOrder(expectedFileContentsByLine));
}
@Test
@DirtiesContext
public void testFailedContainer() throws Exception {
assumeThat(SystemUtils.IS_OS_WINDOWS_7, is(false));
assumeThat(canConnectToDocker(), is(true));
CLIENT.pull("busybox:latest");
final Command willFail = commandService.create(Command.builder()
.name("will-fail")
.image("busybox:latest")
.version("0")
.commandLine("/bin/sh -c \"exit 1\"")
.addCommandWrapper(CommandWrapper.builder()
.name("placeholder")
.build())
.build());
final CommandWrapper willFailWrapper = willFail.xnatCommandWrappers().get(0);
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
final Container container = containerService.resolveCommandAndLaunchContainer(willFailWrapper.id(), Collections.<String, String>emptyMap(), mockUser);
containersToCleanUp.add(swarmMode ? container.serviceId() : container.containerId());
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
log.debug("Waiting until task has started");
await().until(containerHasStarted(container), is(true));
log.debug("Waiting until task has finished");
await().until(containerIsRunning(container), is(false));
log.debug("Waiting until status updater has picked up finished task and added item to history");
await().until(containerHistoryHasItemFromSystem(container.databaseId()), is(true));
final Container exited = containerService.get(container.databaseId());
printContainerLogs(exited);
assertThat(exited.exitCode(), is("1"));
assertThat(exited.status(), is("Failed"));
}
@Test
@DirtiesContext
public void testEntrypointIsPreserved() throws Exception {
assumeThat(canConnectToDocker(), is(true));
CLIENT.pull("busybox:latest");
final String resourceDir = Paths.get(ClassLoader.getSystemResource("commandLaunchTest").toURI()).toString().replace("%20", " ");
final Path testDir = Paths.get(resourceDir, "/testEntrypointIsPreserved");
final String commandJsonFile = Paths.get(testDir.toString(), "/command.json").toString();
final String imageName = "xnat/entrypoint-test:latest";
CLIENT.build(testDir, imageName);
imagesToCleanUp.add(imageName);
final Command commandToCreate = mapper.readValue(new File(commandJsonFile), Command.class);
final Command command = commandService.create(commandToCreate);
final CommandWrapper wrapper = command.xnatCommandWrappers().get(0);
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
final Container container = containerService.resolveCommandAndLaunchContainer(wrapper.id(), Collections.<String, String>emptyMap(), mockUser);
containersToCleanUp.add(swarmMode ? container.serviceId() : container.containerId());
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
await().until(containerIsRunning(container), is(false));
await().until(containerHasLogPaths(container.databaseId())); // Thus we know it has been finalized
final Container exited = containerService.get(container.databaseId());
printContainerLogs(exited);
assertThat(exited.status(), is(not("Failed")));
assertThat(exited.exitCode(), is("0"));
}
@Test
@DirtiesContext
public void testEntrypointIsRemoved() throws Exception {
assumeThat(canConnectToDocker(), is(true));
CLIENT.pull("busybox:latest");
final String resourceDir = Paths.get(ClassLoader.getSystemResource("commandLaunchTest").toURI()).toString().replace("%20", " ");
final Path testDir = Paths.get(resourceDir, "/testEntrypointIsRemoved");
final String commandJsonFile = Paths.get(testDir.toString(), "/command.json").toString();
final String imageName = "xnat/entrypoint-test:latest";
CLIENT.build(testDir, imageName);
imagesToCleanUp.add(imageName);
final Command commandToCreate = mapper.readValue(new File(commandJsonFile), Command.class);
final Command command = commandService.create(commandToCreate);
final CommandWrapper wrapper = command.xnatCommandWrappers().get(0);
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
final Container container = containerService.resolveCommandAndLaunchContainer(wrapper.id(), Collections.<String, String>emptyMap(), mockUser);
containersToCleanUp.add(swarmMode ? container.serviceId() : container.containerId());
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
await().until(containerIsRunning(container), is(false));
await().until(containerHasLogPaths(container.databaseId())); // Thus we know it has been finalized
final Container exited = containerService.get(container.databaseId());
printContainerLogs(exited);
assertThat(exited.status(), is(not("Failed")));
assertThat(exited.exitCode(), is("0"));
}
@Test
@DirtiesContext
public void testContainerWorkingDirectory() throws Exception {
assumeThat(SystemUtils.IS_OS_WINDOWS_7, is(false));
assumeThat(canConnectToDocker(), is(true));
CLIENT.pull("busybox:latest");
final String workingDirectory = "/usr/local/bin";
final Command command = commandService.create(Command.builder()
.name("command")
.image("busybox:latest")
.version("0")
.commandLine("pwd")
.workingDirectory(workingDirectory)
.addCommandWrapper(CommandWrapper.builder()
.name("placeholder")
.build())
.build());
final CommandWrapper wrapper = command.xnatCommandWrappers().get(0);
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
final Container container = containerService.resolveCommandAndLaunchContainer(wrapper.id(), Collections.<String, String>emptyMap(), mockUser);
containersToCleanUp.add(swarmMode ? container.serviceId() : container.containerId());
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
await().until(containerIsRunning(container), is(false));
final Container exited = containerService.get(container.databaseId());
printContainerLogs(exited);
assertThat(exited.workingDirectory(), is(workingDirectory));
}
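// Verifies that removing an image also removes the commands that were saved from its labels, even after a container has run.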
@Test
@DirtiesContext
public void testDeleteCommandWhenDeleteImageAfterLaunchingContainer() throws Exception {
assumeThat(canConnectToDocker(), is(true));
final String imageName = "xnat/testy-test";
final String resourceDir = Paths.get(ClassLoader.getSystemResource("commandLaunchTest").toURI()).toString().replace("%20", " ");
final Path testDir = Paths.get(resourceDir, "/testDeleteCommandWhenDeleteImageAfterLaunchingContainer");
final String imageId = CLIENT.build(testDir, imageName);
final List<Command> commands = dockerService.saveFromImageLabels(imageName);
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
final Command command = commands.get(0);
final CommandWrapper wrapper = command.xnatCommandWrappers().get(0);
final Container container = containerService.resolveCommandAndLaunchContainer(wrapper.id(), Collections.<String, String>emptyMap(), mockUser);
containersToCleanUp.add(swarmMode ? container.serviceId() : container.containerId());
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
await().until(containerIsRunning(container), is(false));
final Container exited = containerService.get(container.databaseId());
printContainerLogs(exited);
dockerService.removeImageById(imageId, true);
TestTransaction.flagForCommit();
TestTransaction.end();
TestTransaction.start();
try {
dockerService.getImage(imageId);
fail("We expect a NotFoundException to be thrown when getting an image that we have removed. If this line is executed it means no exception was thrown.");
} catch (NotFoundException ignored) {
// exception is expected
} catch (Exception e) {
fail("We expect a NotFoundException to be thrown when getting an image that we have removed. If this line is executed it means another exception type was thrown.\n" + e.getClass().getName() + ": " + e.getMessage());
}
final Command retrieved = commandService.retrieve(command.id());
assertThat(retrieved, is(nullValue(Command.class)));
final Command.CommandWrapper retrievedWrapper = commandService.retrieveWrapper(wrapper.id());
assertThat(retrievedWrapper, is(nullValue(Command.CommandWrapper.class)));
}
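// Reads a file into an array of lines; used to inspect files written by containers.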
@SuppressWarnings("deprecation")
private String[] readFile(final String outputFilePath) throws IOException {
return readFile(new File(outputFilePath));
}
@SuppressWarnings("deprecation")
private String[] readFile(final File file) throws IOException {
if (!file.canRead()) {
throw new IOException("Cannot read output file " + file.getAbsolutePath());
}
return FileUtils.readFileToString(file).split("\\n");
}
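// Writes a container's log files to the test log to aid debugging of failed assertions.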
private void printContainerLogs(final Container container) throws IOException {
printContainerLogs(container, "main");
}
private void printContainerLogs(final Container container, final String containerTypeForLogs) throws IOException {
log.debug("Trying to print {} container logs.", containerTypeForLogs);
if (container.logPaths().size() == 0) {
log.debug("No logs.");
return;
}
for (final String containerLogPath : container.logPaths()) {
final String[] containerLogPathComponents = containerLogPath.split("/");
final String containerLogName = containerLogPathComponents[containerLogPathComponents.length - 1];
log.info("Displaying contents of {} for {} container {} {}.", containerLogName, containerTypeForLogs, container.databaseId(), container.containerId());
final String[] logLines = readFile(containerLogPath);
for (final String logLine : logLines) {
log.info("\t{}", logLine);
}
}
}
private CustomTypeSafeMatcher<File> pathEndsWith(final String filePathEnd) {
final String description = "Match a file path if it ends with " + filePathEnd;
return new CustomTypeSafeMatcher<File>(description) {
@Override
protected boolean matchesSafely(final File item) {
return item == null && filePathEnd == null ||
item != null && item.getAbsolutePath().endsWith(filePathEnd);
}
};
}
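// Awaitility condition: true once the container (or its single swarm task) has left the "created" state.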
private Callable<Boolean> containerHasStarted(final Container container) {
return new Callable<Boolean>() {
public Boolean call() throws Exception {
try {
if (swarmMode) {
final Service serviceResponse = CLIENT.inspectService(container.serviceId());
final List<Task> tasks = CLIENT.listTasks(Task.Criteria.builder().serviceName(serviceResponse.spec().name()).build());
if (tasks.size() != 1) {
return false;
}
final Task task = tasks.get(0);
final ServiceTask serviceTask = ServiceTask.create(task, container.serviceId());
if (serviceTask.hasNotStarted()) {
return false;
}
return true;
} else {
final ContainerInfo containerInfo = CLIENT.inspectContainer(container.containerId());
return (!containerInfo.state().status().equals("created"));
}
} catch (ContainerNotFoundException ignored) {
// Ignore exception. If container is not found, it is not running.
return false;
}
}
};
}
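// Awaitility condition: true while the container (or any of its swarm tasks) is still running.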
private Callable<Boolean> containerIsRunning(final Container container) {
return new Callable<Boolean>() {
public Boolean call() throws Exception {
try {
if (swarmMode) {
final Service serviceResponse = CLIENT.inspectService(container.serviceId());
final List<Task> tasks = CLIENT.listTasks(Task.Criteria.builder().serviceName(serviceResponse.spec().name()).build());
for (final Task task : tasks) {
final ServiceTask serviceTask = ServiceTask.create(task, container.serviceId());
if (serviceTask.isExitStatus()) {
return false;
} else if (serviceTask.status().equals("running")) {
return true;
}
}
return false;
} else {
final ContainerInfo containerInfo = CLIENT.inspectContainer(container.containerId());
return containerInfo.state().running();
}
} catch (ContainerNotFoundException ignored) {
// Ignore exception. If container is not found, it is not running.
return false;
}
}
};
}
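// Awaitility condition: true once log paths have been recorded for the container, i.e. it has been finalized.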
private Callable<Boolean> containerHasLogPaths(final long containerDbId) {
return new Callable<Boolean>() {
public Boolean call() throws Exception {
final Container container = containerService.get(containerDbId);
return container.logPaths().size() > 0;
}
};
}
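// Awaitility condition: true once the container's history contains an entry recorded by the system status updater.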
private Callable<Boolean> containerHistoryHasItemFromSystem(final long containerDatabaseId) {
return new Callable<Boolean>() {
public Boolean call() throws Exception {
try {
final Container container = containerService.get(containerDatabaseId);
for (final Container.ContainerHistory historyItem : container.history()) {
if (historyItem.entityType() != null && historyItem.entityType().equals("system")) {
return true;
}
}
} catch (Exception ignored) {
// ignored
}
return false;
}
};
}
}
|
[
"\"DOCKER_HOST\"",
"\"DOCKER_CERT_PATH\"",
"\"DOCKER_TLS_VERIFY\"",
"\"CIRCLECI\""
] |
[] |
[
"DOCKER_HOST",
"CIRCLECI",
"DOCKER_CERT_PATH",
"DOCKER_TLS_VERIFY"
] |
[]
|
["DOCKER_HOST", "CIRCLECI", "DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY"]
|
java
| 4 | 0 | |
ddl/ddl_test.go
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"os"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mock"
"golang.org/x/net/context"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
logutil.InitLogger(&logutil.LogConfig{
Level: logLevel,
Format: "highlight",
})
TestingT(t)
}
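// testCreateStore creates a mock TiKV store for use in tests.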
func testCreateStore(c *C, name string) kv.Storage {
store, err := mockstore.NewMockTikvStore()
c.Assert(err, IsNil)
return store
}
func testNewContext(d *ddl) sessionctx.Context {
ctx := mock.NewContext()
ctx.Store = d.store
return ctx
}
func testNewDDL(ctx context.Context, etcdCli *clientv3.Client, store kv.Storage,
infoHandle *infoschema.Handle, hook Callback, lease time.Duration) *ddl {
return newDDL(ctx, etcdCli, store, infoHandle, hook, lease, nil)
}
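// getSchemaVer starts a new transaction and returns the current schema version.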
func getSchemaVer(c *C, ctx sessionctx.Context) int64 {
err := ctx.NewTxn()
c.Assert(err, IsNil)
m := meta.NewMeta(ctx.Txn())
ver, err := m.GetSchemaVersion()
c.Assert(err, IsNil)
return ver
}
type historyJobArgs struct {
ver int64
db *model.DBInfo
tbl *model.TableInfo
tblIDs map[int64]struct{}
}
func checkEqualTable(c *C, t1, t2 *model.TableInfo) {
c.Assert(t1.ID, Equals, t2.ID)
c.Assert(t1.Name, Equals, t2.Name)
c.Assert(t1.Charset, Equals, t2.Charset)
c.Assert(t1.Collate, Equals, t2.Collate)
c.Assert(t1.PKIsHandle, DeepEquals, t2.PKIsHandle)
c.Assert(t1.Comment, DeepEquals, t2.Comment)
c.Assert(t1.AutoIncID, DeepEquals, t2.AutoIncID)
}
func checkHistoryJob(c *C, job *model.Job) {
c.Assert(job.State, Equals, model.JobStateSynced)
}
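// checkHistoryJobArgs checks that the history DDL job's binlog info matches the expected schema version, DB info, and table info.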
func checkHistoryJobArgs(c *C, ctx sessionctx.Context, id int64, args *historyJobArgs) {
c.Assert(ctx.NewTxn(), IsNil)
t := meta.NewMeta(ctx.Txn())
historyJob, err := t.GetHistoryDDLJob(id)
c.Assert(err, IsNil)
c.Assert(historyJob.BinlogInfo.FinishedTS, Greater, uint64(0))
if args.tbl != nil {
c.Assert(historyJob.BinlogInfo.SchemaVersion, Equals, args.ver)
checkEqualTable(c, historyJob.BinlogInfo.TableInfo, args.tbl)
return
}
// for handling schema job
c.Assert(historyJob.BinlogInfo.SchemaVersion, Equals, args.ver)
c.Assert(historyJob.BinlogInfo.DBInfo, DeepEquals, args.db)
// only for creating schema job
if args.db != nil && len(args.tblIDs) == 0 {
return
}
}
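// testCreateIndex runs an ActionAddIndex DDL job on the given table and verifies the recorded history.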
func testCreateIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job {
job := &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionAddIndex,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{unique, model.NewCIStr(indexName),
[]*ast.IndexColName{{
Column: &ast.ColumnName{Name: model.NewCIStr(colName)},
Length: types.UnspecifiedLength}}},
}
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
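// testDropIndex runs an ActionDropIndex DDL job on the given table and verifies the recorded history.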
func testDropIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job {
job := &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionDropIndex,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{model.NewCIStr(indexName)},
}
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
|
[
"\"log_level\""
] |
[] |
[
"log_level"
] |
[]
|
["log_level"]
|
go
| 1 | 0 | |
BaseTools/Source/Python/UPT/Xml/IniToXml.py
|
## @file
# This file is for converting package information data file to xml file.
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
IniToXml
'''
import os.path
import re
from time import strftime
from time import localtime
import Logger.Log as Logger
from Logger.ToolError import UPT_INI_PARSE_ERROR
from Logger.ToolError import FILE_NOT_FOUND
from Library.Xml.XmlRoutines import CreateXmlElement
from Library.DataType import TAB_VALUE_SPLIT
from Library.DataType import TAB_EQUAL_SPLIT
from Library.DataType import TAB_SECTION_START
from Library.DataType import TAB_SECTION_END
from Logger import StringTable as ST
from Library.String import ConvertSpecialChar
from Library.ParserValidate import IsValidPath
from Library import GlobalData
## log error:
#
# @param error: error
# @param File: File
# @param Line: Line
#
def IniParseError(Error, File, Line):
Logger.Error("UPT", UPT_INI_PARSE_ERROR, File=File,
Line=Line, ExtraData=Error)
## __ValidatePath
#
# @param Path: Path to be checked
#
def __ValidatePath(Path, Root):
Path = Path.strip()
if os.path.isabs(Path) or not IsValidPath(Path, Root):
return False, ST.ERR_FILELIST_LOCATION % (Root, Path)
return True, ''
## ValidateMiscFile
#
# @param Filename: File to be checked
#
def ValidateMiscFile(Filename):
Root = GlobalData.gWORKSPACE
return __ValidatePath(Filename, Root)
## ValidateToolsFile
#
# @param Filename: File to be checked
#
def ValidateToolsFile(Filename):
Valid, Cause = False, ''
if not Valid and 'EDK_TOOLS_PATH' in os.environ:
Valid, Cause = __ValidatePath(Filename, os.environ['EDK_TOOLS_PATH'])
if not Valid:
Valid, Cause = __ValidatePath(Filename, GlobalData.gWORKSPACE)
return Valid, Cause
## ParseFileList
#
# @param Line: Line
# @param Map: Map
# @param CurrentKey: CurrentKey
# @param PathFunc: Path validate function
#
def ParseFileList(Line, Map, CurrentKey, PathFunc):
FileList = ["", {}]
TokenList = Line.split(TAB_VALUE_SPLIT)
if len(TokenList) > 0:
Path = TokenList[0].strip().replace('\\', '/')
if not Path:
return False, ST.ERR_WRONG_FILELIST_FORMAT
Valid, Cause = PathFunc(Path)
if not Valid:
return Valid, Cause
FileList[0] = TokenList[0].strip()
for Token in TokenList[1:]:
Attr = Token.split(TAB_EQUAL_SPLIT)
if len(Attr) != 2 or not Attr[0].strip() or not Attr[1].strip():
return False, ST.ERR_WRONG_FILELIST_FORMAT
Key = Attr[0].strip()
Val = Attr[1].strip()
if Key not in ['OS', 'Executable']:
return False, ST.ERR_UNKNOWN_FILELIST_ATTR % Key
if Key == 'OS' and Val not in ["Win32", "Win64", "Linux32",
"Linux64", "OS/X32", "OS/X64",
"GenericWin", "GenericNix"]:
return False, ST.ERR_FILELIST_ATTR % 'OS'
elif Key == 'Executable' and Val not in ['true', 'false']:
return False, ST.ERR_FILELIST_ATTR % 'Executable'
FileList[1][Key] = Val
Map[CurrentKey].append(FileList)
return True, ''
## Create header XML file
#
# @param DistMap: DistMap
# @param Root: Root
#
def CreateHeaderXml(DistMap, Root):
Element1 = CreateXmlElement('Name', DistMap['Name'],
[], [['BaseName', DistMap['BaseName']]])
Element2 = CreateXmlElement('GUID', DistMap['GUID'],
[], [['Version', DistMap['Version']]])
AttributeList = [['ReadOnly', DistMap['ReadOnly']],
['RePackage', DistMap['RePackage']]]
NodeList = [Element1,
Element2,
['Vendor', DistMap['Vendor']],
['Date', DistMap['Date']],
['Copyright', DistMap['Copyright']],
['License', DistMap['License']],
['Abstract', DistMap['Abstract']],
['Description', DistMap['Description']],
['Signature', DistMap['Signature']],
['XmlSpecification', DistMap['XmlSpecification']],
]
Root.appendChild(CreateXmlElement('DistributionHeader', '',
NodeList, AttributeList))
## Create tools XML file
#
# @param Map: Map
# @param Root: Root
# @param Tag: Tag
#
def CreateToolsXml(Map, Root, Tag):
#
# Check if all elements in this section are empty
#
for Key in Map:
if len(Map[Key]) > 0:
break
else:
return
NodeList = [['Name', Map['Name']],
['Copyright', Map['Copyright']],
['License', Map['License']],
['Abstract', Map['Abstract']],
['Description', Map['Description']],
]
HeaderNode = CreateXmlElement('Header', '', NodeList, [])
NodeList = [HeaderNode]
for File in Map['FileList']:
AttrList = []
for Key in File[1]:
AttrList.append([Key, File[1][Key]])
NodeList.append(CreateXmlElement('Filename', File[0], [], AttrList))
Root.appendChild(CreateXmlElement(Tag, '', NodeList, []))
## ValidateValues
#
# @param Key: Key
# @param Value: Value
# @param SectionName: SectionName
#
def ValidateValues(Key, Value, SectionName):
if SectionName == 'DistributionHeader':
Valid, Cause = ValidateRegValues(Key, Value)
if not Valid:
return Valid, Cause
Valid = __ValidateDistHeader(Key, Value)
if not Valid:
return Valid, ST.ERR_VALUE_INVALID % (Key, SectionName)
else:
Valid = __ValidateOtherHeader(Key, Value)
if not Valid:
return Valid, ST.ERR_VALUE_INVALID % (Key, SectionName)
return True, ''
## ValidateRegValues
#
# @param Key: Key
# @param Value: Value
#
def ValidateRegValues(Key, Value):
ValidateMap = {
'ReadOnly' :
('true|false', ST.ERR_BOOLEAN_VALUE % (Key, Value)),
'RePackage' :
('true|false', ST.ERR_BOOLEAN_VALUE % (Key, Value)),
'GUID' :
('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}'
'-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}',
ST.ERR_GUID_VALUE % Value),
'Version' : ('[0-9]+(\.[0-9]+)?', ST.ERR_VERSION_VALUE % \
(Key, Value)),
'XmlSpecification' : ('1\.1', ST.ERR_VERSION_XMLSPEC % Value)
}
if Key not in ValidateMap:
return True, ''
Elem = ValidateMap[Key]
Match = re.compile(Elem[0]).match(Value)
if Match and Match.start() == 0 and Match.end() == len(Value):
return True, ''
return False, Elem[1]
## __ValidateDistHeaderName
#
# @param Name: Name
#
def __ValidateDistHeaderName(Name):
if len(Name) < 1:
return False
for Char in Name:
if ord(Char) < 0x20 or ord(Char) >= 0x7f:
return False
return True
## __ValidateDistHeaderBaseName
#
# @param BaseName: BaseName
#
def __ValidateDistHeaderBaseName(BaseName):
if not BaseName:
return False
# if CheckLen and len(BaseName) < 2:
# return False
if not BaseName[0].isalnum() and BaseName[0] != '_':
return False
for Char in BaseName[1:]:
if not Char.isalnum() and Char not in '-_':
return False
return True
## __ValidateDistHeaderAbstract
#
# @param Abstract: Abstract
#
def __ValidateDistHeaderAbstract(Abstract):
return '\t' not in Abstract and len(Abstract.splitlines()) == 1
## __ValidateOtherHeaderAbstract
#
# @param Abstract: Abstract
#
def __ValidateOtherHeaderAbstract(Abstract):
return __ValidateDistHeaderAbstract(Abstract)
## __ValidateDistHeader
#
# @param Key: Key
# @param Value: Value
#
def __ValidateDistHeader(Key, Value):
ValidateMap = {
'Name' : __ValidateDistHeaderName,
'BaseName' : __ValidateDistHeaderBaseName,
'Abstract' : __ValidateDistHeaderAbstract,
'Vendor' : __ValidateDistHeaderAbstract
}
return not (Value and Key in ValidateMap and not ValidateMap[Key](Value))
## __ValidateOtherHeader
#
# @param Key: Key
# @param Value: Value
#
def __ValidateOtherHeader(Key, Value):
ValidateMap = {
'Name' : __ValidateDistHeaderName,
'Abstract' : __ValidateOtherHeaderAbstract
}
return not (Value and Key in ValidateMap and not ValidateMap[Key](Value))
## Convert ini file to xml file
#
# @param IniFile
#
def IniToXml(IniFile):
if not os.path.exists(IniFile):
Logger.Error("UPT", FILE_NOT_FOUND, ST.ERR_TEMPLATE_NOTFOUND % IniFile)
DistMap = {'ReadOnly' : '', 'RePackage' : '', 'Name' : '',
'BaseName' : '', 'GUID' : '', 'Version' : '', 'Vendor' : '',
'Date' : '', 'Copyright' : '', 'License' : '', 'Abstract' : '',
'Description' : '', 'Signature' : '', 'XmlSpecification' : ''
}
ToolsMap = {'Name' : '', 'Copyright' : '', 'License' : '',
'Abstract' : '', 'Description' : '', 'FileList' : []}
#
# Only FileList is a list: [['file1', {}], ['file2', {}], ...]
#
MiscMap = {'Name' : '', 'Copyright' : '', 'License' : '',
'Abstract' : '', 'Description' : '', 'FileList' : []}
SectionMap = {
'DistributionHeader' : DistMap,
'ToolsHeader' : ToolsMap,
'MiscellaneousFilesHeader' : MiscMap
}
PathValidator = {
'ToolsHeader' : ValidateToolsFile,
'MiscellaneousFilesHeader' : ValidateMiscFile
}
ParsedSection = []
SectionName = ''
CurrentKey = ''
PreMap = None
Map = None
FileContent = ConvertSpecialChar(open(IniFile, 'rb').readlines())
LastIndex = 0
for Index in range(0, len(FileContent)):
LastIndex = Index
Line = FileContent[Index].strip()
if Line == '' or Line.startswith(';'):
continue
if Line[0] == TAB_SECTION_START and Line[-1] == TAB_SECTION_END:
CurrentKey = ''
SectionName = Line[1:-1].strip()
if SectionName not in SectionMap:
IniParseError(ST.ERR_SECTION_NAME_INVALID % SectionName,
IniFile, Index+1)
if SectionName in ParsedSection:
IniParseError(ST.ERR_SECTION_REDEFINE % SectionName,
IniFile, Index+1)
else:
ParsedSection.append(SectionName)
Map = SectionMap[SectionName]
continue
if not Map:
IniParseError(ST.ERR_SECTION_NAME_NONE, IniFile, Index+1)
TokenList = Line.split(TAB_EQUAL_SPLIT, 1)
TempKey = TokenList[0].strip()
#
# Value spans multiple lines, or the same keyword appears more than one time
#
if len(TokenList) < 2 or TempKey not in Map:
if CurrentKey == '':
IniParseError(ST.ERR_KEYWORD_INVALID % TempKey,
IniFile, Index+1)
elif CurrentKey == 'FileList':
#
# Special for FileList
#
Valid, Cause = ParseFileList(Line, Map, CurrentKey,
PathValidator[SectionName])
if not Valid:
IniParseError(Cause, IniFile, Index+1)
else:
#
# Multiple lines for one key such as license
# Or if string on the left side of '=' is not a keyword
#
Map[CurrentKey] = ''.join([Map[CurrentKey], '\n', Line])
Valid, Cause = ValidateValues(CurrentKey,
Map[CurrentKey], SectionName)
if not Valid:
IniParseError(Cause, IniFile, Index+1)
continue
if (TokenList[1].strip() == ''):
IniParseError(ST.ERR_EMPTY_VALUE, IniFile, Index+1)
#
# A keyword found
#
CurrentKey = TempKey
if Map[CurrentKey]:
IniParseError(ST.ERR_KEYWORD_REDEFINE % CurrentKey,
IniFile, Index+1)
if id(Map) != id(PreMap) and Map['Copyright']:
PreMap = Map
Copyright = Map['Copyright'].lower()
Pos = Copyright.find('copyright')
if Pos == -1:
IniParseError(ST.ERR_COPYRIGHT_CONTENT, IniFile, Index)
if not Copyright[Pos + len('copyright'):].lstrip(' ').startswith('('):
IniParseError(ST.ERR_COPYRIGHT_CONTENT, IniFile, Index)
if CurrentKey == 'FileList':
Valid, Cause = ParseFileList(TokenList[1], Map, CurrentKey,
PathValidator[SectionName])
if not Valid:
IniParseError(Cause, IniFile, Index+1)
else:
Map[CurrentKey] = TokenList[1].strip()
Valid, Cause = ValidateValues(CurrentKey,
Map[CurrentKey], SectionName)
if not Valid:
IniParseError(Cause, IniFile, Index+1)
if id(Map) != id(PreMap) and Map['Copyright'] and 'copyright' not in Map['Copyright'].lower():
IniParseError(ST.ERR_COPYRIGHT_CONTENT, IniFile, LastIndex)
#
# Check mandatory keys
#
CheckMdtKeys(DistMap, IniFile, LastIndex,
(('ToolsHeader', ToolsMap), ('MiscellaneousFilesHeader', MiscMap))
)
return CreateXml(DistMap, ToolsMap, MiscMap, IniFile)
## CheckMdtKeys
#
# @param MdtDistKeys: All mandatory keys
# @param DistMap: Dist content
# @param IniFile: Ini file
# @param LastIndex: Last index of Ini file
# @param Maps: Tools and Misc section name and map. (('section_name', map),*)
#
def CheckMdtKeys(DistMap, IniFile, LastIndex, Maps):
MdtDistKeys = ['Name', 'GUID', 'Version', 'Vendor', 'Copyright', 'License', 'Abstract', 'XmlSpecification']
for Key in MdtDistKeys:
if Key not in DistMap or DistMap[Key] == '':
IniParseError(ST.ERR_KEYWORD_MANDATORY % Key, IniFile, LastIndex+1)
if '.' not in DistMap['Version']:
DistMap['Version'] = DistMap['Version'] + '.0'
DistMap['Date'] = str(strftime("%Y-%m-%dT%H:%M:%S", localtime()))
#
# Check Tools Surface Area according to UPT Spec
# <Tools> {0,}
# <Header> ... </Header> {0,1}
# <Filename> ... </Filename> {1,}
# </Tools>
# <Header>
# <Name> xs:normalizedString </Name> {1}
# <Copyright> xs:string </Copyright> {0,1}
# <License> xs:string </License> {0,1}
# <Abstract> xs:normalizedString </Abstract> {0,1}
# <Description> xs:string </Description> {0,1}
# </Header>
#
for Item in Maps:
Map = Item[1]
NonEmptyKey = 0
for Key in Map:
if Map[Key]:
NonEmptyKey += 1
if NonEmptyKey > 0 and not Map['FileList']:
IniParseError(ST.ERR_KEYWORD_MANDATORY % (Item[0] + '.FileList'), IniFile, LastIndex+1)
if NonEmptyKey > 0 and not Map['Name']:
IniParseError(ST.ERR_KEYWORD_MANDATORY % (Item[0] + '.Name'), IniFile, LastIndex+1)
## CreateXml
#
# @param DistMap: Dist Content
# @param ToolsMap: Tools Content
# @param MiscMap: Misc Content
# @param IniFile: Ini File
#
def CreateXml(DistMap, ToolsMap, MiscMap, IniFile):
Attrs = [['xmlns', 'http://www.uefi.org/2011/1.1'],
             ['xmlns:xsi', 'http://www.w3.org/2001/XMLSchema-instance'],
            ]
    Root = CreateXmlElement('DistributionPackage', '', [], Attrs)
    CreateHeaderXml(DistMap, Root)
    CreateToolsXml(ToolsMap, Root, 'Tools')
    CreateToolsXml(MiscMap, Root, 'MiscellaneousFiles')
    FileAndExt = IniFile.rsplit('.', 1)
    if len(FileAndExt) > 1:
        FileName = FileAndExt[0] + '.xml'
    else:
        FileName = IniFile + '.xml'
    File = open(FileName, 'w')
    try:
        File.write(Root.toprettyxml(indent = ' '))
    finally:
        File.close()
    return FileName
| [] | [] | ["EDK_TOOLS_PATH"] | [] | ["EDK_TOOLS_PATH"] | python | 1 | 0 | |
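For reference, the IniToXml-style parser above consumes a UPT package-description INI and emits the corresponding DistributionPackage XML. A minimal, hypothetical input sketch (section and key names are taken from SectionMap and CheckMdtKeys in the code; all values are invented for illustration) would look roughly like:

    [DistributionHeader]
    Name = SamplePkg
    GUID = 11111111-2222-3333-4444-555555555555
    Version = 1.0
    Vendor = Example Vendor
    Copyright = Copyright (c) 2015, Example Vendor.
    License = BSD-2-Clause
    Abstract = Example distribution package
    XmlSpecification = 1.1

    [ToolsHeader]
    Name = Example tools
    FileList = Tools/Example.exe

Multi-line values (a long License, for example) simply continue on the following lines and are joined with newlines, and FileList accepts one file entry per continuation line, exactly as handled in the CurrentKey == 'FileList' branch.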
test/kb_virsorter2_server_test.py
|
# -*- coding: utf-8 -*-
import os
import time
import unittest
from configparser import ConfigParser
from kb_virsorter2.kb_virsorter2Impl import kb_virsorter2
from kb_virsorter2.kb_virsorter2Server import MethodContext
from kb_virsorter2.authclient import KBaseAuth as _KBaseAuth
from installed_clients.WorkspaceClient import Workspace
class kb_virsorter2Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        token = os.environ.get('KB_AUTH_TOKEN', None)
        config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        for nameval in config.items('kb_virsorter2'):
            cls.cfg[nameval[0]] = nameval[1]
        # Getting username from Auth profile for token
        authServiceUrl = cls.cfg['auth-service-url']
        auth_client = _KBaseAuth(authServiceUrl)
        user_id = auth_client.get_user(token)
        # WARNING: don't call any logging methods on the context object,
        # it'll result in a NoneType error
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': token,
                        'user_id': user_id,
                        'provenance': [
                            {'service': 'kb_virsorter2',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []
                             }],
                        'authenticated': 1})
        cls.wsURL = cls.cfg['workspace-url']
        cls.wsClient = Workspace(cls.wsURL)
        cls.serviceImpl = kb_virsorter2(cls.cfg)
        cls.scratch = cls.cfg['scratch']
        cls.callback_url = os.environ['SDK_CALLBACK_URL']
        suffix = int(time.time() * 1000)
        cls.wsName = "test_ContigFilter_" + str(suffix)
        ret = cls.wsClient.create_workspace({'workspace': cls.wsName})  # noqa
    @classmethod
    def tearDownClass(cls):
        if hasattr(cls, 'wsName'):
            cls.wsClient.delete_workspace({'workspace': cls.wsName})
            print('Test workspace was deleted')
    # NOTE: According to Python unittest naming rules test method names should start from 'test'. # noqa
    def test_your_method(self):
        # Prepare test objects in workspace if needed using
        # self.getWsClient().save_objects({'workspace': self.getWsName(),
        #                                  'objects': []})
        #
        # Run your method by
        # ret = self.getImpl().your_method(self.getContext(), parameters...)
        #
        # Check returned data with
        # self.assertEqual(ret[...], ...) or other unittest methods
        ret = self.serviceImpl.run_kb_virsorter2(self.ctx, {'workspace_name': self.wsName,
                                                            'parameter_1': 'Hello World!'})
| [] | [] | ["SDK_CALLBACK_URL", "KB_AUTH_TOKEN", "KB_DEPLOYMENT_CONFIG"] | [] | ["SDK_CALLBACK_URL", "KB_AUTH_TOKEN", "KB_DEPLOYMENT_CONFIG"] | python | 3 | 0 | |
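The setUpClass above pulls its settings from the INI file named by KB_DEPLOYMENT_CONFIG and reads a handful of keys from the [kb_virsorter2] section. A hypothetical sketch of that section (key names come from the code; the values are placeholders, not real endpoints) could be:

    [kb_virsorter2]
    auth-service-url = https://<kbase-host>/services/auth
    workspace-url = https://<kbase-host>/services/ws
    scratch = /kb/module/work/tmp

Every key in the section is copied into cls.cfg, but only auth-service-url, workspace-url, and scratch are referenced explicitly in the test.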
util/chplenv/chpl_llvm.py
|
#!/usr/bin/env python3
import optparse
import os
import sys
import chpl_bin_subdir, chpl_arch, chpl_compiler, chpl_platform, overrides
from chpl_home_utils import get_chpl_third_party
from utils import memoize, run_command
@memoize
def get_uniq_cfg_path_for(llvm_val):
    if llvm_val == "llvm":
        # put platform-arch-compiler for included llvm
        host_bin_subdir = chpl_bin_subdir.get('host')
        host_compiler = chpl_compiler.get('host')
        llvm_target_dir = '{0}-{1}'.format(host_bin_subdir, host_compiler)
    else:
        # just put 'system' for system llvm
        llvm_target_dir = llvm_val
    return llvm_target_dir
@memoize
def get_uniq_cfg_path():
    llvm_val = get()
    return get_uniq_cfg_path_for(llvm_val)
def is_included_llvm_built():
    chpl_third_party = get_chpl_third_party()
    llvm_target_dir = get_uniq_cfg_path_for('llvm')
    llvm_subdir = os.path.join(chpl_third_party, 'llvm', 'install',
                               llvm_target_dir)
    llvm_header = os.path.join(llvm_subdir, 'include', 'llvm',
                               'PassSupport.h')
    if os.path.exists(llvm_header):
        return True
    else:
        return False
def compatible_platform_for_llvm_default():
    target_arch = chpl_arch.get('target')
return (target_arch != "i368")
def has_compatible_installed_llvm():
    preferred_vers_file = os.path.join(get_chpl_third_party(),
                                       'llvm', 'LLVM_VERSION')
    preferred_vers = ""
    with open(preferred_vers_file, 'r') as file:
        preferred_vers = file.read().strip()
    find_llvm_config = os.path.join(get_chpl_third_party(),
                                    'llvm', 'find-llvm-config.sh')
    got = run_command([find_llvm_config, preferred_vers])
    got = got.strip()
    if got and got != "missing-llvm-config":
        return True
    else:
        return False
@memoize
def get():
    llvm_val = overrides.get('CHPL_LLVM')
    if not llvm_val:
        llvm_val = 'none'
        if is_included_llvm_built():
            llvm_val = 'llvm'
        elif ("CHPL_LLVM_BY_DEFAULT" in os.environ and
              os.environ["CHPL_LLVM_BY_DEFAULT"] != "0" and
              # CHPL_LLVM_BY_DEFAULT is an enviro var to help us transition
              compatible_platform_for_llvm_default()):
            if has_compatible_installed_llvm():
                llvm_val = 'system'
            else:
                llvm_val = 'llvm'
    return llvm_val
def _main():
    llvm_val = get()
    parser = optparse.OptionParser(usage='usage: %prog [--needs-llvm-runtime]')
    parser.add_option('--needs-llvm-runtime', dest='needsllvm',
                      action='store_const',
                      const='needsllvm', default='')
    (options, args) = parser.parse_args()
    #if --needs-llvm-runtime is set, print out llvm if runtime is needed,
    # and print out nothing if it is not.
    if options.needsllvm:
        if llvm_val == 'system' or llvm_val == 'llvm':
            sys.stdout.write("llvm\n");
    else:
        sys.stdout.write("{0}\n".format(llvm_val))
if __name__ == '__main__':
    _main()
| [] | [] | ["CHPL_LLVM_BY_DEFAULT"] | [] | ["CHPL_LLVM_BY_DEFAULT"] | python | 1 | 0 | |
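As a rough sketch of how the script above behaves when invoked directly (the invocations and outputs below follow from _main and assume the overrides module picks CHPL_LLVM up from the environment; they are illustrative, not verified against a real checkout):

    $ CHPL_LLVM=system util/chplenv/chpl_llvm.py
    system
    $ CHPL_LLVM=system util/chplenv/chpl_llvm.py --needs-llvm-runtime
    llvm
    $ CHPL_LLVM=none util/chplenv/chpl_llvm.py --needs-llvm-runtime
    (prints nothing, since no LLVM runtime is needed)

Without an explicit CHPL_LLVM override, get() falls back to 'none', 'llvm', or 'system' depending on whether the bundled LLVM has been built and whether CHPL_LLVM_BY_DEFAULT is set to something other than "0".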
mysite/addOne.py
|
#!/usr/bin/env python
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
import django
django.setup()
# your imports, e.g. Django models
from buckets.models import *
from buckets.name2date import name2date
bucket = BucketInfo.objects.get(name='xinkaibuk1')
allInDb = set()
allInDb.update(ImageInfo.objects.all())
file_name='20180227.062043.749.jpg'
date = name2date(file_name)
# print date
img = ImageInfo(file_name=file_name, date_time=date, bucket=bucket)
img.save()
print('image saved')
| [] | [] | [] | [] | [] | python | 0 | 0