filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2–3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0–129, ⌀ = null) | variableargcount (float64, 0–0, ⌀ = null) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
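For orientation, here is a minimal sketch of a struct that one such row could be decoded into if the dataset were exported as JSON lines; the struct name, field names, the JSON framing, and the sample record are assumptions for illustration only, not part of the dataset tooling.

package main

import (
	"encoding/json"
	"fmt"
)

// rowSketch is a hypothetical struct mirroring the columns above.
type rowSketch struct {
	Filename         string   `json:"filename"`
	Content          string   `json:"content"`
	Environment      []string `json:"environment"`
	VariableArg      []string `json:"variablearg"`
	ConstArg         []string `json:"constarg"`
	VariableArgJSON  string   `json:"variableargjson"`
	ConstArgJSON     string   `json:"constargjson"`
	Lang             string   `json:"lang"`
	ConstArgCount    *float64 `json:"constargcount"`    // nullable (⌀ in the schema)
	VariableArgCount *float64 `json:"variableargcount"` // nullable (⌀ in the schema)
	Sentence         string   `json:"sentence"`
}

func main() {
	// A made-up sample record, only to show the decoding step.
	sample := `{"filename":"cache/main.go","lang":"go","environment":["REDIS_DB"]}`
	var r rowSketch
	if err := json.Unmarshal([]byte(sample), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Filename, r.Lang, r.Environment)
}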
https/example/simpleServer.go | package main
import (
"flag"
"github.com/nelkinda/health-go"
"github.com/nelkinda/health-go/checks/uptime"
"github.com/nelkinda/http-go/https"
"net/http"
"os"
)
func main() {
serverNamePtr := flag.String("servername", os.Getenv("HOSTNMAE"), "Hostname for HTTPS.")
startHttpsPtr := flag.Bool("https", false, "Start HTTPS.")
flag.Parse()
mux := http.NewServeMux()
mux.HandleFunc("/health", health.New(health.Health{Version: "1", ReleaseID: "0.0.1-SNAPSHOT"}, uptime.Process()).Handler)
if *startHttpsPtr {
https.MustServeHttps(".", mux, *serverNamePtr)
} else {
https.MustServeHttp(mux)
}
https.WaitForIntOrTerm()
}
| [
"\"HOSTNMAE\""
]
| []
| [
"HOSTNMAE"
]
| [] | ["HOSTNMAE"] | go | 1 | 0 | |
third_party/github.com/docker/libcompose/project/context.go | package project
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strings"
log "github.com/golang/glog"
)
var projectRegexp = regexp.MustCompile("[^a-zA-Z0-9_.-]")
// Context holds context meta information about a libcompose project, like
// the project name, the compose file, etc.
type Context struct {
Timeout uint
Log bool
Volume bool
ForceRecreate bool
NoRecreate bool
NoCache bool
NoBuild bool
Signal string
ComposeFiles []string
ComposeBytes [][]byte
ProjectName string
isOpen bool
ServiceFactory ServiceFactory
EnvironmentLookup EnvironmentLookup
ResourceLookup ResourceLookup
IgnoreMissingConfig bool
Project *Project
}
func (c *Context) readComposeFiles() error {
if c.ComposeBytes != nil {
return nil
}
log.V(4).Infof("Opening compose files: %s", strings.Join(c.ComposeFiles, ","))
// Handle STDIN (`-f -`)
if len(c.ComposeFiles) == 1 && c.ComposeFiles[0] == "-" {
composeBytes, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Errorf("Failed to read compose file from stdin: %v", err)
return err
}
c.ComposeBytes = [][]byte{composeBytes}
return nil
}
for _, composeFile := range c.ComposeFiles {
composeBytes, err := ioutil.ReadFile(composeFile)
if err != nil && !os.IsNotExist(err) {
log.Errorf("Failed to open the compose file: %s", composeFile)
return err
}
if err != nil && !c.IgnoreMissingConfig {
log.Errorf("Failed to find the compose file: %s", composeFile)
return err
}
c.ComposeBytes = append(c.ComposeBytes, composeBytes)
}
return nil
}
func (c *Context) determineProject() error {
name, err := c.lookupProjectName()
if err != nil {
return err
}
c.ProjectName = projectRegexp.ReplaceAllString(strings.ToLower(name), "-")
if c.ProjectName == "" {
return fmt.Errorf("Falied to determine project name")
}
if strings.ContainsAny(c.ProjectName[0:1], "_.-") {
c.ProjectName = "x" + c.ProjectName
}
return nil
}
func (c *Context) lookupProjectName() (string, error) {
if c.ProjectName != "" {
return c.ProjectName, nil
}
if envProject := os.Getenv("COMPOSE_PROJECT_NAME"); envProject != "" {
return envProject, nil
}
file := "."
if len(c.ComposeFiles) > 0 {
file = c.ComposeFiles[0]
}
f, err := filepath.Abs(file)
if err != nil {
log.Errorf("Failed to get absolute directory for: %s", file)
return "", err
}
f = toUnixPath(f)
parent := path.Base(path.Dir(f))
if parent != "" && parent != "." {
return parent, nil
} else if wd, err := os.Getwd(); err != nil {
return "", err
} else {
return path.Base(toUnixPath(wd)), nil
}
}
func toUnixPath(p string) string {
return strings.Replace(p, "\\", "/", -1)
}
func (c *Context) open() error {
if c.isOpen {
return nil
}
if err := c.readComposeFiles(); err != nil {
return err
}
if err := c.determineProject(); err != nil {
return err
}
c.isOpen = true
return nil
}
| [
"\"COMPOSE_PROJECT_NAME\""
]
| []
| [
"COMPOSE_PROJECT_NAME"
]
| [] | ["COMPOSE_PROJECT_NAME"] | go | 1 | 0 | |
cache/main.go | package cache
import (
"myblog/util"
"os"
"strconv"
"github.com/go-redis/redis"
)
// RedisClient is the Redis cache client singleton
var RedisClient *redis.Client
// Redis initializes the Redis connection in the middleware
func Redis() {
db, _ := strconv.ParseUint(os.Getenv("REDIS_DB"), 10, 64)
client := redis.NewClient(&redis.Options{
Addr: os.Getenv("REDIS_ADDR"),
Password: os.Getenv("REDIS_PW"),
DB: int(db),
})
_, err := client.Ping().Result()
if err != nil {
util.Log().Panic("Failed to connect to Redis", err)
}
RedisClient = client
}
| [
"\"REDIS_DB\"",
"\"REDIS_ADDR\"",
"\"REDIS_PW\""
]
| []
| [
"REDIS_DB",
"REDIS_ADDR",
"REDIS_PW"
]
| [] | ["REDIS_DB", "REDIS_ADDR", "REDIS_PW"] | go | 3 | 0 | |
src/cogent3/recalculation/calculation.py | #!/usr/bin/env python
import os
import time
import warnings
import numpy
from cogent3.maths.optimisers import ParameterOutOfBoundsError, maximise
from cogent3.maths.solve import find_root
Float = numpy.core.numerictypes.sctype2char(float)
TRACE_DEFAULT = "COGENT3_TRACE" in os.environ
TRACE_SCALE = 100000
__author__ = "Peter Maxwell"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Peter Maxwell", "Gavin Huttley", "Daniel McDonald"]
__license__ = "BSD-3"
__version__ = "2020.6.30a"
__maintainer__ = "Peter Maxwell"
__email__ = "[email protected]"
__status__ = "Production"
# This is the 'live' layer of the recalculation system
# Cells and OptPars are held by a Calculator
# For docstring see definitions.py
class CalculationInterupted(Exception):
pass
class OptPar(object):
"""One parameter, as seen by the optimiser, eg: length of one edge.
An OptPar reports changes to the ParameterValueSet for its parameter.
"""
is_constant = False
recycled = False
args = ()
# Use of __slots__ here and in Cell gives 8% speedup on small calculators.
__slots__ = [
"clients",
"client_ranks",
"name",
"lower",
"default_value",
"upper",
"scope",
"order",
"label",
"consequences",
"rank",
]
def __init__(self, name, scope, bounds):
self.clients = []
self.client_ranks = []
self.name = name
for (attr, v) in zip(["lower", "default_value", "upper"], bounds):
setattr(self, attr, float(v))
# controls order in optimiser - group for LF
self.scope = scope
self.order = (len(scope), scope and min(scope), name)
self.label = self.name
def add_client(self, client):
self.clients.append(client)
def __lt__(self, other):
# optimisation is more efficient if params for one edge are neighbours
return self.order < other.order
def __eq__(self, other):
# optimisation is more efficient if params for one edge are neighbours
return self.order == other.order
def __ne__(self, other):
# optimisation is more efficient if params for one edge are neighbours
return self.order != other.order
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.label)
def get_optimiser_bounds(self):
lower = self.transform_to_optimiser(self.lower)
upper = self.transform_to_optimiser(self.upper)
return (lower, upper)
def transform_from_optimiser(self, value):
return value
def transform_to_optimiser(self, value):
return value
class LogOptPar(OptPar):
# For ratios, optimiser sees log(param value). Conversions to/from
# optimiser representation are only done by Calculator.change(),
# .get_value_array() and .get_bounds_vectors().
def transform_from_optimiser(self, value):
return numpy.exp(value)
def transform_to_optimiser(self, value):
try:
return numpy.log(value)
except OverflowError:
raise OverflowError("log(%s)" % value)
class EvaluatedCell(object):
__slots__ = [
"client_ranks",
"rank",
"calc",
"args",
"is_constant",
"clients",
"failure_count",
"name",
"arg_ranks",
"consequences",
"recycled",
"default",
]
def __init__(self, name, calc, args, recycling=None, default=None):
self.name = name
self.rank = None
self.calc = calc
self.default = default
self.args = tuple(args)
self.recycled = recycling
if recycling:
self.args = (self,) + self.args
self.is_constant = True
for arg in args:
arg.add_client(self)
if not arg.is_constant:
self.is_constant = False
self.clients = []
self.client_ranks = []
self.failure_count = 0
def add_client(self, client):
self.clients.append(client)
def update(self, data):
data[self.rank] = self.calc(*[data[arg_rank] for arg_rank in self.arg_ranks])
def prime(self, data_sets):
if self.is_constant:
# Just calc once
self.update(data_sets[0])
for data in data_sets[1:]:
data[self.rank] = data_sets[0][self.rank]
else:
for data in data_sets:
self.update(data)
def report_error(self, detail, data):
self.failure_count += 1
if self.failure_count <= 5:
print(("%s in calculating %s:", detail.__class__.__name__, self.name))
if self.failure_count == 5:
print("Additional failures of this type will not be reported.")
if self.failure_count < 2:
print("%s inputs were:", len(self.arg_ranks))
for (i, arg) in enumerate(self.arg_ranks):
print("%s: " % i + repr(data[arg]))
class ConstCell(object):
__slots__ = ["name", "scope", "value", "rank", "consequences", "clients"]
recycled = False
is_constant = True
args = ()
def __init__(self, name, value):
self.name = name
self.clients = []
self.value = value
def add_client(self, client):
self.clients.append(client)
class Calculator(object):
"""A complete hierarchical function with N evaluation steps to call
for each change of inputs. Made by a ParameterController."""
def __init__(self, cells, defns, trace=None, with_undo=True):
if trace is None:
trace = TRACE_DEFAULT
self.with_undo = with_undo
self.results_by_id = defns
self.opt_pars = []
other_cells = []
for cell in cells:
if isinstance(cell, OptPar):
self.opt_pars.append(cell)
else:
other_cells.append(cell)
self._cells = self.opt_pars + other_cells
data_sets = [[0], [0, 1]][self.with_undo]
self.cell_values = [[None] * len(self._cells) for switch in data_sets]
self.arg_ranks = [[] for cell in self._cells]
for (i, cell) in enumerate(self._cells):
cell.rank = i
cell.consequences = {}
if isinstance(cell, OptPar):
for switch in data_sets:
self.cell_values[switch][i] = cell.default_value
elif isinstance(cell, ConstCell):
for switch in data_sets:
self.cell_values[switch][i] = cell.value
elif isinstance(cell, EvaluatedCell):
cell.arg_ranks = []
for arg in cell.args:
if hasattr(arg, "client_ranks"):
arg.client_ranks.append(i)
self.arg_ranks[i].append(arg.rank)
cell.arg_ranks.append(arg.rank)
try:
cell.prime(self.cell_values)
except KeyboardInterrupt:
raise
except Exception as detail:
print(("Failed initial calculation of %s" % cell.name))
raise
else:
raise RuntimeError("Unexpected Cell type %s" % type(cell))
self._switch = 0
self.recycled_cells = [cell.rank for cell in self._cells if cell.recycled]
self.spare = [None] * len(self._cells)
for cell in self._cells[::-1]:
for arg in cell.args:
arg.consequences[cell.rank] = True
arg.consequences.update(cell.consequences)
self._programs = {}
# Just for timings pre-calc these
for opt_par in self.opt_pars:
self.cells_changed_by([(opt_par.rank, None)])
self.last_values = self.get_value_array()
self.last_undo = []
self.elapsed_time = 0.0
self.evaluations = 0
self.set_tracing(trace)
self.optimised = False
def graphviz(self):
"""Returns a string in the 'dot' graph description language used by the
program 'Graphviz'. One box per cell, grouped by Defn."""
lines = ["digraph G {\n rankdir = LR\n ranksep = 1\n"]
evs = []
for cell in self._cells:
if cell.name not in evs:
evs.append(cell.name)
nodes = dict([(name, []) for name in evs])
edges = []
for cell in self._cells:
if hasattr(cell, "name"):
nodes[cell.name].append(cell)
for arg in cell.args:
if arg is not cell:
edges.append(
'"%s":%s -> "%s":%s'
% (arg.name, arg.rank, cell.name, cell.rank)
)
for name in evs:
all_const = True
some_const = False
enodes = [name.replace("edge", "QQQ")]
for cell in nodes[name]:
value = self._get_current_cell_value(cell)
if isinstance(value, float):
label = "%5.2e" % value
else:
label = "[]"
label = "<%s> %s" % (cell.rank, label)
enodes.append(label)
all_const = all_const and cell.is_constant
some_const = some_const or cell.is_constant
enodes = "|".join(enodes)
colour = ["", " fillcolor=gray90, style=filled,"][some_const]
colour = [colour, " fillcolor=gray, style=filled,"][all_const]
lines.append(
'"%s" [shape = "record",%s label="%s"];' % (name, colour, enodes)
)
lines.extend(edges)
lines.append("}")
return "\n".join(lines).replace("edge", "egde").replace("QQQ", "edge")
def optimise(self, **kw):
x = self.get_value_array()
bounds = self.get_bounds_vectors()
maximise(self, x, bounds, **kw)
self.optimised = True
def set_tracing(self, trace=False):
"""With 'trace' true every evaluated is printed. Useful for profiling
and debugging."""
self.trace = trace
if trace:
print()
n_opars = len(self.opt_pars)
n_cells = len([c for c in self._cells if not c.is_constant])
print(n_opars, "OptPars and", n_cells - n_opars, "derived values")
print("OptPars: ", ", ".join([par.name for par in self.opt_pars]))
print("Times in 1/%sths of a second" % TRACE_SCALE)
groups = []
groupd = {}
for cell in self._cells:
if cell.is_constant or not isinstance(cell, EvaluatedCell):
continue
if cell.name not in groupd:
group = []
groups.append((cell.name, group))
groupd[cell.name] = group
groupd[cell.name].append(cell)
widths = []
for (name, cells) in groups:
width = 4 + len(cells)
widths.append(min(15, width))
self._cellsGroupedForDisplay = list(zip(groups, widths))
for ((name, cells), width) in self._cellsGroupedForDisplay:
print(name[:width].ljust(width), "|", end=" ")
print()
for width in widths:
print("-" * width, "|", end=" ")
print()
def get_value_array(self):
"""This being a caching function, you can ask it for its current
input! Handy for initialising the optimiser."""
values = [
p.transform_to_optimiser(self._get_current_cell_value(p))
for p in self.opt_pars
]
return values
# get_bounds_vectors and testoptparvector make up the old LikelihoodFunction
# interface expected by the optimiser.
def get_bounds_vectors(self):
"""2 arrays: minimums, maximums"""
lower = numpy.zeros([len(self.opt_pars)], Float)
upper = numpy.zeros([len(self.opt_pars)], Float)
for (i, opt_par) in enumerate(self.opt_pars):
(lb, ub) = opt_par.get_optimiser_bounds()
lower[i] = lb
upper[i] = ub
return (lower, upper)
def fuzz(self, random_series=None, seed=None):
# Slight randomisation suitable for removing right-on-the-
# ridge starting points before local optimisation.
if random_series is None:
import random
random_series = random.Random()
if seed is not None:
random_series.seed(seed)
X = self.get_value_array()
for (i, (l, u)) in enumerate(zip(*self.get_bounds_vectors())):
sign = random_series.choice([-1, +1])
step = random_series.uniform(+0.05, +0.025)
X[i] = max(l, min(u, (1.0 + sign * step * X[i])))
self.testoptparvector(X)
self.optimised = False
def testoptparvector(self, values):
"""AKA self(). Called by optimisers. Returns the output value
after doing any recalculation required for the new input 'values'
array"""
assert len(values) == len(self.opt_pars)
changes = [
(i, new)
for (i, (old, new)) in enumerate(zip(self.last_values, values))
if old != new
]
return self.change(changes)
__call__ = testoptparvector
def testfunction(self):
"""Return the current output value without changing any inputs"""
return self._get_current_cell_value(self._cells[-1])
def change(self, changes):
"""Returns the output value after applying 'changes', a list of
(optimisable_parameter_ordinal, new_value) tuples."""
t0 = time.time()
self.evaluations += 1
# If ALL of the changes made in the last step are reversed in this step
# then it is safe to undo them first, taking advantage of the 1-deep
# cache.
if self.with_undo and self.last_undo:
for (i, v) in self.last_undo:
if (i, v) not in changes:
break
else:
changes = [ch for ch in changes if ch not in self.last_undo]
self._switch = not self._switch
for (i, v) in self.last_undo:
self.last_values[i] = v
self.last_undo = []
program = self.cells_changed_by(changes)
if self.with_undo:
self._switch = not self._switch
data = self.cell_values[self._switch]
base = self.cell_values[not self._switch]
# recycle and undo interact in bad ways
for rank in self.recycled_cells:
if data[rank] is not base[rank]:
self.spare[rank] = data[rank]
data[:] = base[:]
for cell in program:
if cell.recycled:
if data[cell.rank] is base[cell.rank]:
data[cell.rank] = self.spare[cell.rank]
assert data[cell.rank] is not base[cell.rank]
else:
data = self.cell_values[self._switch]
# Set new OptPar values
changed_optpars = []
for (i, v) in changes:
if i < len(self.opt_pars):
assert isinstance(v * 1.0, float), v
changed_optpars.append((i, self.last_values[i]))
self.last_values[i] = v
data[i] = self.opt_pars[i].transform_from_optimiser(v)
else:
data[i] = v
try:
if self.trace:
self.tracing_update(changes, program, data)
else:
self.plain_update(program, data)
# if non-optimiser parameter was set then undo is invalid
if self.last_undo and max(self.last_undo)[0] >= len(self.opt_pars):
self.last_undo = []
else:
self.last_undo = changed_optpars
except CalculationInterupted as detail:
if self.with_undo:
self._switch = not self._switch
for (i, v) in changed_optpars:
self.last_values[i] = v
self.last_undo = []
(cell, exception) = detail.args
raise exception
finally:
self.elapsed_time += time.time() - t0
return self.cell_values[self._switch][-1]
def cells_changed_by(self, changes):
# What OptPars have been changed determines cells to update
change_key = list(dict(changes).keys())
change_key.sort()
change_key = tuple(change_key)
if change_key in self._programs:
program = self._programs[change_key]
else:
# Make a list of the cells to update and cache it.
consequences = {}
for i in change_key:
consequences.update(self._cells[i].consequences)
self._programs[change_key] = program = [
cell for cell in self._cells if cell.rank in consequences
]
return program
def plain_update(self, program, data):
try:
for cell in program:
data[cell.rank] = cell.calc(*[data[a] for a in cell.arg_ranks])
except ParameterOutOfBoundsError as detail:
# Non-fatal error, just cancel this calculation.
raise CalculationInterupted(cell, detail)
except ArithmeticError as detail:
# Non-fatal but unexpected error. Warn and cancel this calculation.
cell.report_error(detail, data)
raise CalculationInterupted(cell, detail)
def tracing_update(self, changes, program, data):
# Does the same thing as plain_update, but also produces lots of
# output showing how long each step of the calculation takes.
# One line per call, '-' for undo, '+' for calculation
exception = None
elapsed = {}
for cell in program:
try:
t0 = time.time()
data[cell.rank] = cell.calc(*[data[a] for a in cell.arg_ranks])
t1 = time.time()
except (ParameterOutOfBoundsError, ArithmeticError) as exception:
error_cell = cell
break
elapsed[cell.rank] = t1 - t0
tds = []
for ((name, cells), width) in self._cellsGroupedForDisplay:
text = "".join([" +"[cell.rank in elapsed] for cell in cells])
elap = sum([elapsed.get(cell.rank, 0) for cell in cells])
if len(text) > width - 4:
edge_width = min(len(text), (width - 4 - 3)) // 2
elipsis = [" ", "..."][not not text.strip()]
text = text[:edge_width] + elipsis + text[-edge_width:]
tds.append("%s%4s" % (text, int(TRACE_SCALE * elap + 0.5) or ""))
par_descs = []
for (i, v) in changes:
cell = self._cells[i]
if isinstance(cell, OptPar):
par_descs.append("%s=%8.6f" % (cell.name, v))
else:
par_descs.append("%s=?" % cell.name)
par_descs = ", ".join(par_descs)[:22].ljust(22)
print(" | ".join(tds + [""]), end=" ")
if exception:
print("%15s | %s" % ("", par_descs))
error_cell.report_error(exception, data)
raise CalculationInterupted(cell, exception)
else:
print("%-15s | %s" % (repr(data[-1])[:15], par_descs))
def measure_evals_per_second(self, time_limit=1.0, wall=True, sa=False):
# Returns an estimate of the number of evaluations per second
# an each-optpar-in-turn simulated annealing type optimiser
# can achieve, spending not much more than 'time_limit' doing
# so. 'wall'=False causes process time to be used instead of
# wall time.
# 'sa' makes it simulated-annealing-like, with frequent backtracks
if wall:
now = time.time
else:
now = time.clock
x = self.get_value_array()
samples = []
elapsed = 0.0
rounds_per_sample = 2
while elapsed < time_limit and len(samples) < 5:
time.sleep(0.01)
t0 = now()
last = []
for j in range(rounds_per_sample):
for (i, v) in enumerate(x):
# Not a real change, but works like one.
self.change(last + [(i, v)])
if sa and (i + j) % 2:
last = [(i, v)]
else:
last = []
# Use one agreed on delta otherwise different cpus will finish the
# loop at different times causing chaos.
delta = now() - t0
if delta < 0.1:
# time.clock is low res, so need to ensure each sample
# is long enough to take SOME time.
rounds_per_sample *= 2
continue
else:
rate = rounds_per_sample * len(x) / delta
samples.append(rate)
elapsed += delta
if wall:
samples.sort()
return samples[len(samples) // 2]
else:
return sum(samples) / len(samples)
def _get_current_cell_value(self, cell):
return self.cell_values[self._switch][cell.rank]
def get_current_cell_values_for_defn(self, defn):
cells = self.results_by_id[id(defn)]
return [self.cell_values[self._switch][cell.rank] for cell in cells]
def __get_bounded_root(self, func, origX, direction, bound, xtol):
return find_root(
func,
origX,
direction,
bound,
xtol=xtol,
expected_exception=(ParameterOutOfBoundsError, ArithmeticError),
)
def _get_current_cell_interval(self, opt_par, dropoff, xtol=None):
# (min, opt, max) tuples for each parameter where f(min) ==
# f(max) == f(opt)-dropoff. Uses None when a bound is hit.
# assert self.optimised, "Call optimise() first"
origY = self.testfunction()
(lower, upper) = opt_par.get_optimiser_bounds()
opt_value = self._get_current_cell_value(opt_par)
origX = opt_par.transform_to_optimiser(opt_value)
def func(x):
Y = self.change([(opt_par.rank, x)])
return Y - (origY - dropoff)
try:
lowX = self.__get_bounded_root(func, origX, -1, lower, xtol)
highX = self.__get_bounded_root(func, origX, +1, upper, xtol)
finally:
func(origX)
triple = []
for x in [lowX, origX, highX]:
if x is not None:
x = opt_par.transform_from_optimiser(x)
triple.append(x)
return tuple(triple)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
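The only environment access in this row is the membership test "COGENT3_TRACE" in os.environ, which enables tracing whenever the variable exists, regardless of its value. The same presence-only toggle, sketched in Go for consistency with the other examples; the variable name is kept, everything else is illustrative:

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.LookupEnv distinguishes "set to empty" from "unset", matching the
	// presence-only semantics of `"COGENT3_TRACE" in os.environ` above.
	_, traceDefault := os.LookupEnv("COGENT3_TRACE")
	fmt.Println("tracing enabled:", traceDefault)
}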
src/common/dao/testutils.go | // copyright (c) 2017 vmware, inc. all rights reserved.
//
// licensed under the apache license, version 2.0 (the "license");
// you may not use this file except in compliance with the license.
// you may obtain a copy of the license at
//
// http://www.apache.org/licenses/license-2.0
//
// unless required by applicable law or agreed to in writing, software
// distributed under the license is distributed on an "as is" basis,
// without warranties or conditions of any kind, either express or implied.
// see the license for the specific language governing permissions and
// limitations under the license.
package dao
import (
"fmt"
"os"
"strconv"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils/log"
)
var defaultRegistered = false
// PrepareTestForMySQL is for test only.
func PrepareTestForMySQL() {
}
// PrepareTestForSQLite is for test only.
func PrepareTestForSQLite() {
}
// PrepareTestForPostgresSQL is for test only.
func PrepareTestForPostgresSQL() {
dbHost := os.Getenv("POSTGRESQL_HOST")
if len(dbHost) == 0 {
log.Fatalf("environment variable POSTGRESQL_HOST is not set")
}
dbUser := os.Getenv("POSTGRESQL_USR")
if len(dbUser) == 0 {
log.Fatalf("environment variable POSTGRESQL_USR is not set")
}
dbPortStr := os.Getenv("POSTGRESQL_PORT")
if len(dbPortStr) == 0 {
log.Fatalf("environment variable POSTGRESQL_PORT is not set")
}
dbPort, err := strconv.Atoi(dbPortStr)
if err != nil {
log.Fatalf("invalid POSTGRESQL_PORT: %v", err)
}
dbPassword := os.Getenv("POSTGRESQL_PWD")
dbDatabase := os.Getenv("POSTGRESQL_DATABASE")
if len(dbDatabase) == 0 {
log.Fatalf("environment variable POSTGRESQL_DATABASE is not set")
}
database := &models.Database{
Type: "postgresql",
PostGreSQL: &models.PostGreSQL{
Host: dbHost,
Port: dbPort,
Username: dbUser,
Password: dbPassword,
Database: dbDatabase,
},
}
log.Infof("POSTGRES_HOST: %s, POSTGRES_USR: %s, POSTGRES_PORT: %d, POSTGRES_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
initDatabaseForTest(database)
}
func initDatabaseForTest(db *models.Database) {
database, err := getDatabase(db)
if err != nil {
panic(err)
}
log.Infof("initializing database: %s", database.String())
alias := database.Name()
if !defaultRegistered {
defaultRegistered = true
alias = "default"
}
if err := database.Register(alias); err != nil {
panic(err)
}
if err := database.UpgradeSchema(); err != nil {
panic(err)
}
if alias != "default" {
if err = globalOrm.Using(alias); err != nil {
log.Fatalf("failed to create new orm: %v", err)
}
}
}
// PrepareTestData -- Clean and Create data
func PrepareTestData(clearSqls []string, initSqls []string) {
o := GetOrmer()
for _, sql := range clearSqls {
fmt.Printf("Exec sql:%v\n", sql)
_, err := o.Raw(sql).Exec()
if err != nil {
fmt.Printf("failed to clear database, sql:%v, error: %v", sql, err)
}
}
for _, sql := range initSqls {
_, err := o.Raw(sql).Exec()
if err != nil {
fmt.Printf("failed to init database, sql:%v, error: %v", sql, err)
}
}
}
// ArrayEqual ...
func ArrayEqual(arrayA, arrayB []int) bool {
if len(arrayA) != len(arrayB) {
return false
}
size := len(arrayA)
for i := 0; i < size; i++ {
if arrayA[i] != arrayB[i] {
return false
}
}
return true
}
| [
"\"POSTGRESQL_HOST\"",
"\"POSTGRESQL_USR\"",
"\"POSTGRESQL_PORT\"",
"\"POSTGRESQL_PWD\"",
"\"POSTGRESQL_DATABASE\""
]
| []
| [
"POSTGRESQL_HOST",
"POSTGRESQL_PWD",
"POSTGRESQL_USR",
"POSTGRESQL_PORT",
"POSTGRESQL_DATABASE"
]
| [] | ["POSTGRESQL_HOST", "POSTGRESQL_PWD", "POSTGRESQL_USR", "POSTGRESQL_PORT", "POSTGRESQL_DATABASE"] | go | 5 | 0 | |
PicoCTF-Penyelesaian/lainnya/006. Nice netcat... (SOLVED)/int into chr.py | nums = [112, 105, 99, 111, 67, 84, 70, 123, 103, 48, 48, 100, 95, 107, 49, 116, 116, 121, 33, 95, 110, 49, 99, 51, 95, 107, 49, 116, 116, 121, 33, 95, 55, 99, 48, 56, 50, 49, 102, 53, 125, 10]
flag = ""
for number in nums:
flag += chr(number)
print(flag) | []
| []
| []
| [] | [] | python | null | null | null |
src/main/java/xyz/ylimit/androcov/Util.java | package xyz.ylimit.androcov;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import java.io.*;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.logging.Logger;
/**
* Created by liyc on 12/23/15.
* useful utils
*/
public class Util {
static final Logger LOGGER = Logger.getLogger(Config.PROJECT_NAME);
private static String pathToAAPT = System.getenv("ANDROID_HOME") + "/build-tools/25.0.2";
/**
* Extracts the package name from an apk file
* Update "pathToAAPT" to the local path of the android build tools!!!!!
* @param pathToAPK: path to the apk file
* @return package name
*/
static String extractPackageName(File pathToAPK) {
Process aapt = null;
String output = null;
InputStream adbout = null;
try {
aapt = Runtime.getRuntime().exec(pathToAAPT +"/aapt dump badging "+pathToAPK);
} catch (IOException e1) {
e1.printStackTrace();
}
try {
adbout = aapt.getInputStream();
output= IOUtils.toString(adbout);
} catch (IOException e1) {
e1.printStackTrace();
}
output = output.split("'")[1];
return output;
}
/**
* If the package has more than 2 parts, it returns the first 2 parts
* @param pkg
* @return
*/
static String refinePackage(String pkg) {
String parts [] = pkg.split("\\.");
if(parts.length>2)
return parts[0]+"."+parts[1];
else return pkg;
}
public static String getTimeString() {
long timeMillis = System.currentTimeMillis();
SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMdd-hhmmss");
Date date = new Date(timeMillis);
return sdf.format(date);
}
public static void logException(Exception e) {
StringWriter sw = new StringWriter();
e.printStackTrace(new PrintWriter(sw));
Util.LOGGER.warning(sw.toString());
}
public static float safeDivide(int obfuscated, int total) {
if (total <= 0) return 1;
return (float) obfuscated / total;
}
public static void signAPK(String apkPath) {
Runtime r = Runtime.getRuntime();
Path keystore = Paths.get("debug.keystore").toAbsolutePath();
try {
Util.copyResourceToFile(Util.class, keystore.getFileName().toString(), keystore);
Path keystorePath = Paths.get(String.format("%s/debug.keystore", Config.tempDirPath)).toAbsolutePath();
String signCmd = String.format(
"jarsigner -verbose -sigalg SHA1withRSA -digestalg SHA1 -storepass android " +
"-keystore %s %s androiddebugkey", keystorePath, apkPath);
Files.copy(keystore, keystorePath);
Process p = r.exec(signCmd);
ReadStream s1 = new ReadStream("stdin", p.getInputStream());
ReadStream s2 = new ReadStream("stderr", p.getErrorStream());
s1.start();
s2.start();
p.waitFor();
} catch (IOException | InterruptedException e) {
e.printStackTrace();
}
}
/**
* Export a resource embedded into a Jar file to the local file path.
* The given resource name has to include the path of the resource.
* "/resname" will lead to searching the resource in the root folder "resname" will search in the classes package.
*
* @param resourceName ie.: "/SmartLibrary.dll"
* @return The path to the exported resource
* @throws Exception
*/
public static void copyResourceToFile(Class askingClass, String resourceName, Path outFile) throws IOException {
InputStream stream = null;
OutputStream resStreamOut = null;
//String jarFolder;
try {
stream = askingClass.getClassLoader().getResourceAsStream(resourceName);//note that each / is a directory down in the "jar tree" been the jar the root of the tree
if (stream == null) {
throw new RuntimeException("Cannot get resource \"" + resourceName + "\" from Jar file.");
}
int readBytes;
byte[] buffer = new byte[4096];
resStreamOut = new FileOutputStream(outFile.toFile());
while ((readBytes = stream.read(buffer)) > 0) {
resStreamOut.write(buffer, 0, readBytes);
}
} finally {
stream.close();
resStreamOut.close();
}
}
}
| [
"\"ANDROID_HOME\""
]
| []
| [
"ANDROID_HOME"
]
| [] | ["ANDROID_HOME"] | java | 1 | 0 | |
tests/backup/backup_test.go | package tests
import (
"context"
"fmt"
"os"
"path"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/reporters"
. "github.com/onsi/gomega"
"github.com/pborman/uuid"
api "github.com/portworx/px-backup-api/pkg/apis/v1"
driver_api "github.com/portworx/torpedo/drivers/api"
"github.com/portworx/torpedo/drivers/backup"
"github.com/portworx/torpedo/drivers/node"
"github.com/portworx/torpedo/drivers/scheduler"
"github.com/portworx/torpedo/drivers/scheduler/spec"
. "github.com/portworx/torpedo/tests"
"github.com/sirupsen/logrus"
appsapi "k8s.io/api/apps/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
clusterName = "tp-cluster"
restoreNamePrefix = "tp-restore"
configMapName = "kubeconfigs"
defaultTimeout = 5 * time.Minute
defaultRetryInterval = 10 * time.Second
sourceClusterName = "source-cluster"
destinationClusterName = "destination-cluster"
backupLocationName = "tp-blocation"
storkDeploymentName = "stork"
storkDeploymentNamespace = "kube-system"
appReadinessTimeout = 10 * time.Minute
)
var (
orgID string
bucketName string
)
var _ = BeforeSuite(func() {
logrus.Infof("Init instance")
InitInstance()
})
func TestBackup(t *testing.T) {
RegisterFailHandler(Fail)
var specReporters []Reporter
junitReporter := reporters.NewJUnitReporter("/testresults/junit_basic.xml")
specReporters = append(specReporters, junitReporter)
RunSpecsWithDefaultAndCustomReporters(t, "Torpedo : Backup", specReporters)
}
func TearDownBackupRestore(bkpNamespaces []string, restoreNamespaces []string) {
for _, bkpNamespace := range bkpNamespaces {
BackupName := fmt.Sprintf("%s-%s", BackupNamePrefix, bkpNamespace)
DeleteBackup(BackupName, OrgID)
}
for _, restoreNamespace := range restoreNamespaces {
RestoreName := fmt.Sprintf("%s-%s", restoreNamePrefix, restoreNamespace)
DeleteRestore(RestoreName, OrgID)
}
provider := GetProvider()
DeleteCluster(destinationClusterName, OrgID)
DeleteCluster(sourceClusterName, OrgID)
DeleteBackupLocation(backupLocationName, OrgID)
DeleteCloudCredential(CredName, OrgID, CloudCredUID)
DeleteBucket(provider, BucketName)
}
var _ = AfterSuite(func() {
//PerformSystemCheck()
//ValidateCleanup()
// BackupCleanup()
})
func TestMain(m *testing.M) {
// call flag.Parse() here if TestMain uses flags
ParseFlags()
os.Exit(m.Run())
}
// This test performs basic test of starting an application, backing it up and killing stork while
// performing backup.
var _ = Describe("{BackupCreateKillStorkRestore}", func() {
var (
contexts []*scheduler.Context
bkpNamespaces []string
namespaceMapping map[string]string
taskNamePrefix = "backupcreaterestore"
)
labelSelectores := make(map[string]string)
namespaceMapping = make(map[string]string)
volumeParams := make(map[string]map[string]string)
It("has to connect and check the backup setup", func() {
Step("Setup backup", func() {
// Set cluster context to cluster where torpedo is running
SetClusterContext("")
SetupBackup(taskNamePrefix)
})
sourceClusterConfigPath, err := GetSourceClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for source cluster. Error: [%v]", err))
SetClusterContext(sourceClusterConfigPath)
Step("Deploy applications", func() {
contexts = make([]*scheduler.Context, 0)
bkpNamespaces = make([]string, 0)
for i := 0; i < Inst().GlobalScaleFactor; i++ {
taskName := fmt.Sprintf("%s-%d", taskNamePrefix, i)
logrus.Infof("Task name %s\n", taskName)
appContexts := ScheduleApplications(taskName)
contexts = append(contexts, appContexts...)
for _, ctx := range appContexts {
// Override default App readiness time out of 5 mins with 10 mins
ctx.ReadinessTimeout = appReadinessTimeout
namespace := GetAppNamespace(ctx, taskName)
bkpNamespaces = append(bkpNamespaces, namespace)
}
}
// Skip volume validation until other volume providers are implemented.
for _, ctx := range contexts {
ctx.SkipVolumeValidation = true
}
ValidateApplications(contexts)
for _, ctx := range contexts {
for vol, params := range GetVolumeParameters(ctx) {
volumeParams[vol] = params
}
}
})
logrus.Info("Wait for IO to proceed\n")
time.Sleep(time.Minute * 5)
// TODO(stgleb): Add multi-namespace backup when ready in px-backup
for _, namespace := range bkpNamespaces {
backupName := fmt.Sprintf("%s-%s", BackupNamePrefix, namespace)
Step(fmt.Sprintf("Create backup full name %s:%s:%s",
sourceClusterName, namespace, backupName), func() {
CreateBackup(backupName,
sourceClusterName, backupLocationName, BackupLocationUID,
[]string{namespace}, labelSelectores, orgID)
})
}
Step("Kill stork during backup", func() {
// setup task to delete stork pods as soon as it starts doing backup
for _, namespace := range bkpNamespaces {
backupName := fmt.Sprintf("%s-%s", BackupNamePrefix, namespace)
req := &api.BackupInspectRequest{
Name: backupName,
OrgId: orgID,
}
logrus.Infof("backup %s wait for running", backupName)
err := Inst().Backup.WaitForBackupRunning(context.Background(),
req, BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
if err != nil {
logrus.Warnf("backup %s wait for running err %v",
backupName, err)
continue
} else {
break
}
}
ctx := &scheduler.Context{
App: &spec.AppSpec{
SpecList: []interface{}{
&appsapi.Deployment{
ObjectMeta: meta_v1.ObjectMeta{
Name: storkDeploymentName,
Namespace: storkDeploymentNamespace,
},
},
},
},
}
logrus.Infof("Execute task for killing stork")
err := Inst().S.DeleteTasks(ctx, nil)
Expect(err).NotTo(HaveOccurred())
})
for _, namespace := range bkpNamespaces {
backupName := fmt.Sprintf("%s-%s", BackupNamePrefix, namespace)
Step(fmt.Sprintf("Wait for backup %s to complete", backupName), func() {
err := Inst().Backup.WaitForBackupCompletion(
context.Background(),
backupName, orgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to wait for backup [%s] to complete. Error: [%v]",
backupName, err))
})
}
Step("teardown all applications on source cluster before switching context to destination cluster", func() {
for _, ctx := range contexts {
TearDownContext(ctx, map[string]bool{
SkipClusterScopedObjects: true,
scheduler.OptionsWaitForResourceLeakCleanup: true,
scheduler.OptionsWaitForDestroy: true,
})
}
})
destClusterConfigPath, err := GetDestinationClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for destination cluster. Error: [%v]", err))
SetClusterContext(destClusterConfigPath)
for _, namespace := range bkpNamespaces {
backupName := fmt.Sprintf("%s-%s", BackupNamePrefix, namespace)
restoreName := fmt.Sprintf("%s-%s", restoreNamePrefix, namespace)
Step(fmt.Sprintf("Create restore %s:%s:%s from backup %s:%s:%s",
destinationClusterName, namespace, restoreName,
sourceClusterName, namespace, backupName), func() {
CreateRestore(restoreName, backupName, namespaceMapping,
destinationClusterName, orgID)
})
}
for _, namespace := range bkpNamespaces {
restoreName := fmt.Sprintf("%s-%s", restoreNamePrefix, namespace)
Step(fmt.Sprintf("Wait for restore %s:%s to complete",
namespace, restoreName), func() {
err := Inst().Backup.WaitForRestoreCompletion(context.Background(), restoreName, orgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to wait for restore [%s] to complete. Error: [%v]",
restoreName, err))
})
}
// Change namespaces to restored apps only after backed up apps are cleaned up
// to avoid switching back namespaces to backup namespaces
Step("Validate Restored applications", func() {
destClusterConfigPath, err := GetDestinationClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for destination cluster. Error: [%v]", err))
SetClusterContext(destClusterConfigPath)
// Populate contexts
for _, ctx := range contexts {
ctx.SkipClusterScopedObject = true
ctx.SkipVolumeValidation = true
}
ValidateRestoredApplications(contexts, volumeParams)
})
Step("teardown all restored apps", func() {
for _, ctx := range contexts {
TearDownContext(ctx, nil)
}
})
Step("teardown backup objects", func() {
TearDownBackupRestore(bkpNamespaces, bkpNamespaces)
})
})
})
// This performs scale test of px-backup and kills stork in the middle of
// backup process.
var _ = Describe("{MultiProviderBackupKillStork}", func() {
var (
kubeconfigs string
kubeconfigList []string
)
contexts := make(map[string][]*scheduler.Context)
bkpNamespaces := make(map[string][]string)
labelSelectores := make(map[string]string)
namespaceMapping := make(map[string]string)
taskNamePrefix := "backup-multi-provider"
providerUID := make(map[string]string)
It("has to connect and check the backup setup", func() {
providers := getProviders()
Step("Setup backup", func() {
kubeconfigs = os.Getenv("KUBECONFIGS")
if len(kubeconfigs) == 0 {
Expect(kubeconfigs).NotTo(BeEmpty(),
fmt.Sprintf("KUBECONFIGS %s must not be empty", kubeconfigs))
}
kubeconfigList = strings.Split(kubeconfigs, ",")
// Validate user has provided at least 1 kubeconfig for cluster
if len(kubeconfigList) == 0 {
Expect(kubeconfigList).NotTo(BeEmpty(),
fmt.Sprintf("kubeconfigList %v must have at least one", kubeconfigList))
}
// Set cluster context to cluster where torpedo is running
SetClusterContext("")
DumpKubeconfigs(kubeconfigList)
for _, provider := range providers {
logrus.Infof("Run Setup backup with object store provider: %s", provider)
orgID := fmt.Sprintf("%s-%s-%s", strings.ToLower(taskNamePrefix),
provider, Inst().InstanceID)
bucketName = fmt.Sprintf("%s-%s-%s", BucketNamePrefix, provider, Inst().InstanceID)
CredName := fmt.Sprintf("%s-%s", CredName, provider)
CloudCredUID = uuid.New()
backupLocation := fmt.Sprintf("%s-%s", backupLocationName, provider)
providerUID[provider] = uuid.New()
CreateBucket(provider, bucketName)
CreateOrganization(orgID)
CreateCloudCredential(provider, CredName, CloudCredUID, orgID)
CreateBackupLocation(provider, backupLocation, providerUID[provider], CredName, CloudCredUID, BucketName, orgID)
CreateProviderClusterObject(provider, kubeconfigList, CredName, orgID)
}
})
// Moment in time when tests should finish
end := time.Now().Add(time.Duration(Inst().MinRunTimeMins) * time.Minute)
for time.Now().Before(end) {
Step("Deploy applications", func() {
for _, provider := range providers {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
logrus.Infof("Set context to %s", providerClusterConfigPath)
SetClusterContext(providerClusterConfigPath)
providerContexts := make([]*scheduler.Context, 0)
providerNamespaces := make([]string, 0)
// Rescan specs for each provider to reload provider specific specs
logrus.Infof("Rescan specs for provider %s", provider)
err = Inst().S.RescanSpecs(Inst().SpecDir, provider)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to rescan specs from %s for storage provider %s. Error: [%v]",
Inst().SpecDir, provider, err))
logrus.Infof("Start deploy applications for provider %s", provider)
for i := 0; i < Inst().GlobalScaleFactor; i++ {
taskName := fmt.Sprintf("%s-%s-%d", taskNamePrefix, provider, i)
logrus.Infof("Task name %s\n", taskName)
appContexts := ScheduleApplications(taskName)
providerContexts = append(providerContexts, appContexts...)
for _, ctx := range appContexts {
namespace := GetAppNamespace(ctx, taskName)
providerNamespaces = append(providerNamespaces, namespace)
}
}
contexts[provider] = providerContexts
bkpNamespaces[provider] = providerNamespaces
}
})
Step("Validate applications", func() {
for _, provider := range providers {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
SetClusterContext(providerClusterConfigPath)
// In case of non-portworx volume provider skip volume validation until
// other volume providers are implemented.
for _, ctx := range contexts[provider] {
ctx.SkipVolumeValidation = true
ctx.ReadinessTimeout = BackupRestoreCompletionTimeoutMin * time.Minute
}
logrus.Infof("validate applications for provider %s", provider)
ValidateApplications(contexts[provider])
}
})
logrus.Info("Wait for IO to proceed\n")
time.Sleep(time.Minute * 5)
// Perform all backup operations concurrently
// TODO(stgleb): Add multi-namespace backup when ready in px-backup
for _, provider := range providers {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
SetClusterContext(providerClusterConfigPath)
ctx, _ := context.WithTimeout(context.Background(),
BackupRestoreCompletionTimeoutMin*time.Minute)
errChan := make(chan error)
for _, namespace := range bkpNamespaces[provider] {
go func(provider, namespace string) {
clusterName := fmt.Sprintf("%s-%s", clusterName, provider)
backupLocation := fmt.Sprintf("%s-%s", backupLocationName, provider)
backupName := fmt.Sprintf("%s-%s-%s", BackupNamePrefix, provider,
namespace)
orgID := fmt.Sprintf("%s-%s-%s", strings.ToLower(taskNamePrefix),
provider, Inst().InstanceID)
// NOTE: We don't use CreateBackup/Restore method here since it has ginkgo assertion
// which must be called inside of goroutine with GinkgoRecover https://onsi.github.io/ginkgo/#marking-specs-as-failed
Step(fmt.Sprintf("Create backup full name %s:%s:%s in organization %s",
clusterName, namespace, backupName, orgID), func() {
backupDriver := Inst().Backup
bkpCreateRequest := &api.BackupCreateRequest{
CreateMetadata: &api.CreateMetadata{
Name: backupName,
OrgId: orgID,
},
BackupLocation: backupLocation,
Cluster: clusterName,
Namespaces: []string{namespace},
LabelSelectors: labelSelectores,
}
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
_, err = backupDriver.CreateBackup(ctx, bkpCreateRequest)
errChan <- err
})
}(provider, namespace)
}
for i := 0; i < len(bkpNamespaces[provider]); i++ {
select {
case <-ctx.Done():
Expect(ctx.Err()).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to complete backup for provider %s cluster. Error: [%v]", provider, ctx.Err()))
case err := <-errChan:
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to complete backup for provider %s cluster. Error: [%v]", provider, err))
}
}
}
Step("Kill stork during backup", func() {
for provider, providerNamespaces := range bkpNamespaces {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
SetClusterContext(providerClusterConfigPath)
logrus.Infof("Kill stork during backup for provider %s", provider)
// setup task to delete stork pods as soon as it starts doing backup
for _, namespace := range providerNamespaces {
backupName := fmt.Sprintf("%s-%s-%s", BackupNamePrefix, provider, namespace)
orgID := fmt.Sprintf("%s-%s-%s", strings.ToLower(taskNamePrefix),
provider, Inst().InstanceID)
// Wait until all backups/restores start running
req := &api.BackupInspectRequest{
Name: backupName,
OrgId: orgID,
}
logrus.Infof("backup %s wait for running", backupName)
err := Inst().Backup.WaitForBackupRunning(context.Background(),
req, BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
Expect(err).NotTo(HaveOccurred())
}
killStork()
}
})
// wait until all backups are completed, there is no need to parallel here
for provider, namespaces := range bkpNamespaces {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
SetClusterContext(providerClusterConfigPath)
for _, namespace := range namespaces {
backupName := fmt.Sprintf("%s-%s-%s", BackupNamePrefix, provider, namespace)
orgID := fmt.Sprintf("%s-%s-%s", strings.ToLower(taskNamePrefix),
provider, Inst().InstanceID)
Step(fmt.Sprintf("Wait for backup %s to complete in organization %s",
backupName, orgID), func() {
err := Inst().Backup.WaitForBackupCompletion(
context.Background(),
backupName, orgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to wait for backup [%s] to complete. Error: [%v]",
backupName, err))
})
}
}
Step("teardown all applications on source cluster before switching context to destination cluster", func() {
for _, provider := range providers {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
logrus.Infof("Set config to %s", providerClusterConfigPath)
SetClusterContext(providerClusterConfigPath)
for _, ctx := range contexts[provider] {
TearDownContext(ctx, map[string]bool{
SkipClusterScopedObjects: true,
scheduler.OptionsWaitForResourceLeakCleanup: true,
scheduler.OptionsWaitForDestroy: true,
})
}
}
})
for provider := range bkpNamespaces {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
SetClusterContext(providerClusterConfigPath)
ctx, _ := context.WithTimeout(context.Background(),
BackupRestoreCompletionTimeoutMin*time.Minute)
errChan := make(chan error)
for _, namespace := range bkpNamespaces[provider] {
go func(provider, namespace string) {
clusterName := fmt.Sprintf("%s-%s", clusterName, provider)
backupName := fmt.Sprintf("%s-%s-%s", BackupNamePrefix, provider, namespace)
restoreName := fmt.Sprintf("%s-%s-%s", restoreNamePrefix, provider, namespace)
orgID := fmt.Sprintf("%s-%s-%s", strings.ToLower(taskNamePrefix),
provider, Inst().InstanceID)
Step(fmt.Sprintf("Create restore full name %s:%s:%s in organization %s",
clusterName, namespace, backupName, orgID), func() {
// NOTE: We don't use CreateBackup/Restore method here since it has ginkgo assertion
// which must be called inside of goroutine with GinkgoRecover https://onsi.github.io/ginkgo/#marking-specs-as-failed
backupDriver := Inst().Backup
createRestoreReq := &api.RestoreCreateRequest{
CreateMetadata: &api.CreateMetadata{
Name: restoreName,
OrgId: orgID,
},
Backup: backupName,
Cluster: clusterName,
NamespaceMapping: namespaceMapping,
}
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
_, err = backupDriver.CreateRestore(ctx, createRestoreReq)
errChan <- err
})
}(provider, namespace)
}
for i := 0; i < len(bkpNamespaces[provider]); i++ {
select {
case <-ctx.Done():
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to complete backup for provider %s cluster. Error: [%v]", provider, ctx.Err()))
case err := <-errChan:
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to complete backup for provider %s cluster. Error: [%v]", provider, err))
}
}
}
Step("Kill stork during restore", func() {
for provider, providerNamespaces := range bkpNamespaces {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
SetClusterContext(providerClusterConfigPath)
logrus.Infof("Kill stork during restore for provider %s", provider)
// setup task to delete stork pods as soon as it starts doing backup
for _, namespace := range providerNamespaces {
restoreName := fmt.Sprintf("%s-%s-%s", restoreNamePrefix, provider, namespace)
orgID := fmt.Sprintf("%s-%s-%s", strings.ToLower(taskNamePrefix),
provider, Inst().InstanceID)
// Wait until all backups/restores start running
req := &api.RestoreInspectRequest{
Name: restoreName,
OrgId: orgID,
}
logrus.Infof("restore %s wait for running", restoreName)
err := Inst().Backup.WaitForRestoreRunning(context.Background(),
req, BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
Expect(err).NotTo(HaveOccurred())
}
logrus.Infof("Kill stork task")
killStork()
}
})
for provider, providerNamespaces := range bkpNamespaces {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
SetClusterContext(providerClusterConfigPath)
for _, namespace := range providerNamespaces {
restoreName := fmt.Sprintf("%s-%s-%s", restoreNamePrefix, provider, namespace)
orgID := fmt.Sprintf("%s-%s-%s", strings.ToLower(taskNamePrefix),
provider, Inst().InstanceID)
Step(fmt.Sprintf("Wait for restore %s:%s to complete",
namespace, restoreName), func() {
err := Inst().Backup.WaitForRestoreCompletion(context.Background(),
restoreName, orgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to wait for restore [%s] to complete. Error: [%v]",
restoreName, err))
})
}
}
// Change namespaces to restored apps only after backed up apps are cleaned up
// to avoid switching back namespaces to backup namespaces
Step("Validate Restored applications", func() {
// Populate contexts
for _, provider := range providers {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
SetClusterContext(providerClusterConfigPath)
for _, ctx := range contexts[provider] {
ctx.SkipClusterScopedObject = true
ctx.SkipVolumeValidation = true
ctx.ReadinessTimeout = BackupRestoreCompletionTimeoutMin * time.Minute
err := Inst().S.WaitForRunning(ctx, defaultTimeout, defaultRetryInterval)
Expect(err).NotTo(HaveOccurred())
}
ValidateApplications(contexts[provider])
}
})
Step("teardown all restored apps", func() {
for _, provider := range providers {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
SetClusterContext(providerClusterConfigPath)
for _, ctx := range contexts[provider] {
TearDownContext(ctx, map[string]bool{
scheduler.OptionsWaitForResourceLeakCleanup: true,
scheduler.OptionsWaitForDestroy: true,
})
}
}
})
Step("teardown backup and restore objects", func() {
for provider, providerNamespaces := range bkpNamespaces {
logrus.Infof("teardown backup and restore objects for provider %s", provider)
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
SetClusterContext(providerClusterConfigPath)
ctx, _ := context.WithTimeout(context.Background(),
BackupRestoreCompletionTimeoutMin*time.Minute)
errChan := make(chan error)
for _, namespace := range providerNamespaces {
go func(provider, namespace string) {
clusterName := fmt.Sprintf("%s-%s", clusterName, provider)
backupName := fmt.Sprintf("%s-%s-%s", BackupNamePrefix, provider, namespace)
orgID := fmt.Sprintf("%s-%s-%s", strings.ToLower(taskNamePrefix),
provider, Inst().InstanceID)
Step(fmt.Sprintf("Delete backup full name %s:%s:%s",
clusterName, namespace, backupName), func() {
backupDriver := Inst().Backup
bkpDeleteRequest := &api.BackupDeleteRequest{
Name: backupName,
OrgId: orgID,
}
// ctx, err = backup.GetPxCentralAdminCtx()
ctx, err = backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
_, err = backupDriver.DeleteBackup(ctx, bkpDeleteRequest)
ctx, _ := context.WithTimeout(context.Background(),
BackupRestoreCompletionTimeoutMin*time.Minute)
if err = backupDriver.WaitForBackupDeletion(ctx, backupName, orgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second); err != nil {
errChan <- err
return
}
errChan <- err
})
}(provider, namespace)
go func(provider, namespace string) {
clusterName := fmt.Sprintf("%s-%s", clusterName, provider)
restoreName := fmt.Sprintf("%s-%s-%s", restoreNamePrefix, provider, namespace)
orgID := fmt.Sprintf("%s-%s-%s", strings.ToLower(taskNamePrefix),
provider, Inst().InstanceID)
Step(fmt.Sprintf("Delete restore full name %s:%s:%s",
clusterName, namespace, restoreName), func() {
backupDriver := Inst().Backup
deleteRestoreReq := &api.RestoreDeleteRequest{
OrgId: orgID,
Name: restoreName,
}
//ctx, err = backup.GetPxCentralAdminCtx()
ctx, err = backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
_, err = backupDriver.DeleteRestore(ctx, deleteRestoreReq)
ctx, _ := context.WithTimeout(context.Background(),
BackupRestoreCompletionTimeoutMin*time.Minute)
logrus.Infof("Wait for restore %s is deleted", restoreName)
if err = backupDriver.WaitForRestoreDeletion(ctx, restoreName, orgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second); err != nil {
errChan <- err
return
}
errChan <- err
})
}(provider, namespace)
}
for i := 0; i < len(providerNamespaces)*2; i++ {
select {
case <-ctx.Done():
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to complete backup for provider %s cluster. Error: [%v]", provider, ctx.Err()))
case err := <-errChan:
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to complete backup for provider %s cluster. Error: [%v]", provider, err))
}
}
}
})
}
Step("teardown backup objects for test", func() {
for _, provider := range providers {
providerClusterConfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for provider %s cluster. Error: [%v]", provider, err))
SetClusterContext(providerClusterConfigPath)
logrus.Infof("Run Setup backup with object store provider: %s", provider)
orgID := fmt.Sprintf("%s-%s-%s", strings.ToLower(taskNamePrefix), provider, Inst().InstanceID)
bucketName := fmt.Sprintf("%s-%s-%s", BucketNamePrefix, provider, Inst().InstanceID)
CredName := fmt.Sprintf("%s-%s", CredName, provider)
backupLocation := fmt.Sprintf("%s-%s", backupLocationName, provider)
clusterName := fmt.Sprintf("%s-%s", clusterName, provider)
DeleteCluster(clusterName, orgID)
DeleteBackupLocation(backupLocation, orgID)
DeleteCloudCredential(CredName, orgID, CloudCredUID)
DeleteBucket(provider, bucketName)
}
})
})
})
func killStork() {
ctx := &scheduler.Context{
App: &spec.AppSpec{
SpecList: []interface{}{
&appsapi.Deployment{
ObjectMeta: meta_v1.ObjectMeta{
Name: storkDeploymentName,
Namespace: storkDeploymentNamespace,
},
},
},
},
}
logrus.Infof("Execute task for killing stork")
err := Inst().S.DeleteTasks(ctx, nil)
Expect(err).NotTo(HaveOccurred())
}
// This test crashes volume driver (PX) while backup is in progress
var _ = Describe("{BackupCrashVolDriver}", func() {
var contexts []*scheduler.Context
var namespaceMapping map[string]string
taskNamePrefix := "backupcrashvoldriver"
labelSelectors := make(map[string]string)
volumeParams := make(map[string]map[string]string)
bkpNamespaces := make([]string, 0)
It("has to complete backup and restore", func() {
// Set cluster context to cluster where torpedo is running
SetClusterContext("")
SetupBackup(taskNamePrefix)
sourceClusterConfigPath, err := GetSourceClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for source cluster. Error: [%v]", err))
SetClusterContext(sourceClusterConfigPath)
Step("Deploy applications", func() {
contexts = make([]*scheduler.Context, 0)
for i := 0; i < Inst().GlobalScaleFactor; i++ {
taskName := fmt.Sprintf("%s-%d", taskNamePrefix, i)
appContexts := ScheduleApplications(taskName)
contexts = append(contexts, appContexts...)
for _, ctx := range appContexts {
// Override default App readiness time out of 5 mins with 10 mins
ctx.ReadinessTimeout = appReadinessTimeout
namespace := GetAppNamespace(ctx, taskName)
bkpNamespaces = append(bkpNamespaces, namespace)
}
}
// Override default App readiness time out of 5 mins with 10 mins
for _, ctx := range contexts {
ctx.ReadinessTimeout = appReadinessTimeout
}
ValidateApplications(contexts)
for _, ctx := range contexts {
for vol, params := range GetVolumeParameters(ctx) {
volumeParams[vol] = params
}
}
})
for _, bkpNamespace := range bkpNamespaces {
BackupName := fmt.Sprintf("%s-%s", BackupNamePrefix, bkpNamespace)
Step(fmt.Sprintf("Create Backup [%s]", BackupName), func() {
CreateBackup(BackupName, sourceClusterName, backupLocationName, BackupLocationUID,
[]string{bkpNamespace}, labelSelectors, OrgID)
})
triggerFn := func() (bool, error) {
backupInspectReq := &api.BackupInspectRequest{
Name: BackupName,
OrgId: OrgID,
}
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
err = Inst().Backup.WaitForBackupRunning(ctx, backupInspectReq, defaultTimeout, defaultRetryInterval)
if err != nil {
logrus.Warnf("[TriggerCheck]: Got error while checking if backup [%s] has started.\n Error : [%v]\n",
BackupName, err)
return false, err
}
logrus.Infof("[TriggerCheck]: backup [%s] has started.\n",
BackupName)
return true, nil
}
triggerOpts := &driver_api.TriggerOptions{
TriggerCb: triggerFn,
}
bkpNode := GetNodesForBackup(BackupName, bkpNamespace,
OrgID, sourceClusterName, triggerOpts)
Expect(len(bkpNode)).NotTo(Equal(0),
fmt.Sprintf("Did not found any node on which backup [%v] is running.",
BackupName))
Step(fmt.Sprintf("Kill volume driver %s on node [%v] after backup [%s] starts",
Inst().V.String(), bkpNode[0].Name, BackupName), func() {
// Just kill storage driver on one of the node where volume backup is in progress
Inst().V.StopDriver(bkpNode[0:1], true, triggerOpts)
})
Step(fmt.Sprintf("Wait for Backup [%s] to complete", BackupName), func() {
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
err = Inst().Backup.WaitForBackupCompletion(ctx, BackupName, OrgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to wait for backup [%s] to complete. Error: [%v]",
BackupName, err))
})
}
for _, bkpNamespace := range bkpNamespaces {
BackupName := fmt.Sprintf("%s-%s", BackupNamePrefix, bkpNamespace)
RestoreName := fmt.Sprintf("%s-%s", restoreNamePrefix, bkpNamespace)
Step(fmt.Sprintf("Create Restore [%s]", RestoreName), func() {
CreateRestore(RestoreName, BackupName,
namespaceMapping, destinationClusterName, OrgID)
})
Step(fmt.Sprintf("Wait for Restore [%s] to complete", RestoreName), func() {
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
err = Inst().Backup.WaitForRestoreCompletion(ctx, RestoreName, OrgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to wait for restore [%s] to complete. Error: [%v]",
RestoreName, err))
})
}
Step("teardown all applications on source cluster before switching context to destination cluster", func() {
for _, ctx := range contexts {
TearDownContext(ctx, map[string]bool{
SkipClusterScopedObjects: true,
})
}
})
// Change namespaces to restored apps only after backed up apps are cleaned up
// to avoid switching back namespaces to backup namespaces
Step("Validate Restored applications", func() {
destClusterConfigPath, err := GetDestinationClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for destination cluster. Error: [%v]", err))
SetClusterContext(destClusterConfigPath)
for _, ctx := range contexts {
err = Inst().S.WaitForRunning(ctx, defaultTimeout, defaultRetryInterval)
Expect(err).NotTo(HaveOccurred())
}
// TODO: Restored PVCs are created by stork-snapshot StorageClass
// And not by respective app's StorageClass. Need to fix below function
ValidateRestoredApplications(contexts, volumeParams)
})
Step("teardown all restored apps", func() {
for _, ctx := range contexts {
TearDownContext(ctx, nil)
}
})
Step("teardown backup objects", func() {
TearDownBackupRestore(bkpNamespaces, bkpNamespaces)
})
})
})
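// This test triggers backups and restores of multiple namespaces simultaneously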
var _ = Describe("{BackupRestoreSimultaneous}", func() {
var (
contexts []*scheduler.Context
bkpNamespaces []string
namespaceMapping map[string]string
taskNamePrefix = "backuprestoresimultaneous"
successfulBackups int
successfulRestores int
)
labelSelectors := make(map[string]string)
namespaceMapping = make(map[string]string)
bkpNamespaceErrors := make(map[string]error)
volumeParams := make(map[string]map[string]string)
restoreNamespaces := make([]string, 0)
It("has to perform simultaneous backups and restores", func() {
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
Step("Setup backup", func() {
// Set cluster context to cluster where torpedo is running
SetClusterContext("")
SetupBackup(taskNamePrefix)
})
sourceClusterConfigPath, err := GetSourceClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for source cluster. Error: [%v]", err))
SetClusterContext(sourceClusterConfigPath)
Step("Deploy applications", func() {
contexts = make([]*scheduler.Context, 0)
bkpNamespaces = make([]string, 0)
for i := 0; i < Inst().GlobalScaleFactor; i++ {
taskName := fmt.Sprintf("%s-%d", taskNamePrefix, i)
logrus.Infof("Task name %s\n", taskName)
appContexts := ScheduleApplications(taskName)
contexts = append(contexts, appContexts...)
for _, ctx := range appContexts {
// Override default App readiness time out of 5 mins with 10 mins
ctx.ReadinessTimeout = appReadinessTimeout
namespace := GetAppNamespace(ctx, taskName)
bkpNamespaces = append(bkpNamespaces, namespace)
}
}
// Skip volume validation until other volume providers are implemented.
for _, ctx := range contexts {
ctx.SkipVolumeValidation = true
}
ValidateApplications(contexts)
for _, ctx := range contexts {
for vol, params := range GetVolumeParameters(ctx) {
volumeParams[vol] = params
}
}
})
for _, namespace := range bkpNamespaces {
backupName := fmt.Sprintf("%s-%s", BackupNamePrefix, namespace)
Step(fmt.Sprintf("Create backup full name %s:%s:%s",
sourceClusterName, namespace, backupName), func() {
err = CreateBackupGetErr(backupName,
sourceClusterName, backupLocationName, BackupLocationUID,
[]string{namespace}, labelSelectors, OrgID)
if err != nil {
bkpNamespaceErrors[namespace] = err
}
})
}
var wg sync.WaitGroup
for _, namespace := range bkpNamespaces {
backupName := fmt.Sprintf("%s-%s", BackupNamePrefix, namespace)
nsErr, ok := bkpNamespaceErrors[namespace]
if ok {
logrus.Warningf("Skipping waiting for backup %s because %s", backupName, nsErr)
} else {
wg.Add(1)
go func(wg *sync.WaitGroup, namespace, backupName string) {
defer wg.Done()
Step(fmt.Sprintf("Wait for backup %s to complete", backupName), func() {
err = Inst().Backup.WaitForBackupCompletion(
ctx,
backupName, OrgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
if err != nil {
bkpNamespaceErrors[namespace] = err
logrus.Errorf("Failed to wait for backup [%s] to complete. Error: [%v]",
backupName, err)
}
})
}(&wg, namespace, backupName)
}
}
wg.Wait()
successfulBackups = len(bkpNamespaces) - len(bkpNamespaceErrors)
if successfulBackups == len(bkpNamespaces) {
Step("teardown all applications on source cluster before switching context to destination cluster", func() {
for _, ctx := range contexts {
TearDownContext(ctx, map[string]bool{
SkipClusterScopedObjects: true,
scheduler.OptionsWaitForResourceLeakCleanup: true,
scheduler.OptionsWaitForDestroy: true,
})
}
})
}
destClusterConfigPath, err := GetDestinationClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for destination cluster. Error: [%v]", err))
SetClusterContext(destClusterConfigPath)
for _, namespace := range bkpNamespaces {
restoreName := fmt.Sprintf("%s-%s", restoreNamePrefix, namespace)
nsErr, ok := bkpNamespaceErrors[namespace]
if ok {
logrus.Infof("Skipping create restore %s because %s", restoreName, nsErr)
} else {
restoreNamespaces = append(restoreNamespaces, namespace)
backupName := fmt.Sprintf("%s-%s", BackupNamePrefix, namespace)
Step(fmt.Sprintf("Create restore %s:%s:%s from backup %s:%s:%s",
destinationClusterName, namespace, restoreName,
sourceClusterName, namespace, backupName), func() {
err = CreateRestoreGetErr(restoreName, backupName, namespaceMapping,
destinationClusterName, OrgID)
if err != nil {
bkpNamespaceErrors[namespace] = err
}
})
}
}
for _, namespace := range bkpNamespaces {
restoreName := fmt.Sprintf("%s-%s", restoreNamePrefix, namespace)
nsErr, ok := bkpNamespaceErrors[namespace]
if ok {
logrus.Infof("Skipping waiting for restore %s because %s", restoreName, nsErr)
} else {
wg.Add(1)
go func(wg *sync.WaitGroup, namespace, restoreName string) {
defer wg.Done()
Step(fmt.Sprintf("Wait for restore %s:%s to complete",
namespace, restoreName), func() {
err = Inst().Backup.WaitForRestoreCompletion(ctx, restoreName, OrgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
if err != nil {
bkpNamespaceErrors[namespace] = err
logrus.Errorf("Failed to wait for restore [%s] to complete. Error: [%v]",
restoreName, err)
}
})
}(&wg, namespace, restoreName)
}
}
wg.Wait()
// Change namespaces to restored apps only after backed up apps are cleaned up
// to avoid switching back namespaces to backup namespaces
Step("Validate Restored applications", func() {
destClusterConfigPath, err := GetDestinationClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for destination cluster. Error: [%v]", err))
SetClusterContext(destClusterConfigPath)
// Populate contexts
for _, ctx := range contexts {
ctx.SkipClusterScopedObject = true
ctx.SkipVolumeValidation = true
}
ValidateRestoredApplicationsGetErr(contexts, volumeParams, bkpNamespaceErrors)
})
successfulRestores = len(bkpNamespaces) - len(bkpNamespaceErrors)
if len(bkpNamespaceErrors) == 0 {
Step("teardown all restored apps", func() {
for _, ctx := range contexts {
TearDownContext(ctx, nil)
}
})
Step("teardown backup objects", func() {
TearDownBackupRestore(bkpNamespaces, restoreNamespaces)
})
}
Step("report statistics", func() {
logrus.Infof("%d/%d backups succeeded.", successfulBackups, len(bkpNamespaces))
logrus.Infof("%d/%d restores succeeded.", successfulRestores, successfulBackups)
})
Step("view errors", func() {
logrus.Infof("There were %d errors during this test", len(bkpNamespaceErrors))
var combinedErrors []string
for namespace, err := range bkpNamespaceErrors {
errString := fmt.Sprintf("%s: %s", namespace, err.Error())
combinedErrors = append(combinedErrors, errString)
}
if len(combinedErrors) > 0 {
err = fmt.Errorf("%s", strings.Join(combinedErrors, "\n"))
Expect(err).NotTo(HaveOccurred())
}
})
})
})
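// This test repeatedly triggers backups and restores over a period of time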
var _ = Describe("{BackupRestoreOverPeriod}", func() {
var (
numBackups = 0
successfulBackups = 0
successfulBackupNames []string
numRestores = 0
successfulRestores = 0
successfulRestoreNames []string
)
var (
contexts []*scheduler.Context //for restored apps
bkpNamespaces []string
namespaceMapping map[string]string
taskNamePrefix = "backuprestoreperiod"
)
labelSelectors := make(map[string]string)
namespaceMapping = make(map[string]string)
volumeParams := make(map[string]map[string]string)
namespaceContextMap := make(map[string][]*scheduler.Context)
It("has to connect and check the backup setup", func() {
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]", err))
Step("Setup backup", func() {
// Set cluster context to cluster where torpedo is running
SetClusterContext("")
SetupBackup(taskNamePrefix)
})
sourceClusterConfigPath, err := GetSourceClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for source cluster. Error: [%v]", err))
SetClusterContext(sourceClusterConfigPath)
Step("Deploy applications", func() {
successfulBackupNames = make([]string, 0)
successfulRestoreNames = make([]string, 0)
contexts = make([]*scheduler.Context, 0)
bkpNamespaces = make([]string, 0)
for i := 0; i < Inst().GlobalScaleFactor; i++ {
taskName := fmt.Sprintf("%s-%d", taskNamePrefix, i)
logrus.Infof("Task name %s\n", taskName)
appContexts := ScheduleApplications(taskName)
contexts = append(contexts, appContexts...)
for _, ctx := range appContexts {
// Override default App readiness time out of 5 mins with 10 mins
ctx.ReadinessTimeout = appReadinessTimeout
namespace := GetAppNamespace(ctx, taskName)
namespaceContextMap[namespace] = append(namespaceContextMap[namespace], ctx)
bkpNamespaces = append(bkpNamespaces, namespace)
}
}
// Skip volume validation until other volume providers are implemented.
for _, ctx := range contexts {
ctx.SkipVolumeValidation = true
}
ValidateApplications(contexts)
for _, ctx := range contexts {
for vol, params := range GetVolumeParameters(ctx) {
volumeParams[vol] = params
}
}
})
logrus.Info("Wait for IO to proceed\n")
time.Sleep(time.Minute * 2)
// Moment in time when tests should finish
end := time.Now().Add(time.Duration(5) * time.Minute)
counter := 0
for time.Now().Before(end) {
counter++
aliveBackup := make(map[string]bool)
aliveRestore := make(map[string]bool)
sourceClusterConfigPath, err := GetSourceClusterConfigPath()
if err != nil {
logrus.Errorf("Failed to get kubeconfig path for source cluster. Error: [%v]", err)
continue
}
SetClusterContext(sourceClusterConfigPath)
for _, namespace := range bkpNamespaces {
numBackups++
backupName := fmt.Sprintf("%s-%s-%d", BackupNamePrefix, namespace, counter)
aliveBackup[namespace] = true
Step(fmt.Sprintf("Create backup full name %s:%s:%s",
sourceClusterName, namespace, backupName), func() {
err = CreateBackupGetErr(backupName,
sourceClusterName, backupLocationName, BackupLocationUID,
[]string{namespace}, labelSelectors, OrgID)
if err != nil {
aliveBackup[namespace] = false
logrus.Errorf("Failed to create backup [%s] in org [%s]. Error: [%v]", backupName, OrgID, err)
}
})
}
for _, namespace := range bkpNamespaces {
if !aliveBackup[namespace] {
continue
}
backupName := fmt.Sprintf("%s-%s-%d", BackupNamePrefix, namespace, counter)
Step(fmt.Sprintf("Wait for backup %s to complete", backupName), func() {
err = Inst().Backup.WaitForBackupCompletion(
ctx,
backupName, OrgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
if err == nil {
logrus.Infof("Backup [%s] completed successfully", backupName)
successfulBackups++
} else {
logrus.Errorf("Failed to wait for backup [%s] to complete. Error: [%v]",
backupName, err)
aliveBackup[namespace] = false
}
})
}
// Set kubeconfig to destination for restore
destClusterConfigPath, err := GetDestinationClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for destination cluster. Error: [%v]", err))
SetClusterContext(destClusterConfigPath)
for _, namespace := range bkpNamespaces {
if !aliveBackup[namespace] {
continue
}
backupName := fmt.Sprintf("%s-%s-%d", BackupNamePrefix, namespace, counter)
numRestores++
aliveRestore[namespace] = true
restoreName := fmt.Sprintf("%s-%s-%d", restoreNamePrefix, namespace, counter)
Step(fmt.Sprintf("Create restore full name %s:%s:%s",
destinationClusterName, namespace, restoreName), func() {
err = CreateRestoreGetErr(restoreName, backupName, namespaceMapping,
destinationClusterName, OrgID)
if err != nil {
logrus.Errorf("Failed to create restore [%s] in org [%s] on cluster [%s]. Error: [%v]",
restoreName, OrgID, clusterName, err)
aliveRestore[namespace] = false
}
})
}
for _, namespace := range bkpNamespaces {
if !aliveRestore[namespace] {
continue
}
restoreName := fmt.Sprintf("%s-%s-%d", restoreNamePrefix, namespace, counter)
Step(fmt.Sprintf("Wait for restore %s:%s to complete",
namespace, restoreName), func() {
err = Inst().Backup.WaitForRestoreCompletion(ctx, restoreName, OrgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
if err == nil {
logrus.Infof("Restore [%s] completed successfully", restoreName)
successfulRestores++
} else {
logrus.Errorf("Failed to wait for restore [%s] to complete. Error: [%v]",
restoreName, err)
aliveRestore[namespace] = false
}
})
}
for namespace, alive := range aliveBackup {
if alive {
backupName := fmt.Sprintf("%s-%s-%d", BackupNamePrefix, namespace, counter)
successfulBackupNames = append(successfulBackupNames, backupName)
}
}
remainingContexts := make([]*scheduler.Context, 0)
for namespace, alive := range aliveRestore {
if alive {
restoreName := fmt.Sprintf("%s-%s-%d", restoreNamePrefix, namespace, counter)
successfulRestoreNames = append(successfulRestoreNames, restoreName)
for _, ctx := range namespaceContextMap[namespace] {
remainingContexts = append(remainingContexts, ctx)
}
}
}
// Change namespaces to restored apps only after backed up apps are cleaned up
// to avoid switching back namespaces to backup namespaces
Step("Validate Restored applications", func() {
destClusterConfigPath, err := GetDestinationClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for destination cluster. Error: [%v]", err))
SetClusterContext(destClusterConfigPath)
// Populate contexts
for _, ctx := range remainingContexts {
ctx.SkipClusterScopedObject = true
ctx.SkipVolumeValidation = true
}
// TODO check why PX-Backup does not copy group params correctly after restore
for _, param := range volumeParams {
if _, ok := param["backupGroupCheckSkip"]; !ok {
param["backupGroupCheckSkip"] = "true"
}
}
ValidateRestoredApplications(remainingContexts, volumeParams)
})
if successfulRestores == numRestores {
Step("teardown all restored apps", func() {
for _, ctx := range remainingContexts {
TearDownContext(ctx, nil)
}
})
}
}
if successfulBackups == numBackups && successfulRestores == numRestores {
Step("teardown applications on source cluster", func() {
sourceClusterConfigPath, err := GetSourceClusterConfigPath()
if err != nil {
logrus.Errorf("Failed to get kubeconfig path for source cluster. Error: [%v]", err)
} else {
SetClusterContext(sourceClusterConfigPath)
for _, ctx := range contexts {
TearDownContext(ctx, map[string]bool{
SkipClusterScopedObjects: true,
scheduler.OptionsWaitForResourceLeakCleanup: true,
scheduler.OptionsWaitForDestroy: true,
})
}
}
})
Step("teardown backup/restore objects", func() {
TearDownBackupRestoreSpecific(successfulBackupNames, successfulRestoreNames)
})
}
Step("report statistics", func() {
logrus.Infof("%d/%d backups succeeded.", successfulBackups, numBackups)
logrus.Infof("%d/%d restores succeeded.", successfulRestores, numRestores)
})
})
})
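// This test repeatedly triggers simultaneous backups and restores over a period of time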
var _ = Describe("{BackupRestoreOverPeriodSimultaneous}", func() {
var (
numBackups int32 = 0
successfulBackups int32 = 0
successfulBackupNames []string
numRestores int32 = 0
successfulRestores int32 = 0
successfulRestoreNames []string
)
var (
contexts []*scheduler.Context //for restored apps
bkpNamespaces []string
namespaceMapping map[string]string
taskNamePrefix = "backuprestoreperiodsimultaneous"
)
labelSelectors := make(map[string]string)
namespaceMapping = make(map[string]string)
volumeParams := make(map[string]map[string]string)
namespaceContextMap := make(map[string][]*scheduler.Context)
combinedErrors := make([]string, 0)
It("has to connect and check the backup setup", func() {
Step("Setup backup", func() {
// Set cluster context to cluster where torpedo is running
SetClusterContext("")
SetupBackup(taskNamePrefix)
})
sourceClusterConfigPath, err := GetSourceClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for source cluster. Error: [%v]", err))
SetClusterContext(sourceClusterConfigPath)
Step("Deploy applications", func() {
successfulBackupNames = make([]string, 0)
successfulRestoreNames = make([]string, 0)
contexts = make([]*scheduler.Context, 0)
bkpNamespaces = make([]string, 0)
for i := 0; i < Inst().GlobalScaleFactor; i++ {
taskName := fmt.Sprintf("%s-%d", taskNamePrefix, i)
logrus.Infof("Task name %s\n", taskName)
appContexts := ScheduleApplications(taskName)
contexts = append(contexts, appContexts...)
for _, ctx := range appContexts {
// Override default App readiness time out of 5 mins with 10 mins
ctx.ReadinessTimeout = appReadinessTimeout
namespace := GetAppNamespace(ctx, taskName)
namespaceContextMap[namespace] = append(namespaceContextMap[namespace], ctx)
bkpNamespaces = append(bkpNamespaces, namespace)
}
}
// Skip volume validation until other volume providers are implemented.
for _, ctx := range contexts {
ctx.SkipVolumeValidation = true
}
ValidateApplications(contexts)
for _, ctx := range contexts {
for vol, params := range GetVolumeParameters(ctx) {
volumeParams[vol] = params
}
}
})
logrus.Info("Wait for IO to proceed\n")
time.Sleep(time.Minute * 2)
// Moment in time when tests should finish
end := time.Now().Add(time.Duration(5) * time.Minute)
counter := 0
for time.Now().Before(end) {
counter++
bkpNamespaceErrors := make(map[string]error)
sourceClusterConfigPath, err := GetSourceClusterConfigPath()
if err != nil {
logrus.Errorf("Failed to get kubeconfig path for source cluster. Error: [%v]", err)
continue
}
/*Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for source cluster. Error: [%v]", err))*/
SetClusterContext(sourceClusterConfigPath)
for _, namespace := range bkpNamespaces {
go func(namespace string) {
atomic.AddInt32(&numBackups, 1)
backupName := fmt.Sprintf("%s-%s-%d", BackupNamePrefix, namespace, counter)
Step(fmt.Sprintf("Create backup full name %s:%s:%s",
sourceClusterName, namespace, backupName), func() {
err = CreateBackupGetErr(backupName,
sourceClusterName, backupLocationName, BackupLocationUID,
[]string{namespace}, labelSelectors, OrgID)
if err != nil {
//aliveBackup[namespace] = false
bkpNamespaceErrors[namespace] = err
logrus.Errorf("Failed to create backup [%s] in org [%s]. Error: [%v]", backupName, OrgID, err)
}
})
}(namespace)
}
var wg sync.WaitGroup
for _, namespace := range bkpNamespaces {
backupName := fmt.Sprintf("%s-%s-%d", BackupNamePrefix, namespace, counter)
nsErr, ok := bkpNamespaceErrors[namespace]
if ok {
logrus.Warningf("Skipping waiting for backup %s because %s", backupName, nsErr)
continue
}
wg.Add(1)
go func(wg *sync.WaitGroup, namespace, backupName string) {
defer wg.Done()
Step(fmt.Sprintf("Wait for backup %s to complete", backupName), func() {
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
if err != nil {
logrus.Errorf("Failed to fetch px-central-admin ctx: [%v]", err)
bkpNamespaceErrors[namespace] = err
} else {
err = Inst().Backup.WaitForBackupCompletion(
ctx,
backupName, OrgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
if err == nil {
logrus.Infof("Backup [%s] completed successfully", backupName)
atomic.AddInt32(&successfulBackups, 1)
} else {
logrus.Errorf("Failed to wait for backup [%s] to complete. Error: [%v]",
backupName, err)
bkpNamespaceErrors[namespace] = err
}
}
})
}(&wg, namespace, backupName)
}
wg.Wait()
for _, namespace := range bkpNamespaces {
_, ok := bkpNamespaceErrors[namespace]
if !ok {
backupName := fmt.Sprintf("%s-%s-%d", BackupNamePrefix, namespace, counter)
successfulBackupNames = append(successfulBackupNames, backupName)
}
}
for _, namespace := range bkpNamespaces {
backupName := fmt.Sprintf("%s-%s-%d", BackupNamePrefix, namespace, counter)
restoreName := fmt.Sprintf("%s-%s-%d", restoreNamePrefix, namespace, counter)
nsErr, ok := bkpNamespaceErrors[namespace]
if ok {
logrus.Infof("Skipping create restore %s because %s", restoreName, nsErr)
continue
}
go func(namespace string) {
atomic.AddInt32(&numRestores, 1)
Step(fmt.Sprintf("Create restore full name %s:%s:%s",
destinationClusterName, namespace, restoreName), func() {
err = CreateRestoreGetErr(restoreName, backupName, namespaceMapping,
destinationClusterName, OrgID)
if err != nil {
logrus.Errorf("Failed to create restore [%s] in org [%s] on cluster [%s]. Error: [%v]",
restoreName, OrgID, clusterName, err)
bkpNamespaceErrors[namespace] = err
}
})
}(namespace)
}
for _, namespace := range bkpNamespaces {
restoreName := fmt.Sprintf("%s-%s-%d", restoreNamePrefix, namespace, counter)
nsErr, ok := bkpNamespaceErrors[namespace]
if ok {
logrus.Infof("Skipping waiting for restore %s because %s", restoreName, nsErr)
continue
}
wg.Add(1)
go func(wg *sync.WaitGroup, namespace, restoreName string) {
defer wg.Done()
Step(fmt.Sprintf("Wait for restore %s:%s to complete",
namespace, restoreName), func() {
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
if err != nil {
logrus.Errorf("Failed to fetch px-central-admin ctx: [%v]", err)
bkpNamespaceErrors[namespace] = err
} else {
err = Inst().Backup.WaitForRestoreCompletion(ctx, restoreName, OrgID,
BackupRestoreCompletionTimeoutMin*time.Minute,
RetrySeconds*time.Second)
if err == nil {
logrus.Infof("Restore [%s] completed successfully", restoreName)
atomic.AddInt32(&successfulRestores, 1)
} else {
logrus.Errorf("Failed to wait for restore [%s] to complete. Error: [%v]",
restoreName, err)
bkpNamespaceErrors[namespace] = err
}
}
})
}(&wg, namespace, restoreName)
}
wg.Wait()
remainingContexts := make([]*scheduler.Context, 0)
for _, namespace := range bkpNamespaces {
_, ok := bkpNamespaceErrors[namespace]
if !ok {
restoreName := fmt.Sprintf("%s-%s-%d", restoreNamePrefix, namespace, counter)
successfulRestoreNames = append(successfulRestoreNames, restoreName)
for _, ctx := range namespaceContextMap[namespace] {
remainingContexts = append(remainingContexts, ctx)
}
}
}
// Change namespaces to restored apps only after backed up apps are cleaned up
// to avoid switching back namespaces to backup namespaces
Step("Validate Restored applications", func() {
destClusterConfigPath, err := GetDestinationClusterConfigPath()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for destination cluster. Error: [%v]", err))
SetClusterContext(destClusterConfigPath)
// Populate contexts
for _, ctx := range remainingContexts {
ctx.SkipClusterScopedObject = true
ctx.SkipVolumeValidation = true
}
ValidateRestoredApplicationsGetErr(remainingContexts, volumeParams, bkpNamespaceErrors)
})
Step("teardown all restored apps", func() {
for _, ctx := range remainingContexts {
TearDownContext(ctx, nil)
}
})
for namespace, err := range bkpNamespaceErrors {
errString := fmt.Sprintf("%s:%d - %s", namespace, counter, err.Error())
combinedErrors = append(combinedErrors, errString)
}
}
Step("teardown applications on source cluster", func() {
sourceClusterConfigPath, err := GetSourceClusterConfigPath()
if err != nil {
logrus.Errorf("Failed to get kubeconfig path for source cluster. Error: [%v]", err)
} else {
SetClusterContext(sourceClusterConfigPath)
for _, ctx := range contexts {
TearDownContext(ctx, map[string]bool{
SkipClusterScopedObjects: true,
scheduler.OptionsWaitForResourceLeakCleanup: true,
scheduler.OptionsWaitForDestroy: true,
})
}
}
})
Step("teardown backup/restore objects", func() {
TearDownBackupRestoreSpecific(successfulBackupNames, successfulRestoreNames)
})
Step("report statistics", func() {
logrus.Infof("%d/%d backups succeeded.", successfulBackups, numBackups)
logrus.Infof("%d/%d restores succeeded.", successfulRestores, numRestores)
})
Step("view errors", func() {
logrus.Infof("There were %d errors during this test", len(combinedErrors))
if len(combinedErrors) > 0 {
err = fmt.Errorf("%s", strings.Join(combinedErrors, "\n"))
Expect(err).NotTo(HaveOccurred())
}
})
})
})
// TODO: There is no delete org API
/*func DeleteOrganization(orgID string) {
Step(fmt.Sprintf("Delete organization [%s]", orgID), func() {
backupDriver := Inst().Backup
req := &api.Delete{
CreateMetadata: &api.CreateMetadata{
Name: orgID,
},
}
_, err := backupDriver.Delete(req)
Expect(err).NotTo(HaveOccurred())
})
}*/
// createGkeBackupLocation creates a backup location on GKE
func createGkeBackupLocation(name string, cloudCred string, orgID string) {
Step(fmt.Sprintf("Create GKE backup location [%s] in org [%s]", name, orgID), func() {
// TODO(stgleb): Implement this
})
}
// CreateProviderClusterObject creates cluster for each cluster per each cloud provider
func CreateProviderClusterObject(provider string, kubeconfigList []string, cloudCred, orgID string) {
Step(fmt.Sprintf("Create cluster [%s-%s] in org [%s]",
clusterName, provider, orgID), func() {
kubeconfigPath, err := getProviderClusterConfigPath(provider, kubeconfigList)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get kubeconfig path for source cluster. Error: [%v]", err))
CreateCluster(fmt.Sprintf("%s-%s", clusterName, provider), cloudCred,
kubeconfigPath, orgID)
})
}
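// getProviders returns the object store providers listed in the comma-separated
// PROVIDERS environment variable (e.g. PROVIDERS="aws,azure").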
func getProviders() []string {
providersStr := os.Getenv("PROVIDERS")
return strings.Split(providersStr, ",")
}
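// getProviderClusterConfigPath returns the full path (under KubeconfigDirectory) of the
// kubeconfig in the given list whose name contains the given provider.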
func getProviderClusterConfigPath(provider string, kubeconfigs []string) (string, error) {
logrus.Infof("Get kubeconfigPath from list %v and provider %s",
kubeconfigs, provider)
for _, kubeconfigPath := range kubeconfigs {
if strings.Contains(kubeconfigPath, provider) {
fullPath := path.Join(KubeconfigDirectory, kubeconfigPath)
return fullPath, nil
}
}
return "", fmt.Errorf("kubeconfigPath not found for provider %s in kubeconfigPath list %v",
provider, kubeconfigs)
}
// CreateBackup creates backup
func CreateBackup(backupName string, clusterName string, bLocation string, bLocationUID string,
namespaces []string, labelSelectors map[string]string, orgID string) {
Step(fmt.Sprintf("Create backup [%s] in org [%s] from cluster [%s]",
backupName, orgID, clusterName), func() {
backupDriver := Inst().Backup
bkpCreateRequest := &api.BackupCreateRequest{
CreateMetadata: &api.CreateMetadata{
Name: backupName,
OrgId: orgID,
},
BackupLocationRef: &api.ObjectRef{
Name: bLocation,
Uid: bLocationUID,
},
Cluster: clusterName,
Namespaces: namespaces,
LabelSelectors: labelSelectors,
}
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
_, err = backupDriver.CreateBackup(ctx, bkpCreateRequest)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to create backup [%s] in org [%s]. Error: [%v]",
backupName, orgID, err))
})
}
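// GetNodesForBackup waits for the given backup to start and returns the nodes
// on which its volume backups are running.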
func GetNodesForBackup(backupName string, bkpNamespace string,
orgID string, clusterName string, triggerOpts *driver_api.TriggerOptions) []node.Node {
var nodes []node.Node
backupDriver := Inst().Backup
backupInspectReq := &api.BackupInspectRequest{
Name: backupName,
OrgId: orgID,
}
err := Inst().Backup.WaitForBackupRunning(context.Background(), backupInspectReq, defaultTimeout, defaultRetryInterval)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to wait for backup [%s] to start. Error: [%v]",
backupName, err))
clusterInspectReq := &api.ClusterInspectRequest{
OrgId: orgID,
Name: clusterName,
IncludeSecrets: true,
}
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
clusterInspectRes, err := backupDriver.InspectCluster(ctx, clusterInspectReq)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to inspect cluster [%s] in org [%s]. Error: [%v]",
clusterName, orgID, err))
Expect(clusterInspectRes).NotTo(BeNil(),
"Got an empty response while inspecting cluster [%s] in org [%s]", clusterName, orgID)
cluster := clusterInspectRes.GetCluster()
volumeBackupIDs, err := backupDriver.GetVolumeBackupIDs(context.Background(),
backupName, bkpNamespace, cluster, orgID)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get volume backup IDs for backup [%s] in org [%s]. Error: [%v]",
backupName, orgID, err))
Expect(len(volumeBackupIDs)).NotTo(Equal(0),
"Got empty list of volumeBackup IDs from backup driver")
for _, backupID := range volumeBackupIDs {
n, err := Inst().V.GetNodeForBackup(backupID)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to get node on which backup [%s] in running. Error: [%v]",
backupName, err))
logrus.Debugf("Volume backup [%s] is running on node [%s], node id: [%s]\n",
backupID, n.GetHostname(), n.GetId())
nodes = append(nodes, n)
}
return nodes
}
// CreateRestore creates restore
func CreateRestore(restoreName string, backupName string,
namespaceMapping map[string]string, clusterName string, orgID string) {
Step(fmt.Sprintf("Create restore [%s] in org [%s] on cluster [%s]",
restoreName, orgID, clusterName), func() {
backupDriver := Inst().Backup
createRestoreReq := &api.RestoreCreateRequest{
CreateMetadata: &api.CreateMetadata{
Name: restoreName,
OrgId: orgID,
},
Backup: backupName,
Cluster: clusterName,
NamespaceMapping: namespaceMapping,
}
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
_, err = backupDriver.CreateRestore(ctx, createRestoreReq)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to create restore [%s] in org [%s] on cluster [%s]. Error: [%v]",
restoreName, orgID, clusterName, err))
// TODO: validate createClusterResponse also
})
}
// TearDownBackupRestoreSpecific deletes the backups and restores specified by name, as well as the backup location, clusters, cloud credential, and bucket
func TearDownBackupRestoreSpecific(backups []string, restores []string) {
for _, backupName := range backups {
DeleteBackup(backupName, OrgID)
}
for _, restoreName := range restores {
DeleteRestore(restoreName, OrgID)
}
provider := GetProvider()
DeleteCluster(destinationClusterName, OrgID)
DeleteCluster(sourceClusterName, OrgID)
DeleteBackupLocation(backupLocationName, OrgID)
DeleteCloudCredential(CredName, OrgID, CloudCredUID)
DeleteBucket(provider, BucketName)
}
// CreateRestoreGetErr creates restore
func CreateRestoreGetErr(restoreName string, backupName string,
namespaceMapping map[string]string, clusterName string, orgID string) (err error) {
Step(fmt.Sprintf("Create restore [%s] in org [%s] on cluster [%s]",
restoreName, orgID, clusterName), func() {
backupDriver := Inst().Backup
createRestoreReq := &api.RestoreCreateRequest{
CreateMetadata: &api.CreateMetadata{
Name: restoreName,
OrgId: orgID,
},
Backup: backupName,
Cluster: clusterName,
NamespaceMapping: namespaceMapping,
}
//ctx, err := backup.GetPxCentralAdminCtx()
ctx, err := backup.GetAdminCtxFromSecret()
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("Failed to fetch px-central-admin ctx: [%v]",
err))
_, err = backupDriver.CreateRestore(ctx, createRestoreReq)
if err != nil {
logrus.Errorf("Failed to create restore [%s] in org [%s] on cluster [%s]. Error: [%v]",
restoreName, orgID, clusterName, err)
}
// TODO: validate createClusterResponse also
})
return err
}
| [
"\"KUBECONFIGS\"",
"\"PROVIDERS\""
]
| []
| [
"KUBECONFIGS",
"PROVIDERS"
]
| [] | ["KUBECONFIGS", "PROVIDERS"] | go | 2 | 0 | |
cmd/lncli/main.go | // Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Copyright (C) 2015-2017 The Lightning Network Developers
package main
import (
"fmt"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"strings"
macaroon "gopkg.in/macaroon.v2"
"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/macaroons"
"github.com/urfave/cli"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
const (
defaultDataDir = "data"
defaultChainSubDir = "chain"
defaultTLSCertFilename = "tls.cert"
defaultMacaroonFilename = "admin.macaroon"
defaultRPCPort = "10009"
defaultRPCHostPort = "localhost:" + defaultRPCPort
)
var (
defaultLndDir = btcutil.AppDataDir("lnd", false)
defaultTLSCertPath = filepath.Join(defaultLndDir, defaultTLSCertFilename)
// maxMsgRecvSize is the largest message our client will receive. We
// set this to 200MiB atm.
maxMsgRecvSize = grpc.MaxCallRecvMsgSize(1 * 1024 * 1024 * 200)
)
func fatal(err error) {
fmt.Fprintf(os.Stderr, "[lncli] %v\n", err)
os.Exit(1)
}
func getWalletUnlockerClient(ctx *cli.Context) (lnrpc.WalletUnlockerClient, func()) {
conn := getClientConn(ctx, true)
cleanUp := func() {
conn.Close()
}
return lnrpc.NewWalletUnlockerClient(conn), cleanUp
}
func getClient(ctx *cli.Context) (lnrpc.LightningClient, func()) {
conn := getClientConn(ctx, false)
cleanUp := func() {
conn.Close()
}
return lnrpc.NewLightningClient(conn), cleanUp
}
func getClientConn(ctx *cli.Context, skipMacaroons bool) *grpc.ClientConn {
// First, we'll parse the args from the command.
tlsCertPath, macPath, err := extractPathArgs(ctx)
if err != nil {
fatal(err)
}
// Load the specified TLS certificate and build transport credentials
// with it.
creds, err := credentials.NewClientTLSFromFile(tlsCertPath, "")
if err != nil {
fatal(err)
}
// Create a dial options array.
opts := []grpc.DialOption{
grpc.WithTransportCredentials(creds),
}
// Only process macaroon credentials if --no-macaroons isn't set and
// if we're not skipping macaroon processing.
if !ctx.GlobalBool("no-macaroons") && !skipMacaroons {
// Load the specified macaroon file.
macBytes, err := ioutil.ReadFile(macPath)
if err != nil {
fatal(fmt.Errorf("unable to read macaroon path (check "+
"the network setting!): %v", err))
}
mac := &macaroon.Macaroon{}
if err = mac.UnmarshalBinary(macBytes); err != nil {
fatal(fmt.Errorf("unable to decode macaroon: %v", err))
}
macConstraints := []macaroons.Constraint{
// We add a time-based constraint to prevent replay of the
// macaroon. It's good for 60 seconds by default to make up for
// any discrepancy between client and server clocks, but leaking
// the macaroon before it becomes invalid makes it possible for
// an attacker to reuse the macaroon. In addition, the validity
// time of the macaroon is extended by the time the server clock
// is behind the client clock, or shortened by the time the
// server clock is ahead of the client clock (or invalid
// altogether if, in the latter case, this time is more than 60
// seconds).
// TODO(aakselrod): add better anti-replay protection.
macaroons.TimeoutConstraint(ctx.GlobalInt64("macaroontimeout")),
// Lock macaroon down to a specific IP address.
macaroons.IPLockConstraint(ctx.GlobalString("macaroonip")),
// ... Add more constraints if needed.
}
// Apply constraints to the macaroon.
constrainedMac, err := macaroons.AddConstraints(mac, macConstraints...)
if err != nil {
fatal(err)
}
// Now we append the macaroon credentials to the dial options.
cred := macaroons.NewMacaroonCredential(constrainedMac)
opts = append(opts, grpc.WithPerRPCCredentials(cred))
}
// We need to use a custom dialer so we can also connect to unix sockets
// and not just TCP addresses.
genericDialer := lncfg.ClientAddressDialer(defaultRPCPort)
opts = append(opts, grpc.WithContextDialer(genericDialer))
opts = append(opts, grpc.WithDefaultCallOptions(maxMsgRecvSize))
conn, err := grpc.Dial(ctx.GlobalString("rpcserver"), opts...)
if err != nil {
fatal(fmt.Errorf("unable to connect to RPC server: %v", err))
}
return conn
}
// extractPathArgs parses the TLS certificate and macaroon paths from the
// command.
func extractPathArgs(ctx *cli.Context) (string, string, error) {
// We'll start off by parsing the active chain and network. These are
// needed to determine the correct path to the macaroon when not
// specified.
chain := strings.ToLower(ctx.GlobalString("chain"))
switch chain {
case "bitcoin", "litecoin":
default:
return "", "", fmt.Errorf("unknown chain: %v", chain)
}
network := strings.ToLower(ctx.GlobalString("network"))
switch network {
case "mainnet", "testnet", "regtest", "simnet":
default:
return "", "", fmt.Errorf("unknown network: %v", network)
}
// We'll now fetch the lnddir so we can make a decision on how to
// properly read the macaroons (if needed) and also the cert. This will
// either be the default, or will have been overwritten by the end
// user.
lndDir := cleanAndExpandPath(ctx.GlobalString("lnddir"))
// If the macaroon path as been manually provided, then we'll only
// target the specified file.
var macPath string
if ctx.GlobalString("macaroonpath") != "" {
macPath = cleanAndExpandPath(ctx.GlobalString("macaroonpath"))
} else {
// Otherwise, we'll go into the path:
// lnddir/data/chain/<chain>/<network> in order to fetch the
// macaroon that we need.
macPath = filepath.Join(
lndDir, defaultDataDir, defaultChainSubDir, chain,
network, defaultMacaroonFilename,
)
}
tlsCertPath := cleanAndExpandPath(ctx.GlobalString("tlscertpath"))
// If a custom lnd directory was set, we'll also check if custom paths
// for the TLS cert and macaroon file were set as well. If not, we'll
// override their paths so they can be found within the custom lnd
// directory set. This allows us to set a custom lnd directory, along
// with custom paths to the TLS cert and macaroon file.
if lndDir != defaultLndDir {
tlsCertPath = filepath.Join(lndDir, defaultTLSCertFilename)
}
return tlsCertPath, macPath, nil
}
func main() {
app := cli.NewApp()
app.Name = "lncli"
app.Version = build.Version() + " commit=" + build.Commit
app.Usage = "control plane for your Lightning Network Daemon (lnd)"
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "rpcserver",
Value: defaultRPCHostPort,
Usage: "host:port of ln daemon",
},
cli.StringFlag{
Name: "lnddir",
Value: defaultLndDir,
Usage: "path to lnd's base directory",
},
cli.StringFlag{
Name: "tlscertpath",
Value: defaultTLSCertPath,
Usage: "path to TLS certificate",
},
cli.StringFlag{
Name: "chain, c",
Usage: "the chain lnd is running on e.g. bitcoin",
Value: "bitcoin",
},
cli.StringFlag{
Name: "network, n",
Usage: "the network lnd is running on e.g. mainnet, " +
"testnet, etc.",
Value: "mainnet",
},
cli.BoolFlag{
Name: "no-macaroons",
Usage: "disable macaroon authentication",
},
cli.StringFlag{
Name: "macaroonpath",
Usage: "path to macaroon file",
},
cli.Int64Flag{
Name: "macaroontimeout",
Value: 60,
Usage: "anti-replay macaroon validity time in seconds",
},
cli.StringFlag{
Name: "macaroonip",
Usage: "if set, lock macaroon to specific IP address",
},
}
app.Commands = []cli.Command{
createCommand,
unlockCommand,
changePasswordCommand,
newAddressCommand,
estimateFeeCommand,
sendManyCommand,
sendCoinsCommand,
listUnspentCommand,
connectCommand,
disconnectCommand,
openChannelCommand,
closeChannelCommand,
closeAllChannelsCommand,
abandonChannelCommand,
listPeersCommand,
walletBalanceCommand,
channelBalanceCommand,
getInfoCommand,
getRecoveryInfoCommand,
pendingChannelsCommand,
sendPaymentCommand,
payInvoiceCommand,
sendToRouteCommand,
addInvoiceCommand,
lookupInvoiceCommand,
listInvoicesCommand,
listChannelsCommand,
closedChannelsCommand,
listPaymentsCommand,
describeGraphCommand,
getNodeMetricsCommand,
getChanInfoCommand,
getNodeInfoCommand,
queryRoutesCommand,
getNetworkInfoCommand,
debugLevelCommand,
decodePayReqCommand,
listChainTxnsCommand,
stopCommand,
signMessageCommand,
verifyMessageCommand,
feeReportCommand,
updateChannelPolicyCommand,
forwardingHistoryCommand,
exportChanBackupCommand,
verifyChanBackupCommand,
restoreChanBackupCommand,
bakeMacaroonCommand,
listMacaroonIDsCommand,
deleteMacaroonIDCommand,
trackPaymentCommand,
versionCommand,
}
// Add any extra commands determined by build flags.
app.Commands = append(app.Commands, autopilotCommands()...)
app.Commands = append(app.Commands, invoicesCommands()...)
app.Commands = append(app.Commands, routerCommands()...)
app.Commands = append(app.Commands, walletCommands()...)
app.Commands = append(app.Commands, watchtowerCommands()...)
app.Commands = append(app.Commands, wtclientCommands()...)
if err := app.Run(os.Args); err != nil {
fatal(err)
}
}
// cleanAndExpandPath expands environment variables and leading ~ in the
// passed path, cleans the result, and returns it.
// This function is taken from https://github.com/btcsuite/btcd
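// For example, "~/.lnd/data" expands to "/home/<user>/.lnd/data", and
// "$HOME/.lnd" is expanded from the environment.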
func cleanAndExpandPath(path string) string {
if path == "" {
return ""
}
// Expand initial ~ to OS specific home directory.
if strings.HasPrefix(path, "~") {
var homeDir string
user, err := user.Current()
if err == nil {
homeDir = user.HomeDir
} else {
homeDir = os.Getenv("HOME")
}
path = strings.Replace(path, "~", homeDir, 1)
}
// NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%,
// but the variables can still be expanded via POSIX-style $VARIABLE.
return filepath.Clean(os.ExpandEnv(path))
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
gitlab.go | package main
import (
"fmt"
"net/http"
"net/url"
"os"
"os/user"
"path"
"strconv"
"strings"
"gopkg.in/go-ini/ini.v1"
)
// GitlabCredentials contains PersonalToken for GitLab API authorization
// and Host for possibly implementing support for self-hosted instances
type GitlabCredentials struct {
Host string
PersonalToken string
}
func (creds GitlabCredentials) query(method, url string) (map[string]interface{}, error) {
req, err := http.NewRequest(method, url, nil)
if err != nil {
return nil, err
}
req.Header.Add("PRIVATE-TOKEN", creds.PersonalToken)
return QueryHTTP(req)
}
func (creds GitlabCredentials) getIssue(repo string, todo Todo) (map[string]interface{}, error) {
json, err := creds.query(
"GET",
// FIXME(#156): possible GitLab API injection attack
"https://"+creds.Host+"/api/v4/projects/"+url.QueryEscape(repo)+"/issues/"+(*todo.ID)[1:]) // self-hosted
if err != nil {
return nil, err
}
return json, nil
}
func (creds GitlabCredentials) postIssue(repo string, todo Todo, body string) (Todo, error) {
params := url.Values{}
params.Add("title", todo.Title)
params.Add("description", body)
json, err := creds.query(
"POST",
"https://"+creds.Host+"/api/v4/projects/"+url.QueryEscape(repo)+"/issues?"+params.Encode()) // self-hosted
if err != nil {
return todo, err
}
id := "#" + strconv.Itoa(int(json["iid"].(float64)))
todo.ID = &id
return todo, err
}
func (creds GitlabCredentials) getHost() string {
return creds.Host
}
// GitlabCredentialsFromFile gets GitlabCredentials from a filepath
func GitlabCredentialsFromFile(filepath string) []GitlabCredentials {
credentials := []GitlabCredentials{}
cfg, err := ini.Load(filepath)
if err != nil {
return credentials
}
for _, section := range cfg.Sections()[1:] {
credentials = append(credentials, GitlabCredentials{
Host: section.Name(),
PersonalToken: section.Key("personal_token").String(),
})
}
return credentials
}
// GitlabCredentialsFromToken returns a GitlabCredentials from a string token
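// Accepted formats are "<personal_token>" (defaults to gitlab.com) and
// "<host>:<personal_token>" for self-hosted instances, e.g. "gitlab.example.com:abc123".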
func GitlabCredentialsFromToken(token string) (GitlabCredentials, error) {
credentials := strings.Split(token, ":")
switch len(credentials) {
case 1:
return GitlabCredentials{
Host: "gitlab.com",
PersonalToken: credentials[0],
}, nil
case 2:
return GitlabCredentials{
Host: credentials[0],
PersonalToken: credentials[1],
}, nil
default:
return GitlabCredentials{},
fmt.Errorf("Couldn't parse GitLab credentials from ENV: %s", token)
}
}
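// getGitlabCredentials collects GitLab credentials from the GITLAB_PERSONAL_TOKEN
// environment variable and from gitlab.ini files under XDG_CONFIG_HOME,
// ~/.config/snitch and ~/.snitch.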
func getGitlabCredentials(creds []IssueAPI) []IssueAPI {
tokenEnvar := os.Getenv("GITLAB_PERSONAL_TOKEN")
xdgEnvar := os.Getenv("XDG_CONFIG_HOME")
usr, err := user.Current()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
if len(tokenEnvar) != 0 {
for _, credential := range strings.Split(tokenEnvar, ",") {
parsedCredentials, err := GitlabCredentialsFromToken(credential)
if err != nil {
fmt.Fprintln(os.Stderr, err)
continue
}
creds = append(creds, parsedCredentials)
}
}
// custom XDG_CONFIG_HOME
if len(xdgEnvar) != 0 {
filePath := path.Join(xdgEnvar, "snitch/gitlab.ini")
if _, err := os.Stat(filePath); err == nil {
for _, cred := range GitlabCredentialsFromFile(filePath) {
creds = append(creds, cred)
}
}
}
// default XDG_CONFIG_HOME
if len(xdgEnvar) == 0 {
filePath := path.Join(usr.HomeDir, ".config/snitch/gitlab.ini")
if _, err := os.Stat(filePath); err == nil {
for _, cred := range GitlabCredentialsFromFile(filePath) {
creds = append(creds, cred)
}
}
}
filePath := path.Join(usr.HomeDir, ".snitch/gitlab.ini")
if _, err := os.Stat(filePath); err == nil {
for _, cred := range GitlabCredentialsFromFile(filePath) {
creds = append(creds, cred)
}
}
return creds
}
| [
"\"GITLAB_PERSONAL_TOKEN\"",
"\"XDG_CONFIG_HOME\""
]
| []
| [
"GITLAB_PERSONAL_TOKEN",
"XDG_CONFIG_HOME"
]
| [] | ["GITLAB_PERSONAL_TOKEN", "XDG_CONFIG_HOME"] | go | 2 | 0 | |
topo_input_files/synthetics/flat_and_gaussian_create.py | #! /usr/local/python27/bin/python
# For Codor -
# Setting up some synthetic topography input files (flat and Gaussian hill profiles)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import scipy.integrate as integrate
import random as rdm
def gaussprof(A,hw,x,x0):
# A function based on the matlab code of
# Codor Khodr (Uni. of Bristol) to generate
# a Gaussian hill topography profile
# Input:
# A = maximum altitude of the Gaussian function
# hw = half-width (standard deviation) of Gaussian
# x = input abscissa
# x0 = center of the Gaussian Hill
# Output:
# f -> the Gaussian hill function, i.e., f(x)
# Generate the Gaussian
n = x.shape[0]
f = np.zeros(n)
# Codor's function parameterised in terms of theta_max. Why?
#A = np.tan(theta_max*np.pi/180.0)*d*np.exp(0.5)/np.sqrt(2)
#print 'A = ',A
for i in range(n):
f[i] = A*np.exp(-1*((x[i]-x0)**2)/(2*(hw**2)))
return f
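# Example (values as used below): a 200 m high hill centred at x0 = 3000 m
# with half-width hw = 600 m, sampled every 90 m:
# f = gaussprof(200., 600., np.arange(0., 6000., 90.), 3000.)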
def gaussprof3D(A,hwx,hwy,x,y,x0,y0):
# Create a 3D Gaussian Hill
# A function based on the matlab code of
# Codor Khodr (Uni. of Bristol) to generate
# a Gaussian hill topography profile
# Input:
# A = maximum height of the Gaussian Hill
# hwx = half-width (standard deviation) of Gaussian in x-dirn
# hwy = half-width (standard deviation) of Gaussian in y-dirn
# x, y = input abscissa vectors defining the grid
# x0, y0 = centre of the Gaussian hill
# Output:
# xvflat, yvflat, f -> flattened grid coordinates and Gaussian hill heights f(x,y)
# Create the mesh and calculate the Gaussian function
xv,yv = np.meshgrid(x,y)
# Flattening (inefficient programming step?)
xvflat = xv.flatten()
yvflat = yv.flatten()
n = len(xvflat)
f = np.zeros(n)
for i in range(n):
term1 = ((xvflat[i]-x0)**2)/(2*(hwx**2))
term2 = ((yvflat[i]-y0)**2)/(2*(hwy**2))
f[i] = A*np.exp(-1*(term1+term2))
return xvflat,yvflat,f
#----- Main Prog
#---------------------------------------------------------------
# Setting up the Gaussian, as per Codor's email of 10-March-2016
#
# Calculating path length using the standard formula
# p.29 of Riley, Hobson and Bence
#---------------------------------------------------------------
para = {'axes.labelsize': 16, 'font.size': 16, 'legend.fontsize': 14, 'xtick.labelsize': 14,'ytick.labelsize': 14, 'figure.subplot.left': 0.12, 'figure.subplot.right': 0.98, 'figure.subplot.bottom': 0.11, 'figure.subplot.top': 0.97}
plt.rcParams.update(para)
#-----------
# 2D CASES
#-----------
L = 6000.
A = 200.
dx = 90.
x0 = L/2.0
hw = 3.0*A
x_regular = np.arange(0,L,dx)
# Calculating the regular topography function.
f_regular = gaussprof(A,hw,x_regular,x0)
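# Illustrative check (sketch): numerical path length of the regular profile using
# the standard arc-length formula s = integral sqrt(1 + (df/dx)^2) dx
# (Riley, Hobson and Bence, p.29), evaluated on the regular grid.
dfdx_regular = np.gradient(f_regular, dx)
pathlen_regular = np.trapz(np.sqrt(1.0 + dfdx_regular**2), x_regular)
print 'Approximate path length of regular profile (m): ', pathlen_regular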
fig = plt.figure(figsize=(6,6))
ax1 = fig.add_axes([0.2,0.2,0.7,0.7])
ax1.plot(x_regular,f_regular,'ko')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
# Calculating the irregularly sampled topography function
x_irregular = np.sort(rdm.sample(range(0,int(L)),len(x_regular)))
f_irregular = gaussprof(A,hw,x_irregular,x0)
ax1.plot(x_irregular,f_irregular,'r*')
plt.show()
#-----------------------------------------------------------------
# Now to output to ASCII files
#-----------------------------------------------------------------
dirpath = '/Users/dgreen/Documents/Work/4codor/infratopo/topo_input_files/synthetics/'
regfile = 'gauss_'+str(int(A))+'m_hill_short.dat'
irregfile = 'gauss_'+str(int(A))+'m_hill_irreg_short.dat'
flatfile = 'flat_topo_short.dat'
fr = open(dirpath+regfile, 'w')
for x in range(len(f_regular)):
fr.write('{:4.0f} {:5.1f}\n'.format(x_regular[x],f_regular[x]))
fr.close()
fi = open(dirpath+irregfile, 'w')
for x in range(len(f_irregular)):
fi.write('{:4.0f} {:5.1f}\n'.format(x_irregular[x],f_irregular[x]))
fi.close()
ff = open(dirpath+flatfile, 'w')
for x in range(len(f_regular)):
ff.write('{:4.0f} {:5.1f}\n'.format(x_regular[x],0.0))
ff.close()
#-----------------------------------------------------------------
# Now for the long-range cases (500km)
#-----------------------------------------------------------------
L = 550000.
A = 3000.
dx = 1000.
x0 = 250000.
hw = 60000.
x_regular = np.arange(0,L,dx)
# Calculating the regular topography function.
f_regular = gaussprof(A,hw,x_regular,x0)
fig = plt.figure(figsize=(6,6))
ax1 = fig.add_axes([0.2,0.2,0.7,0.7])
ax1.plot(x_regular,f_regular,'ko')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
dirpath = '/Users/dgreen/Documents/Work/4codor/infratopo/topo_input_files/synthetics/'
longfile = 'gauss_'+str(int(A))+'m_hill_long.dat'
fr = open(dirpath+longfile, 'w')
for x in range(len(f_regular)):
fr.write('{:6.0f} {:5.1f}\n'.format(x_regular[x],f_regular[x]))
fr.close()
longflat = 'flat_topo_long.dat'
fr = open(dirpath+longflat, 'w')
for x in range(len(f_regular)):
fr.write('{:6.0f} {:5.1f}\n'.format(x_regular[x],0.0))
fr.close()
#-----------
# 3D CASES
#-----------
L = 6000.
A = 200.
dx = 90.
dy = dx
x0 = L/2.0
y0 = L/2.0
hwx = 3.0*A
hwy = 3.0*A
x_regular = np.arange(0,L,dx)
y_regular = np.arange(0,L,dy)
xv3d,yv3d,f3d = gaussprof3D(A,hwx,hwy,x_regular,y_regular,x0,y0)
# For plottting (not ascii output) need to convert above to 2D arrays
cols = np.unique(xv3d).shape[0]
X = xv3d.reshape(-1, cols)
Y = yv3d.reshape(-1, cols)
Z = f3d.reshape(-1, cols)
fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
ax = fig.add_axes([0.25, 0.25, 0.75, 0.75], projection='3d')
ax.set_zlabel('Alt (m)')
ax.set_ylabel('y (m)')
ax.set_xlabel('x (m)')
surf = ax.plot_surface(X,Y,Z,rstride=2,cstride=2,cmap=cm.coolwarm,linewidth=0, antialiased=False)
#fig.colorbar(surf, shrink=0.4, aspect=10)
ax.dist=11
plt.show()
fig.savefig('gauss_3D_example_crude.png',bbox_inches='tight')
# Output to ASCII
regfile3d = 'gauss3d_'+str(int(A))+'m_hill_short.dat'
flatfile3d = 'flat_topo_3d_short.dat'
fr3d = open(dirpath+regfile3d, 'w')
for x in range(len(f3d)):
fr3d.write('{:4.0f} {:4.0f} {:5.1f}\n'.format(xv3d[x],yv3d[x],f3d[x]))
fr3d.close()
fl3d = open(dirpath+flatfile3d, 'w')
for x in range(len(f3d)):
fl3d.write('{:4.0f} {:4.0f} {:5.1f}\n'.format(xv3d[x],yv3d[x],0.0))
fl3d.close() | []
| []
| []
| [] | [] | python | null | null | null |
cmd/tailscale/cli/up.go | // Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cli
import (
"context"
"errors"
"flag"
"fmt"
"log"
"os"
"reflect"
"runtime"
"sort"
"strings"
"sync"
shellquote "github.com/kballard/go-shellquote"
"github.com/peterbourgon/ff/v3/ffcli"
qrcode "github.com/skip2/go-qrcode"
"inet.af/netaddr"
"tailscale.com/client/tailscale"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/safesocket"
"tailscale.com/tailcfg"
"tailscale.com/types/logger"
"tailscale.com/types/preftype"
"tailscale.com/version/distro"
)
var upCmd = &ffcli.Command{
Name: "up",
ShortUsage: "up [flags]",
ShortHelp: "Connect to Tailscale, logging in if needed",
LongHelp: strings.TrimSpace(`
"tailscale up" connects this machine to your Tailscale network,
triggering authentication if necessary.
With no flags, "tailscale up" brings the network online without
changing any settings. (That is, it's the opposite of "tailscale
down").
If flags are specified, the flags must be the complete set of desired
settings. An error is returned if any setting would be changed as a
result of an unspecified flag's default value, unless the --reset
flag is also used.
`),
FlagSet: upFlagSet,
Exec: runUp,
}
func effectiveGOOS() string {
if v := os.Getenv("TS_DEBUG_UP_FLAG_GOOS"); v != "" {
return v
}
return runtime.GOOS
}
var upFlagSet = newUpFlagSet(effectiveGOOS(), &upArgs)
func newUpFlagSet(goos string, upArgs *upArgsT) *flag.FlagSet {
upf := flag.NewFlagSet("up", flag.ExitOnError)
upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs")
upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication")
upf.BoolVar(&upArgs.reset, "reset", false, "reset unspecified settings to their default values")
upf.StringVar(&upArgs.server, "login-server", ipn.DefaultControlURL, "base URL of control server")
upf.BoolVar(&upArgs.acceptRoutes, "accept-routes", false, "accept routes advertised by other Tailscale nodes")
upf.BoolVar(&upArgs.acceptDNS, "accept-dns", true, "accept DNS configuration from the admin panel")
upf.BoolVar(&upArgs.singleRoutes, "host-routes", true, "install host routes to other Tailscale nodes")
upf.StringVar(&upArgs.exitNodeIP, "exit-node", "", "Tailscale IP of the exit node for internet traffic, or empty string to not use an exit node")
upf.BoolVar(&upArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node")
upf.BoolVar(&upArgs.shieldsUp, "shields-up", false, "don't allow incoming connections")
upf.StringVar(&upArgs.advertiseTags, "advertise-tags", "", "comma-separated ACL tags to request; each must start with \"tag:\" (e.g. \"tag:eng,tag:montreal,tag:ssh\")")
upf.StringVar(&upArgs.authKeyOrFile, "authkey", "", `node authorization key; if it begins with "file:", then it's a path to a file containing the authkey`)
upf.StringVar(&upArgs.hostname, "hostname", "", "hostname to use instead of the one provided by the OS")
upf.StringVar(&upArgs.advertiseRoutes, "advertise-routes", "", "routes to advertise to other nodes (comma-separated, e.g. \"10.0.0.0/8,192.168.0.0/24\") or empty string to not advertise routes")
upf.BoolVar(&upArgs.advertiseDefaultRoute, "advertise-exit-node", false, "offer to be an exit node for internet traffic for the tailnet")
if safesocket.GOOSUsesPeerCreds(goos) {
upf.StringVar(&upArgs.opUser, "operator", "", "Unix username to allow to operate on tailscaled without sudo")
}
switch goos {
case "linux":
upf.BoolVar(&upArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes")
upf.StringVar(&upArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)")
case "windows":
upf.BoolVar(&upArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)")
}
return upf
}
func defaultNetfilterMode() string {
if distro.Get() == distro.Synology {
return "off"
}
return "on"
}
type upArgsT struct {
qr bool
reset bool
server string
acceptRoutes bool
acceptDNS bool
singleRoutes bool
exitNodeIP string
exitNodeAllowLANAccess bool
shieldsUp bool
forceReauth bool
forceDaemon bool
advertiseRoutes string
advertiseDefaultRoute bool
advertiseTags string
snat bool
netfilterMode string
authKeyOrFile string // "secret" or "file:/path/to/secret"
hostname string
opUser string
}
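// getAuthKey resolves authKeyOrFile: a value of the form "file:<path>"
// (for example, the illustrative "file:/var/lib/tailscale/authkey") is read
// from disk and whitespace-trimmed; any other value is returned as the key itself.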
func (a upArgsT) getAuthKey() (string, error) {
v := a.authKeyOrFile
if strings.HasPrefix(v, "file:") {
file := strings.TrimPrefix(v, "file:")
b, err := os.ReadFile(file)
if err != nil {
return "", err
}
return strings.TrimSpace(string(b)), nil
}
return v, nil
}
var upArgs upArgsT
func warnf(format string, args ...interface{}) {
fmt.Printf("Warning: "+format+"\n", args...)
}
var (
ipv4default = netaddr.MustParseIPPrefix("0.0.0.0/0")
ipv6default = netaddr.MustParseIPPrefix("::/0")
)
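// calcAdvertiseRoutes parses the --advertise-routes flag and, when
// --advertise-exit-node is set, appends the two /0 exit-node routes.
// For example (illustrative values), ("10.0.0.0/8,192.168.0.0/24", true)
// yields 0.0.0.0/0, ::/0, 10.0.0.0/8 and 192.168.0.0/24, sorted by prefix
// length and then by IP.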
func calcAdvertiseRoutes(advertiseRoutes string, advertiseDefaultRoute bool) ([]netaddr.IPPrefix, error) {
routeMap := map[netaddr.IPPrefix]bool{}
if advertiseRoutes != "" {
var default4, default6 bool
advroutes := strings.Split(advertiseRoutes, ",")
for _, s := range advroutes {
ipp, err := netaddr.ParseIPPrefix(s)
if err != nil {
return nil, fmt.Errorf("%q is not a valid IP address or CIDR prefix", s)
}
if ipp != ipp.Masked() {
return nil, fmt.Errorf("%s has non-address bits set; expected %s", ipp, ipp.Masked())
}
if ipp == ipv4default {
default4 = true
} else if ipp == ipv6default {
default6 = true
}
routeMap[ipp] = true
}
if default4 && !default6 {
return nil, fmt.Errorf("%s advertised without its IPv6 counterpart, please also advertise %s", ipv4default, ipv6default)
} else if default6 && !default4 {
			return nil, fmt.Errorf("%s advertised without its IPv4 counterpart, please also advertise %s", ipv6default, ipv4default)
}
}
if advertiseDefaultRoute {
routeMap[netaddr.MustParseIPPrefix("0.0.0.0/0")] = true
routeMap[netaddr.MustParseIPPrefix("::/0")] = true
}
routes := make([]netaddr.IPPrefix, 0, len(routeMap))
for r := range routeMap {
routes = append(routes, r)
}
sort.Slice(routes, func(i, j int) bool {
if routes[i].Bits() != routes[j].Bits() {
return routes[i].Bits() < routes[j].Bits()
}
return routes[i].IP().Less(routes[j].IP())
})
return routes, nil
}
// prefsFromUpArgs returns the ipn.Prefs for the provided args.
//
// Note that the parameters upArgs and warnf are named intentionally
// to shadow the globals to prevent accidental misuse of them. This
// function exists for testing and should have no side effects or
// outside interactions (e.g. no making Tailscale local API calls).
func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goos string) (*ipn.Prefs, error) {
routes, err := calcAdvertiseRoutes(upArgs.advertiseRoutes, upArgs.advertiseDefaultRoute)
if err != nil {
return nil, err
}
var exitNodeIP netaddr.IP
if upArgs.exitNodeIP != "" {
var err error
exitNodeIP, err = netaddr.ParseIP(upArgs.exitNodeIP)
if err != nil {
return nil, fmt.Errorf("invalid IP address %q for --exit-node: %v", upArgs.exitNodeIP, err)
}
} else if upArgs.exitNodeAllowLANAccess {
return nil, fmt.Errorf("--exit-node-allow-lan-access can only be used with --exit-node")
}
if upArgs.exitNodeIP != "" {
for _, ip := range st.TailscaleIPs {
if exitNodeIP == ip {
return nil, fmt.Errorf("cannot use %s as the exit node as it is a local IP address to this machine, did you mean --advertise-exit-node?", upArgs.exitNodeIP)
}
}
}
var tags []string
if upArgs.advertiseTags != "" {
tags = strings.Split(upArgs.advertiseTags, ",")
for _, tag := range tags {
err := tailcfg.CheckTag(tag)
if err != nil {
return nil, fmt.Errorf("tag: %q: %s", tag, err)
}
}
}
if len(upArgs.hostname) > 256 {
return nil, fmt.Errorf("hostname too long: %d bytes (max 256)", len(upArgs.hostname))
}
prefs := ipn.NewPrefs()
prefs.ControlURL = upArgs.server
prefs.WantRunning = true
prefs.RouteAll = upArgs.acceptRoutes
prefs.ExitNodeIP = exitNodeIP
prefs.ExitNodeAllowLANAccess = upArgs.exitNodeAllowLANAccess
prefs.CorpDNS = upArgs.acceptDNS
prefs.AllowSingleHosts = upArgs.singleRoutes
prefs.ShieldsUp = upArgs.shieldsUp
prefs.AdvertiseRoutes = routes
prefs.AdvertiseTags = tags
prefs.Hostname = upArgs.hostname
prefs.ForceDaemon = upArgs.forceDaemon
prefs.OperatorUser = upArgs.opUser
if goos == "linux" {
prefs.NoSNAT = !upArgs.snat
switch upArgs.netfilterMode {
case "on":
prefs.NetfilterMode = preftype.NetfilterOn
case "nodivert":
prefs.NetfilterMode = preftype.NetfilterNoDivert
warnf("netfilter=nodivert; add iptables calls to ts-* chains manually.")
case "off":
prefs.NetfilterMode = preftype.NetfilterOff
if defaultNetfilterMode() != "off" {
warnf("netfilter=off; configure iptables yourself.")
}
default:
return nil, fmt.Errorf("invalid value --netfilter-mode=%q", upArgs.netfilterMode)
}
}
return prefs, nil
}
// updatePrefs updates prefs based on curPrefs
//
// It returns a non-nil justEditMP if we're already running and none of
// the flags require a restart, so we can just do an EditPrefs call and
// change the prefs at runtime (e.g. changing hostname, changing
// advertised routes, etc).
//
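// For instance (illustrative), changing only --hostname while the backend is
// already Running produces a MaskedPrefs with HostnameSet and WantRunningSet,
// which is applied via EditPrefs without restarting the backend.
//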
// It returns simpleUp if we're running a simple "tailscale up" to
// transition to running from a previously-logged-in but down state,
// without changing any settings.
func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, justEditMP *ipn.MaskedPrefs, err error) {
if !env.upArgs.reset {
applyImplicitPrefs(prefs, curPrefs, env.user)
if err := checkForAccidentalSettingReverts(prefs, curPrefs, env); err != nil {
return false, nil, err
}
}
controlURLChanged := curPrefs.ControlURL != prefs.ControlURL &&
!(ipn.IsLoginServerSynonym(curPrefs.ControlURL) && ipn.IsLoginServerSynonym(prefs.ControlURL))
if controlURLChanged && env.backendState == ipn.Running.String() && !env.upArgs.forceReauth {
return false, nil, fmt.Errorf("can't change --login-server without --force-reauth")
}
tagsChanged := !reflect.DeepEqual(curPrefs.AdvertiseTags, prefs.AdvertiseTags)
simpleUp = env.flagSet.NFlag() == 0 &&
curPrefs.Persist != nil &&
curPrefs.Persist.LoginName != "" &&
env.backendState != ipn.NeedsLogin.String()
justEdit := env.backendState == ipn.Running.String() &&
!env.upArgs.forceReauth &&
!env.upArgs.reset &&
env.upArgs.authKeyOrFile == "" &&
!controlURLChanged &&
!tagsChanged
if justEdit {
justEditMP = new(ipn.MaskedPrefs)
justEditMP.WantRunningSet = true
justEditMP.Prefs = *prefs
env.flagSet.Visit(func(f *flag.Flag) {
updateMaskedPrefsFromUpFlag(justEditMP, f.Name)
})
}
return simpleUp, justEditMP, nil
}
func runUp(ctx context.Context, args []string) error {
if len(args) > 0 {
fatalf("too many non-flag arguments: %q", args)
}
st, err := tailscale.Status(ctx)
if err != nil {
return fixTailscaledConnectError(err)
}
origAuthURL := st.AuthURL
// printAuthURL reports whether we should print out the
// provided auth URL from an IPN notify.
printAuthURL := func(url string) bool {
if upArgs.authKeyOrFile != "" {
// Issue 1755: when using an authkey, don't
// show an authURL that might still be pending
// from a previous non-completed interactive
// login.
return false
}
if upArgs.forceReauth && url == origAuthURL {
return false
}
return true
}
if distro.Get() == distro.Synology {
notSupported := "not supported on Synology; see https://github.com/tailscale/tailscale/issues/1995"
if upArgs.acceptRoutes {
return errors.New("--accept-routes is " + notSupported)
}
if upArgs.exitNodeIP != "" {
return errors.New("--exit-node is " + notSupported)
}
if upArgs.netfilterMode != "off" {
return errors.New("--netfilter-mode values besides \"off\" " + notSupported)
}
}
prefs, err := prefsFromUpArgs(upArgs, warnf, st, effectiveGOOS())
if err != nil {
fatalf("%s", err)
}
if len(prefs.AdvertiseRoutes) > 0 {
if err := tailscale.CheckIPForwarding(context.Background()); err != nil {
warnf("%v", err)
}
}
curPrefs, err := tailscale.GetPrefs(ctx)
if err != nil {
return err
}
env := upCheckEnv{
goos: effectiveGOOS(),
user: os.Getenv("USER"),
flagSet: upFlagSet,
upArgs: upArgs,
backendState: st.BackendState,
curExitNodeIP: exitNodeIP(prefs, st),
}
simpleUp, justEditMP, err := updatePrefs(prefs, curPrefs, env)
if err != nil {
fatalf("%s", err)
}
if justEditMP != nil {
_, err := tailscale.EditPrefs(ctx, justEditMP)
return err
}
// At this point we need to subscribe to the IPN bus to watch
// for state transitions and possible need to authenticate.
c, bc, pumpCtx, cancel := connect(ctx)
defer cancel()
running := make(chan bool, 1) // gets value once in state ipn.Running
gotEngineUpdate := make(chan bool, 1) // gets value upon an engine update
pumpErr := make(chan error, 1)
go func() { pumpErr <- pump(pumpCtx, bc, c) }()
printed := !simpleUp
var loginOnce sync.Once
startLoginInteractive := func() { loginOnce.Do(func() { bc.StartLoginInteractive() }) }
bc.SetNotifyCallback(func(n ipn.Notify) {
if n.Engine != nil {
select {
case gotEngineUpdate <- true:
default:
}
}
if n.ErrMessage != nil {
msg := *n.ErrMessage
if msg == ipn.ErrMsgPermissionDenied {
switch effectiveGOOS() {
case "windows":
msg += " (Tailscale service in use by other user?)"
default:
msg += " (try 'sudo tailscale up [...]')"
}
}
fatalf("backend error: %v\n", msg)
}
if s := n.State; s != nil {
switch *s {
case ipn.NeedsLogin:
printed = true
startLoginInteractive()
case ipn.NeedsMachineAuth:
printed = true
fmt.Fprintf(os.Stderr, "\nTo authorize your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL())
case ipn.Running:
				// The full authentication process is done.
if printed {
// Only need to print an update if we printed the "please click" message earlier.
fmt.Fprintf(os.Stderr, "Success.\n")
}
select {
case running <- true:
default:
}
cancel()
}
}
if url := n.BrowseToURL; url != nil && printAuthURL(*url) {
printed = true
fmt.Fprintf(os.Stderr, "\nTo authenticate, visit:\n\n\t%s\n\n", *url)
if upArgs.qr {
q, err := qrcode.New(*url, qrcode.Medium)
if err != nil {
log.Printf("QR code error: %v", err)
} else {
fmt.Fprintf(os.Stderr, "%s\n", q.ToString(false))
}
}
}
})
// Wait for backend client to be connected so we know
// we're subscribed to updates. Otherwise we can miss
// an update upon its transition to running. Do so by causing some traffic
// back to the bus that we then wait on.
bc.RequestEngineStatus()
select {
case <-gotEngineUpdate:
case <-pumpCtx.Done():
return pumpCtx.Err()
case err := <-pumpErr:
return err
}
// Special case: bare "tailscale up" means to just start
// running, if there's ever been a login.
if simpleUp {
_, err := tailscale.EditPrefs(ctx, &ipn.MaskedPrefs{
Prefs: ipn.Prefs{
WantRunning: true,
},
WantRunningSet: true,
})
if err != nil {
return err
}
} else {
authKey, err := upArgs.getAuthKey()
if err != nil {
return err
}
opts := ipn.Options{
StateKey: ipn.GlobalDaemonStateKey,
AuthKey: authKey,
UpdatePrefs: prefs,
}
// On Windows, we still run in mostly the "legacy" way that
// predated the server's StateStore. That is, we send an empty
	// StateKey and send the prefs directly. Although Windows
	// supports server mode, the transition to StateStore
// is only half complete. Only server mode uses it, and the
// Windows service (~tailscaled) is the one that computes the
// StateKey based on the connection identity. So for now, just
// do as the Windows GUI's always done:
if effectiveGOOS() == "windows" {
// The Windows service will set this as needed based
// on our connection's identity.
opts.StateKey = ""
opts.Prefs = prefs
}
bc.Start(opts)
if upArgs.forceReauth {
startLoginInteractive()
}
}
// This whole 'up' mechanism is too complicated and results in
// hairy stuff like this select. We're ultimately waiting for
// 'running' to be done, but even in the case where
// it succeeds, other parts may shut down concurrently so we
// need to prioritize reads from 'running' if it's
// readable; its send does happen before the pump mechanism
// shuts down. (Issue 2333)
select {
case <-running:
return nil
case <-pumpCtx.Done():
select {
case <-running:
return nil
default:
}
return pumpCtx.Err()
case err := <-pumpErr:
select {
case <-running:
return nil
default:
}
return err
}
}
var (
prefsOfFlag = map[string][]string{} // "exit-node" => ExitNodeIP, ExitNodeID
)
func init() {
// Both these have the same ipn.Pref:
addPrefFlagMapping("advertise-exit-node", "AdvertiseRoutes")
addPrefFlagMapping("advertise-routes", "AdvertiseRoutes")
// And this flag has two ipn.Prefs:
addPrefFlagMapping("exit-node", "ExitNodeIP", "ExitNodeID")
// The rest are 1:1:
addPrefFlagMapping("accept-dns", "CorpDNS")
addPrefFlagMapping("accept-routes", "RouteAll")
addPrefFlagMapping("advertise-tags", "AdvertiseTags")
addPrefFlagMapping("host-routes", "AllowSingleHosts")
addPrefFlagMapping("hostname", "Hostname")
addPrefFlagMapping("login-server", "ControlURL")
addPrefFlagMapping("netfilter-mode", "NetfilterMode")
addPrefFlagMapping("shields-up", "ShieldsUp")
addPrefFlagMapping("snat-subnet-routes", "NoSNAT")
addPrefFlagMapping("exit-node-allow-lan-access", "ExitNodeAllowLANAccess")
addPrefFlagMapping("unattended", "ForceDaemon")
addPrefFlagMapping("operator", "OperatorUser")
}
func addPrefFlagMapping(flagName string, prefNames ...string) {
prefsOfFlag[flagName] = prefNames
prefType := reflect.TypeOf(ipn.Prefs{})
for _, pref := range prefNames {
// Crash at runtime if there's a typo in the prefName.
if _, ok := prefType.FieldByName(pref); !ok {
panic(fmt.Sprintf("invalid ipn.Prefs field %q", pref))
}
}
}
// preflessFlag reports whether flagName is a flag that doesn't
// correspond to an ipn.Pref.
func preflessFlag(flagName string) bool {
switch flagName {
case "authkey", "force-reauth", "reset", "qr":
return true
}
return false
}
func updateMaskedPrefsFromUpFlag(mp *ipn.MaskedPrefs, flagName string) {
if preflessFlag(flagName) {
return
}
if prefs, ok := prefsOfFlag[flagName]; ok {
for _, pref := range prefs {
reflect.ValueOf(mp).Elem().FieldByName(pref + "Set").SetBool(true)
}
return
}
panic(fmt.Sprintf("internal error: unhandled flag %q", flagName))
}
const accidentalUpPrefix = "Error: changing settings via 'tailscale up' requires mentioning all\n" +
"non-default flags. To proceed, either re-run your command with --reset or\n" +
"use the command below to explicitly mention the current value of\n" +
"all non-default settings:\n\n" +
"\ttailscale up"
// upCheckEnv are extra parameters describing the environment as
// needed by checkForAccidentalSettingReverts and friends.
type upCheckEnv struct {
goos string
user string
flagSet *flag.FlagSet
upArgs upArgsT
backendState string
curExitNodeIP netaddr.IP
}
// checkForAccidentalSettingReverts (the "up checker") checks for
// people running "tailscale up" with a subset of the flags they
// originally ran it with.
//
// For example, in Tailscale 1.6 and prior, a user might've advertised
// a tag, but later tried to change just one other setting and forgot
// to mention the tag later and silently wiped it out. We now
// require --reset to change preferences to flag default values when
// the flag is not mentioned on the command line.
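//
// Concretely (a made-up example): if a user once ran
// "tailscale up --advertise-tags=tag:eng" and later runs just
// "tailscale up --hostname=foo", the second invocation would otherwise
// silently drop the tag, so it errors and asks for either --reset or an
// explicit --advertise-tags=tag:eng alongside the new flag.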
//
// curPrefs is what's currently active on the server.
//
// mp is the mask of settings actually set, where mp.Prefs is the new
// preferences to set, including any values set from implicit flags.
func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheckEnv) error {
if curPrefs.ControlURL == "" {
// Don't validate things on initial "up" before a control URL has been set.
return nil
}
flagIsSet := map[string]bool{}
env.flagSet.Visit(func(f *flag.Flag) {
flagIsSet[f.Name] = true
})
if len(flagIsSet) == 0 {
// A bare "tailscale up" is a special case to just
// mean bringing the network up without any changes.
return nil
}
// flagsCur is what flags we'd need to use to keep the exact
// settings as-is.
flagsCur := prefsToFlags(env, curPrefs)
flagsNew := prefsToFlags(env, newPrefs)
var missing []string
for flagName := range flagsCur {
valCur, valNew := flagsCur[flagName], flagsNew[flagName]
if flagIsSet[flagName] {
continue
}
if reflect.DeepEqual(valCur, valNew) {
continue
}
if flagName == "login-server" && ipn.IsLoginServerSynonym(valCur) && ipn.IsLoginServerSynonym(valNew) {
continue
}
missing = append(missing, fmtFlagValueArg(flagName, valCur))
}
if len(missing) == 0 {
return nil
}
sort.Strings(missing)
// Compute the stringification of the explicitly provided args in flagSet
// to prepend to the command to run.
var explicit []string
env.flagSet.Visit(func(f *flag.Flag) {
type isBool interface {
IsBoolFlag() bool
}
if ib, ok := f.Value.(isBool); ok && ib.IsBoolFlag() {
if f.Value.String() == "false" {
explicit = append(explicit, "--"+f.Name+"=false")
} else {
explicit = append(explicit, "--"+f.Name)
}
} else {
explicit = append(explicit, fmtFlagValueArg(f.Name, f.Value.String()))
}
})
var sb strings.Builder
sb.WriteString(accidentalUpPrefix)
for _, a := range append(explicit, missing...) {
fmt.Fprintf(&sb, " %s", a)
}
sb.WriteString("\n\n")
return errors.New(sb.String())
}
// applyImplicitPrefs mutates prefs to add implicit preferences. Currently
// this is just the operator user, which only needs to be set if it doesn't
// match the current user.
//
// curUser is os.Getenv("USER"). It's pulled out for testability.
func applyImplicitPrefs(prefs, oldPrefs *ipn.Prefs, curUser string) {
if prefs.OperatorUser == "" && oldPrefs.OperatorUser == curUser {
prefs.OperatorUser = oldPrefs.OperatorUser
}
}
func flagAppliesToOS(flag, goos string) bool {
switch flag {
case "netfilter-mode", "snat-subnet-routes":
return goos == "linux"
case "unattended":
return goos == "windows"
}
return true
}
func prefsToFlags(env upCheckEnv, prefs *ipn.Prefs) (flagVal map[string]interface{}) {
ret := make(map[string]interface{})
exitNodeIPStr := func() string {
if !prefs.ExitNodeIP.IsZero() {
return prefs.ExitNodeIP.String()
}
if prefs.ExitNodeID.IsZero() || env.curExitNodeIP.IsZero() {
return ""
}
return env.curExitNodeIP.String()
}
fs := newUpFlagSet(env.goos, new(upArgsT) /* dummy */)
fs.VisitAll(func(f *flag.Flag) {
if preflessFlag(f.Name) {
return
}
set := func(v interface{}) {
if flagAppliesToOS(f.Name, env.goos) {
ret[f.Name] = v
} else {
ret[f.Name] = nil
}
}
switch f.Name {
default:
panic(fmt.Sprintf("unhandled flag %q", f.Name))
case "login-server":
set(prefs.ControlURL)
case "accept-routes":
set(prefs.RouteAll)
case "host-routes":
set(prefs.AllowSingleHosts)
case "accept-dns":
set(prefs.CorpDNS)
case "shields-up":
set(prefs.ShieldsUp)
case "exit-node":
set(exitNodeIPStr())
case "exit-node-allow-lan-access":
set(prefs.ExitNodeAllowLANAccess)
case "advertise-tags":
set(strings.Join(prefs.AdvertiseTags, ","))
case "hostname":
set(prefs.Hostname)
case "operator":
set(prefs.OperatorUser)
case "advertise-routes":
var sb strings.Builder
for i, r := range withoutExitNodes(prefs.AdvertiseRoutes) {
if i > 0 {
sb.WriteByte(',')
}
sb.WriteString(r.String())
}
set(sb.String())
case "advertise-exit-node":
set(hasExitNodeRoutes(prefs.AdvertiseRoutes))
case "snat-subnet-routes":
set(!prefs.NoSNAT)
case "netfilter-mode":
set(prefs.NetfilterMode.String())
case "unattended":
set(prefs.ForceDaemon)
}
})
return ret
}
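// fmtFlagValueArg renders one flag/value pair for the "re-run with" hint:
// true becomes a bare --flag, an empty string becomes --flag=, and anything
// else is shell-quoted, e.g. (illustrative) ("hostname", "web server")
// becomes --hostname='web server'.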
func fmtFlagValueArg(flagName string, val interface{}) string {
if val == true {
return "--" + flagName
}
if val == "" {
return "--" + flagName + "="
}
return fmt.Sprintf("--%s=%v", flagName, shellquote.Join(fmt.Sprint(val)))
}
func hasExitNodeRoutes(rr []netaddr.IPPrefix) bool {
var v4, v6 bool
for _, r := range rr {
if r.Bits() == 0 {
if r.IP().Is4() {
v4 = true
} else if r.IP().Is6() {
v6 = true
}
}
}
return v4 && v6
}
// withoutExitNodes returns rr unchanged if it has only 1 or 0 /0
// routes. If it has both IPv4 and IPv6 /0 routes, then it returns
// a copy with all /0 routes removed.
func withoutExitNodes(rr []netaddr.IPPrefix) []netaddr.IPPrefix {
if !hasExitNodeRoutes(rr) {
return rr
}
var out []netaddr.IPPrefix
for _, r := range rr {
if r.Bits() > 0 {
out = append(out, r)
}
}
return out
}
// exitNodeIP returns the exit node IP from p, using st to map
// it from its ID form to an IP address if needed.
func exitNodeIP(p *ipn.Prefs, st *ipnstate.Status) (ip netaddr.IP) {
if p == nil {
return
}
if !p.ExitNodeIP.IsZero() {
return p.ExitNodeIP
}
id := p.ExitNodeID
if id.IsZero() {
return
}
for _, p := range st.Peer {
if p.ID == id {
if len(p.TailscaleIPs) > 0 {
return p.TailscaleIPs[0]
}
break
}
}
return
}
| [
"\"TS_DEBUG_UP_FLAG_GOOS\"",
"\"USER\"",
"\"USER\""
]
| []
| [
"USER",
"TS_DEBUG_UP_FLAG_GOOS"
]
| [] | ["USER", "TS_DEBUG_UP_FLAG_GOOS"] | go | 2 | 0 | |
movieverse/public_datasets.py | # ------ Python standard library imports ---------------------------------------
from typing import Optional
import os
# ------ External imports ------------------------------------------------------
# ------ Imports from own package or module ------------------------------------
from movieverse.movieverse import Movieverse
from movieverse.metadatalib import MetaDataLib
#-------------------------------------------------------------------------------
def _dataset_directory():
fallback = os.path.join(os.path.expanduser("~"), ".movieverse_data")
dir_ = os.environ.get("MOVIEVERSE_DATASET_DIR", fallback)
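    # e.g. (illustrative) export MOVIEVERSE_DATASET_DIR=/data/movieverse to
    # override the default ~/.movieverse_data location.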
if not os.path.exists(dir_):
os.makedirs(dir_)
return dir_
def load_movielens(dataset: str = '100k',
movieverse_name: Optional[str] = None,
directory: str = '') -> Movieverse:
path = directory or _dataset_directory()
if dataset == '100k':
url = 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'
| []
| []
| [
"MOVIEVERSE_DATASET_DIR"
]
| [] | ["MOVIEVERSE_DATASET_DIR"] | python | 1 | 0 | |
internal/web/routes.go | package web
import (
"net/http"
"github.com/justinas/alice"
)
func (app *App) routes() http.Handler {
baseMiddleware := alice.New(app.recoverPanic)
webMiddleware := alice.New(
app.sessionManager.LoadAndSave,
app.remember,
app.authenticate,
app.flashMessage,
app.inertiaManager.Middleware,
)
mux := http.NewServeMux()
mux.Handle("/", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.commandIndex))
mux.Handle("/command/create", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.commandCreate))
mux.Handle("/command/edit", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.commandEdit))
mux.Handle("/command/refresh-token", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.commandRefreshToken))
mux.Handle("/command/delete", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.commandDelete))
mux.Handle("/call", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.callIndex))
mux.Handle("/call/history", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.callHistory))
mux.Handle("/call/delete", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.callDelete))
mux.Handle("/user/create", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.userCreate))
mux.Handle("/user/edit", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.userEdit))
mux.Handle("/user/delete", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.userDelete))
mux.Handle("/login", webMiddleware.Append(app.redirectIfAuthenticated).ThenFunc(app.login))
mux.Handle("/logout", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.logout))
mux.Handle("/user", webMiddleware.Append(app.redirectIfNotAuthenticated).ThenFunc(app.userIndex))
fileServer := http.FileServer(http.Dir("./public/"))
mux.Handle("/css/", fileServer)
mux.Handle("/images/", fileServer)
mux.Handle("/js/", fileServer)
mux.Handle("/favicon.ico", fileServer)
return baseMiddleware.Then(mux)
}
| []
| []
| []
| [] | [] | go | null | null | null |
function.go | package p
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"time"
"cloud.google.com/go/bigquery"
joonix "github.com/joonix/log"
"github.com/sirupsen/logrus"
)
type PubSubMessage struct {
Data []byte `json:"data"`
}
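// HomeAssistantMessage is the JSON payload expected inside the Pub/Sub data,
// e.g. (values illustrative, not taken from a real device):
//   {"state": "21.5", "last_changed": "2021-01-02T15:04:05.999999999Z", "entity_id": "sensor.living_room_temperature"}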
type HomeAssistantMessage struct {
State string `json:"state"`
LastChanged string `json:"last_changed"`
EntityId string `json:"entity_id"`
}
type HomeAssistantBigquery struct {
State float64 `bigquery:"state"`
LastChanged time.Time `bigquery:"occured_at"`
EntityId string `bigquery:"entity_id"`
}
// Main consumes a Pub/Sub message.
func Main(ctx context.Context, m PubSubMessage) error {
logrus.SetFormatter(joonix.NewFormatter())
logrus.Infof(string(m.Data))
var homeAssistantMessage HomeAssistantMessage
err := json.Unmarshal(m.Data, &homeAssistantMessage)
if err != nil {
return fmt.Errorf("error unmarshalling message : %w", err)
}
temperature, err := strconv.ParseFloat(homeAssistantMessage.State, 32)
if err != nil {
return fmt.Errorf("state is not a float : %w", err)
}
occuredAt, err := time.Parse(time.RFC3339Nano, homeAssistantMessage.LastChanged)
if err != nil {
return fmt.Errorf("LastChanged is not RFC3339Nano : %w", err)
}
projectId := os.Getenv("GCP_PROJECTID")
if projectId == "" {
return fmt.Errorf("environment variable GCP_PROJECTID is mandatory")
}
client, err := bigquery.NewClient(ctx, projectId)
if err != nil {
return fmt.Errorf("bigquery.NewClient: %v", err)
}
defer func(client *bigquery.Client) {
_ = client.Close()
}(client)
dataset := os.Getenv("BIGQUERY_DATASET")
if dataset == "" {
return fmt.Errorf("environment variable BIGQUERY_DATASET is mandatory")
}
table := os.Getenv("BIGQUERY_TABLE")
if table == "" {
return fmt.Errorf("environment variable BIGQUERY_TABLE is mandatory")
}
inserter := client.Dataset(dataset).Table(table).Inserter()
items := []*HomeAssistantBigquery{
{EntityId: homeAssistantMessage.EntityId, State: temperature, LastChanged: occuredAt},
}
if err := inserter.Put(ctx, items); err != nil {
return fmt.Errorf("error when inserting into BQ : %w", err)
}
return nil
}
| [
"\"GCP_PROJECTID\"",
"\"BIGQUERY_DATASET\"",
"\"BIGQUERY_TABLE\""
]
| []
| [
"BIGQUERY_TABLE",
"BIGQUERY_DATASET",
"GCP_PROJECTID"
]
| [] | ["BIGQUERY_TABLE", "BIGQUERY_DATASET", "GCP_PROJECTID"] | go | 3 | 0 | |
build/params_debug.go | // +build debug
package build
import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
"os"
)
const BootstrappersFile = "fileppnet.pi"
const GenesisFile = "fileppnet.car"
const UpgradeBreezeHeight = -1
const BreezeGasTampingDuration = 0
const UpgradeSmokeHeight = -1
const UpgradeIgnitionHeight = -2
const UpgradeRefuelHeight = -3
const UpgradeTapeHeight = -4
const UpgradeActorsV2Height = 10
const UpgradeLiftoffHeight = -5
const UpgradeKumquatHeight = 15
const UpgradeCalicoHeight = 20
const UpgradePersianHeight = 25
const UpgradeOrangeHeight = 27
const UpgradeClausHeight = 30
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}
func init() {
BuildType |= BuildDebug
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg2KiBV1,
abi.RegisteredSealProof_StackedDrg8MiBV1,
abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
)
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
miner.PreCommitChallengeDelay = abi.ChainEpoch(60)
miner2.PreCommitChallengeDelay = abi.ChainEpoch(60)
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
SetAddressNetwork(address.Mainnet)
}
Devnet = false
}
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
const BootstrapPeerThreshold = 1 | [
"\"LOTUS_USE_TEST_ADDRESSES\""
]
| []
| [
"LOTUS_USE_TEST_ADDRESSES"
]
| [] | ["LOTUS_USE_TEST_ADDRESSES"] | go | 1 | 0 | |
vendor/src/github.com/docker/docker/integration-cli/docker_cli_build_test.go | package main
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"text/template"
"time"
"github.com/docker/docker/builder/dockerfile/command"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/integration/checker"
icmd "github.com/docker/docker/pkg/integration/cmd"
"github.com/docker/docker/pkg/stringutils"
"github.com/go-check/check"
)
func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) {
name := "testbuildjsonemptyrun"
_, err := buildImage(
name,
`
FROM busybox
RUN []
`,
true)
if err != nil {
c.Fatal("error when dealing with a RUN statement with empty JSON array")
}
}
func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) {
name := "testbuildshcmdjsonentrypoint"
_, err := buildImage(
name,
`
FROM busybox
ENTRYPOINT ["echo"]
CMD echo test
`,
true)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "--rm", name)
if daemonPlatform == "windows" {
if !strings.Contains(out, "cmd /S /C echo test") {
c.Fatalf("CMD did not contain cmd /S /C echo test : %q", out)
}
} else {
if strings.TrimSpace(out) != "/bin/sh -c echo test" {
c.Fatalf("CMD did not contain /bin/sh -c : %q", out)
}
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) {
// Windows does not support FROM scratch or the USER command
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
_, err := buildImage(name, `
FROM scratch
ENV user foo
USER ${user}
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.User")
if res != `"foo"` {
c.Fatal("User foo from environment not in Config.User on image")
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) {
name := "testbuildenvironmentreplacement"
var volumePath string
if daemonPlatform == "windows" {
volumePath = "c:/quux"
} else {
volumePath = "/quux"
}
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
ENV volume `+volumePath+`
VOLUME ${volume}
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Volumes")
var volumes map[string]interface{}
if err := json.Unmarshal([]byte(res), &volumes); err != nil {
c.Fatal(err)
}
if _, ok := volumes[volumePath]; !ok {
c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image")
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) {
// Windows does not support FROM scratch or the EXPOSE command
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
_, err := buildImage(name, `
FROM scratch
ENV port 80
EXPOSE ${port}
ENV ports " 99 100 "
EXPOSE ${ports}
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.ExposedPorts")
var exposedPorts map[string]interface{}
if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil {
c.Fatal(err)
}
exp := []int{80, 99, 100}
for _, p := range exp {
tmp := fmt.Sprintf("%d/tcp", p)
if _, ok := exposedPorts[tmp]; !ok {
c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p)
}
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) {
name := "testbuildenvironmentreplacement"
_, err := buildImage(name, `
FROM busybox
ENV MYWORKDIR /work
RUN mkdir ${MYWORKDIR}
WORKDIR ${MYWORKDIR}
`, true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) {
name := "testbuildenvironmentreplacement"
ctx, err := fakeContext(`
FROM `+minimalBaseImage()+`
ENV baz foo
ENV quux bar
ENV dot .
ENV fee fff
ENV gee ggg
ADD ${baz} ${dot}
COPY ${quux} ${dot}
ADD ${zzz:-${fee}} ${dot}
COPY ${zzz:-${gee}} ${dot}
`,
map[string]string{
"foo": "test1",
"bar": "test2",
"fff": "test3",
"ggg": "test4",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
_, err := buildImage(name,
`
FROM busybox
ENV foo zzz
ENV bar ${foo}
ENV abc1='$foo'
ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}"
RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo)
ENV abc2="\$foo"
RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo)
ENV abc3 '$foo'
RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo)
ENV abc4 "\$foo"
RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo)
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Env")
envResult := []string{}
if err = json.Unmarshal([]byte(res), &envResult); err != nil {
c.Fatal(err)
}
found := false
envCount := 0
for _, env := range envResult {
parts := strings.SplitN(env, "=", 2)
if parts[0] == "bar" {
found = true
if parts[1] != "zzz" {
c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1])
}
		} else if strings.HasPrefix(parts[0], "env") {
			envCount++
			if parts[1] != "zzz" {
				c.Fatalf("%s should be 'zzz' but instead it's %q", parts[0], parts[1])
			}
		}
}
if !found {
c.Fatal("Never found the `bar` env variable")
}
if envCount != 4 {
c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult)
}
}
func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) {
// The volume paths used in this test are invalid on Windows
testRequires(c, DaemonIsLinux)
name := "testbuildhandleescapes"
_, err := buildImage(name,
`
FROM scratch
ENV FOO bar
VOLUME ${FOO}
`, true)
if err != nil {
c.Fatal(err)
}
var result map[string]map[string]struct{}
res := inspectFieldJSON(c, name, "Config.Volumes")
if err = json.Unmarshal([]byte(res), &result); err != nil {
c.Fatal(err)
}
if _, ok := result["bar"]; !ok {
c.Fatalf("Could not find volume bar set from env foo in volumes table, got %q", result)
}
deleteImages(name)
_, err = buildImage(name,
`
FROM scratch
ENV FOO bar
VOLUME \${FOO}
`, true)
if err != nil {
c.Fatal(err)
}
res = inspectFieldJSON(c, name, "Config.Volumes")
if err = json.Unmarshal([]byte(res), &result); err != nil {
c.Fatal(err)
}
if _, ok := result["${FOO}"]; !ok {
c.Fatalf("Could not find volume ${FOO} set from env foo in volumes table, got %q", result)
}
deleteImages(name)
// this test in particular provides *7* backslashes and expects 6 to come back.
// Like above, the first escape is swallowed and the rest are treated as
// literals, this one is just less obvious because of all the character noise.
_, err = buildImage(name,
`
FROM scratch
ENV FOO bar
VOLUME \\\\\\\${FOO}
`, true)
if err != nil {
c.Fatal(err)
}
res = inspectFieldJSON(c, name, "Config.Volumes")
if err = json.Unmarshal([]byte(res), &result); err != nil {
c.Fatal(err)
}
if _, ok := result[`\\\${FOO}`]; !ok {
c.Fatalf(`Could not find volume \\\${FOO} set from env foo in volumes table, got %q`, result)
}
}
func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) {
name := "testbuildonbuildlowercase"
name2 := "testbuildonbuildlowercase2"
_, err := buildImage(name,
`
FROM busybox
onbuild run echo quux
`, true)
if err != nil {
c.Fatal(err)
}
_, out, err := buildImageWithOut(name2, fmt.Sprintf(`
FROM %s
`, name), true)
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "quux") {
c.Fatalf("Did not receive the expected echo text, got %s", out)
}
if strings.Contains(out, "ONBUILD ONBUILD") {
c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out)
}
}
func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvescapes"
_, err := buildImage(name,
`
FROM busybox
ENV TEST foo
CMD echo \$
`,
true)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "-t", name)
if strings.TrimSpace(out) != "$" {
c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
}
}
func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvoverwrite"
_, err := buildImage(name,
`
FROM busybox
ENV TEST foo
CMD echo ${TEST}
`,
true)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name)
if strings.TrimSpace(out) != "bar" {
c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
}
}
func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) {
name1 := "onbuildcmd"
name2 := "onbuildgenerated"
_, err := buildImage(name1, `
FROM busybox
ONBUILD CMD ["hello world"]
ONBUILD ENTRYPOINT ["echo"]
ONBUILD RUN ["true"]`,
false)
if err != nil {
c.Fatal(err)
}
_, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", name2)
if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
c.Fatalf("did not get echo output from onbuild. Got: %q", out)
}
}
func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) {
name1 := "onbuildcmd"
name2 := "onbuildgenerated"
_, err := buildImage(name1, `
FROM busybox
ONBUILD ENTRYPOINT ["echo"]`,
false)
if err != nil {
c.Fatal(err)
}
_, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", name2)
if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
c.Fatal("got malformed output from onbuild", out)
}
}
func (s *DockerSuite) TestBuildCacheAdd(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testbuildtwoimageswithadd"
server, err := fakeStorage(map[string]string{
"robots.txt": "hello",
"index.html": "world",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
if _, err := buildImage(name,
fmt.Sprintf(`FROM scratch
ADD %s/robots.txt /`, server.URL()),
true); err != nil {
c.Fatal(err)
}
if err != nil {
c.Fatal(err)
}
deleteImages(name)
_, out, err := buildImageWithOut(name,
fmt.Sprintf(`FROM scratch
ADD %s/index.html /`, server.URL()),
true)
if err != nil {
c.Fatal(err)
}
if strings.Contains(out, "Using cache") {
c.Fatal("2nd build used cache on ADD, it shouldn't")
}
}
func (s *DockerSuite) TestBuildLastModified(c *check.C) {
name := "testbuildlastmodified"
server, err := fakeStorage(map[string]string{
"file": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
var out, out2 string
dFmt := `FROM busybox
ADD %s/file /`
dockerfile := fmt.Sprintf(dFmt, server.URL())
if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil {
c.Fatal(err)
}
out, _ = dockerCmd(c, "run", name, "ls", "-le", "/file")
// Build it again and make sure the mtime of the file didn't change.
// Wait a few seconds to make sure the time changed enough to notice
time.Sleep(2 * time.Second)
if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil {
c.Fatal(err)
}
out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file")
if out != out2 {
c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", out, out2)
}
// Now 'touch' the file and make sure the timestamp DID change this time
// Create a new fakeStorage instead of just using Add() to help windows
server, err = fakeStorage(map[string]string{
"file": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
dockerfile = fmt.Sprintf(dFmt, server.URL())
if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil {
c.Fatal(err)
}
out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file")
if out == out2 {
c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", out, out2)
}
}
// Regression for https://github.com/docker/docker/pull/27805
// Makes sure that we don't use the cache if the contents of
// a file in a subfolder of the context are modified and we re-build.
func (s *DockerSuite) TestBuildModifyFileInFolder(c *check.C) {
name := "testbuildmodifyfileinfolder"
ctx, err := fakeContext(`FROM busybox
RUN ["mkdir", "/test"]
ADD folder/file /test/changetarget`,
map[string]string{})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if err := ctx.Add("folder/file", "first"); err != nil {
c.Fatal(err)
}
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
if err := ctx.Add("folder/file", "second"); err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("cache was used even though file contents in folder was changed")
}
}
func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddimg"
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
ADD test_file /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod),
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// Issue #3960: "ADD src ." hangs
func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) {
name := "testaddsinglefiletoworkdir"
ctx, err := fakeContext(`FROM busybox
ADD test_file .`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
errChan := make(chan error)
go func() {
_, err := buildImageFromContext(name, ctx, true)
errChan <- err
close(errChan)
}()
select {
case <-time.After(15 * time.Second):
c.Fatal("Build with adding to workdir timed out")
case err := <-errChan:
c.Assert(err, check.IsNil)
}
}
func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddsinglefiletoexistdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
ADD test_file /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
server, err := fakeStorage(map[string]string{
"robots.txt": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
name := "testcopymultiplefilestofile"
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_file1 test_file2 /exists/
ADD test_file3 test_file4 %s/robots.txt /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
`, server.URL()),
map[string]string{
"test_file1": "test1",
"test_file2": "test2",
"test_file3": "test3",
"test_file4": "test4",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// This test is mainly for user namespaces to verify that new directories
// are created as the remapped root uid/gid pair
func (s *DockerSuite) TestBuildAddToNewDestination(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddtonewdest"
ctx, err := fakeContext(`FROM busybox
ADD . /new_dir
RUN ls -l /
RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`,
map[string]string{
"test_dir/test_file": "test file",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// This test is mainly for user namespaces to verify that new directories
// are created as the remapped root uid/gid pair
func (s *DockerSuite) TestBuildCopyToNewParentDirectory(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopytonewdir"
ctx, err := fakeContext(`FROM busybox
COPY test_dir /new_dir
RUN ls -l /new_dir
RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`,
map[string]string{
"test_dir/test_file": "test file",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// This test is mainly for user namespaces to verify that new directories
// are created as the remapped root uid/gid pair
func (s *DockerSuite) TestBuildWorkdirIsContainerRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testworkdirownership"
if _, err := buildImage(name, `FROM busybox
WORKDIR /new_dir
RUN ls -l /
RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) {
testRequires(c, DaemonIsLinux) // Not currently passing on Windows
name := "testaddfilewithwhitespace"
ctx, err := fakeContext(`FROM busybox
RUN mkdir "/test dir"
RUN mkdir "/test_dir"
ADD [ "test file1", "/test_file1" ]
ADD [ "test_file2", "/test file2" ]
ADD [ "test file3", "/test file3" ]
ADD [ "test dir/test_file4", "/test_dir/test_file4" ]
ADD [ "test_dir/test_file5", "/test dir/test_file5" ]
ADD [ "test dir/test_file6", "/test dir/test_file6" ]
RUN [ $(cat "/test_file1") = 'test1' ]
RUN [ $(cat "/test file2") = 'test2' ]
RUN [ $(cat "/test file3") = 'test3' ]
RUN [ $(cat "/test_dir/test_file4") = 'test4' ]
RUN [ $(cat "/test dir/test_file5") = 'test5' ]
RUN [ $(cat "/test dir/test_file6") = 'test6' ]`,
map[string]string{
"test file1": "test1",
"test_file2": "test2",
"test file3": "test3",
"test dir/test_file4": "test4",
"test_dir/test_file5": "test5",
"test dir/test_file6": "test6",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) {
dockerfile := `FROM busybox
RUN mkdir "/test dir"
RUN mkdir "/test_dir"
COPY [ "test file1", "/test_file1" ]
COPY [ "test_file2", "/test file2" ]
COPY [ "test file3", "/test file3" ]
COPY [ "test dir/test_file4", "/test_dir/test_file4" ]
COPY [ "test_dir/test_file5", "/test dir/test_file5" ]
COPY [ "test dir/test_file6", "/test dir/test_file6" ]
RUN [ $(cat "/test_file1") = 'test1' ]
RUN [ $(cat "/test file2") = 'test2' ]
RUN [ $(cat "/test file3") = 'test3' ]
RUN [ $(cat "/test_dir/test_file4") = 'test4' ]
RUN [ $(cat "/test dir/test_file5") = 'test5' ]
RUN [ $(cat "/test dir/test_file6") = 'test6' ]`
if daemonPlatform == "windows" {
dockerfile = `FROM ` + WindowsBaseImage + `
RUN mkdir "C:/test dir"
RUN mkdir "C:/test_dir"
COPY [ "test file1", "/test_file1" ]
COPY [ "test_file2", "/test file2" ]
COPY [ "test file3", "/test file3" ]
COPY [ "test dir/test_file4", "/test_dir/test_file4" ]
COPY [ "test_dir/test_file5", "/test dir/test_file5" ]
COPY [ "test dir/test_file6", "/test dir/test_file6" ]
RUN find "test1" "C:/test_file1"
RUN find "test2" "C:/test file2"
RUN find "test3" "C:/test file3"
RUN find "test4" "C:/test_dir/test_file4"
RUN find "test5" "C:/test dir/test_file5"
RUN find "test6" "C:/test dir/test_file6"`
}
name := "testcopyfilewithwhitespace"
ctx, err := fakeContext(dockerfile,
map[string]string{
"test file1": "test1",
"test_file2": "test2",
"test file3": "test3",
"test dir/test_file4": "test4",
"test_dir/test_file5": "test5",
"test dir/test_file6": "test6",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) {
name := "testcopywildcard"
server, err := fakeStorage(map[string]string{
"robots.txt": "hello",
"index.html": "world",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
COPY file*.txt /tmp/
RUN ls /tmp/file1.txt /tmp/file2.txt
RUN [ "mkdir", "/tmp1" ]
COPY dir* /tmp1/
RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file
RUN [ "mkdir", "/tmp2" ]
ADD dir/*dir %s/robots.txt /tmp2/
RUN ls /tmp2/nest_nest_file /tmp2/robots.txt
`, server.URL()),
map[string]string{
"file1.txt": "test1",
"file2.txt": "test2",
"dir/nested_file": "nested file",
"dir/nested_dir/nest_nest_file": "2 times nested",
"dirt": "dirty",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
// Now make sure we use a cache the 2nd time
id2, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("didn't use the cache")
}
}
func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) {
name := "testcopywildcardinname"
ctx, err := fakeContext(`FROM busybox
COPY *.txt /tmp/
RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ]
`, map[string]string{"*.txt": "hi there"})
if err != nil {
// Normally we would do c.Fatal(err) here but given that
// the odds of this failing are so rare, it must be because
// the OS we're running the client on doesn't support * in
// filenames (like windows). So, instead of failing the test
// just let it pass. Then we don't need to explicitly
// say which OSs this works on or not.
return
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatalf("should have built: %q", err)
}
}
func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) {
name := "testcopywildcardcache"
ctx, err := fakeContext(`FROM busybox
COPY file1.txt /tmp/`,
map[string]string{
"file1.txt": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
// Now make sure we use a cache the 2nd time even with wild cards.
// Use the same context so the file is the same and the checksum will match
ctx.Add("Dockerfile", `FROM busybox
COPY file*.txt /tmp/`)
id2, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("didn't use the cache")
}
}
func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddsinglefiletononexistingdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
ADD test_file /test_dir/
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testadddircontenttoroot"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
ADD test_dir /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testadddircontenttoexistingdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
ADD test_dir/ /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`,
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddwholedirtoroot"
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
ADD test_dir /test_dir
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod),
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// Testing #5941
func (s *DockerSuite) TestBuildAddEtcToRoot(c *check.C) {
name := "testaddetctoroot"
ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
ADD . /`,
map[string]string{
"etc/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// Testing #9401
func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testaddpreservesfilesspecialbits"
ctx, err := fakeContext(`FROM busybox
ADD suidbin /usr/bin/suidbin
RUN chmod 4755 /usr/bin/suidbin
RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]
ADD ./data/ /
RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`,
map[string]string{
"suidbin": "suidbin",
"/data/usr/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
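// COPY of a single file to / should leave it owned by root with the default file mode and must not affect existing files.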
func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopysinglefiletoroot"
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
COPY test_file /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod),
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// Issue #3960: "ADD src ." hangs - adapted for COPY
func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) {
name := "testcopysinglefiletoworkdir"
ctx, err := fakeContext(`FROM busybox
COPY test_file .`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
errChan := make(chan error)
go func() {
_, err := buildImageFromContext(name, ctx, true)
errChan <- err
close(errChan)
}()
select {
case <-time.After(15 * time.Second):
c.Fatal("Build with adding to workdir timed out")
case err := <-errChan:
c.Assert(err, check.IsNil)
}
}
func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopysinglefiletoexistdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_file /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopysinglefiletononexistdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
COPY test_file /test_dir/
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopydircontenttoroot"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
COPY test_dir /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopydircontenttoexistdir"
ctx, err := fakeContext(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_dir/ /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`,
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
name := "testcopywholedirtoroot"
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
COPY test_dir /test_dir
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod),
map[string]string{
"test_dir/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) {
name := "testcopyetctoroot"
ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
COPY . /`,
map[string]string{
"etc/test_file": "test1",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
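// ADD of a tar archive containing a symlink that points outside the build root, followed by an ADD through that symlink, must not write anything onto the host filesystem.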
func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) {
testRequires(c, DaemonIsLinux) // Not currently working on Windows
dockerfile := `
FROM scratch
ADD links.tar /
ADD foo.txt /symlink/
`
targetFile := "foo.txt"
var (
name = "test-link-absolute"
)
ctx, err := fakeContext(dockerfile, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
var symlinkTarget string
if runtime.GOOS == "windows" {
var driveLetter string
if abs, err := filepath.Abs(tempDir); err != nil {
c.Fatal(err)
} else {
driveLetter = abs[:1]
}
tempDirWithoutDrive := tempDir[2:]
symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive)
} else {
symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir)
}
tarPath := filepath.Join(ctx.Dir, "links.tar")
nonExistingFile := filepath.Join(tempDir, targetFile)
fooPath := filepath.Join(ctx.Dir, targetFile)
tarOut, err := os.Create(tarPath)
if err != nil {
c.Fatal(err)
}
tarWriter := tar.NewWriter(tarOut)
header := &tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: symlinkTarget,
Mode: 0755,
Uid: 0,
Gid: 0,
}
err = tarWriter.WriteHeader(header)
if err != nil {
c.Fatal(err)
}
tarWriter.Close()
tarOut.Close()
foo, err := os.Create(fooPath)
if err != nil {
c.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
c.Fatal(err)
}
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
}
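// Same escape protection as above, but the symlink is created at build time inside a VOLUME.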
func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) {
testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox
const (
dockerfileTemplate = `
FROM busybox
RUN ln -s /../../../../../../../../%s /x
VOLUME /x
ADD foo.txt /x/`
targetFile = "foo.txt"
)
var (
name = "test-link-absolute-volume"
dockerfile = ""
)
tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir)
nonExistingFile := filepath.Join(tempDir, targetFile)
ctx, err := fakeContext(dockerfile, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
fooPath := filepath.Join(ctx.Dir, targetFile)
foo, err := os.Create(fooPath)
if err != nil {
c.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
c.Fatal(err)
}
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
}
// Issue #5270 - ensure we throw a better error than "unexpected EOF"
// when we can't access files in the context.
func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) {
testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows
{
name := "testbuildinaccessiblefiles"
ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
// This is used to ensure we detect inaccessible files early during build in the cli client
pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess")
if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown file to root: %s", err)
}
if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name))
buildCmd.Dir = ctx.Dir
out, _, err := runCommandWithOutput(buildCmd)
if err == nil {
c.Fatalf("build should have failed: %s %s", err, out)
}
// check if we've detected the failure before we started building
if !strings.Contains(out, "no permission to read from ") {
c.Fatalf("output should've contained the string: no permission to read from but contained: %s", out)
}
if !strings.Contains(out, "Error checking context") {
c.Fatalf("output should've contained the string: Error checking context")
}
}
{
name := "testbuildinaccessibledirectory"
ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"directoryWeCantStat/bar": "foo"})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
// This is used to ensure we detect inaccessible directories early during build in the cli client
pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown directory to root: %s", err)
}
if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
c.Fatalf("failed to chmod directory to 444: %s", err)
}
if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name))
buildCmd.Dir = ctx.Dir
out, _, err := runCommandWithOutput(buildCmd)
if err == nil {
c.Fatalf("build should have failed: %s %s", err, out)
}
// check if we've detected the failure before we started building
if !strings.Contains(out, "can't stat") {
c.Fatalf("output should've contained the string: can't access %s", out)
}
if !strings.Contains(out, "Error checking context") {
c.Fatalf("output should've contained the string: Error checking context\ngot:%s", out)
}
}
{
name := "testlinksok"
ctx, err := fakeContext("FROM scratch\nADD . /foo/", nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
target := "../../../../../../../../../../../../../../../../../../../azA"
if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil {
c.Fatal(err)
}
defer os.Remove(target)
// This is used to ensure we don't follow links when checking if everything in the context is accessible
// This test doesn't require that we run commands as an unprivileged user
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
{
name := "testbuildignoredinaccessible"
ctx, err := fakeContext("FROM scratch\nADD . /foo/",
map[string]string{
"directoryWeCantStat/bar": "foo",
".dockerignore": "directoryWeCantStat",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
// This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern
pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown directory to root: %s", err)
}
if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
c.Fatalf("failed to chmod directory to 444: %s", err)
}
if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
result := icmd.RunCmd(icmd.Cmd{
Dir: ctx.Dir,
Command: []string{"su", "unprivilegeduser", "-c",
fmt.Sprintf("%s build -t %s .", dockerBinary, name)},
})
result.Assert(c, icmd.Expected{})
}
}
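// --force-rm must not leave intermediate containers behind, even when the build fails.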
func (s *DockerSuite) TestBuildForceRm(c *check.C) {
containerCountBefore, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
name := "testbuildforcerm"
ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
RUN true
RUN thiswillfail`, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
dockerCmdInDir(c, ctx.Dir, "build", "-t", name, "--force-rm", ".")
containerCountAfter, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
if containerCountBefore != containerCountAfter {
c.Fatalf("--force-rm shouldn't have left containers behind")
}
}
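// --rm (also the default) removes intermediate containers after a successful build; --rm=false keeps them.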
func (s *DockerSuite) TestBuildRm(c *check.C) {
name := "testbuildrm"
ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
ADD foo /
ADD foo /`, map[string]string{"foo": "bar"})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
{
containerCountBefore, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".")
if err != nil {
c.Fatal("failed to build the image", out)
}
containerCountAfter, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
if containerCountBefore != containerCountAfter {
c.Fatalf("-rm shouldn't have left containers behind")
}
deleteImages(name)
}
{
containerCountBefore, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".")
if err != nil {
c.Fatal("failed to build the image", out)
}
containerCountAfter, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
if containerCountBefore != containerCountAfter {
c.Fatalf("--rm shouldn't have left containers behind")
}
deleteImages(name)
}
{
containerCountBefore, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".")
if err != nil {
c.Fatal("failed to build the image", out)
}
containerCountAfter, err := getContainerCount()
if err != nil {
c.Fatalf("failed to get the container count: %s", err)
}
if containerCountBefore == containerCountAfter {
c.Fatalf("--rm=false should have left containers behind")
}
deleteImages(name)
}
}
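// VOLUME accepts space-separated values and JSON arrays; a bracketed form that is not valid JSON is split literally, and every value must end up in Config.Volumes.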
func (s *DockerSuite) TestBuildWithVolumes(c *check.C) {
testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows
var (
result map[string]map[string]struct{}
name = "testbuildvolumes"
emptyMap = make(map[string]struct{})
expected = map[string]map[string]struct{}{
"/test1": emptyMap,
"/test2": emptyMap,
"/test3": emptyMap,
"/test4": emptyMap,
"/test5": emptyMap,
"/test6": emptyMap,
"[/test7": emptyMap,
"/test8]": emptyMap,
}
)
_, err := buildImage(name,
`FROM scratch
VOLUME /test1
VOLUME /test2
VOLUME /test3 /test4
VOLUME ["/test5", "/test6"]
VOLUME [/test7 /test8]
`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Volumes")
err = json.Unmarshal([]byte(res), &result)
if err != nil {
c.Fatal(err)
}
equal := reflect.DeepEqual(&result, &expected)
if !equal {
c.Fatalf("Volumes %s, expected %s", result, expected)
}
}
func (s *DockerSuite) TestBuildMaintainer(c *check.C) {
name := "testbuildmaintainer"
expected := "dockerio"
_, err := buildImage(name,
`FROM `+minimalBaseImage()+`
MAINTAINER dockerio`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Author")
if res != expected {
c.Fatalf("Maintainer %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildUser(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuilduser"
expected := "dockerio"
_, err := buildImage(name,
`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
USER dockerio
RUN [ $(whoami) = 'dockerio' ]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.User")
if res != expected {
c.Fatalf("User %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) {
name := "testbuildrelativeworkdir"
var (
expected1 string
expected2 string
expected3 string
expected4 string
expectedFinal string
)
if daemonPlatform == "windows" {
expected1 = `C:/`
expected2 = `C:/test1`
expected3 = `C:/test2`
expected4 = `C:/test2/test3`
		expectedFinal = `C:\test2\test3` // Note: inspect returns Windows-style paths, as this check does not run inside busybox
} else {
expected1 = `/`
expected2 = `/test1`
expected3 = `/test2`
expected4 = `/test2/test3`
expectedFinal = `/test2/test3`
}
_, err := buildImage(name,
`FROM busybox
RUN sh -c "[ "$PWD" = "`+expected1+`" ]"
WORKDIR test1
RUN sh -c "[ "$PWD" = "`+expected2+`" ]"
WORKDIR /test2
RUN sh -c "[ "$PWD" = "`+expected3+`" ]"
WORKDIR test3
RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.WorkingDir")
if res != expectedFinal {
c.Fatalf("Workdir %s, expected %s", res, expectedFinal)
}
}
// #22181 Regression test. Single end-to-end test of using
// Windows semantics. Most path handling verifications are in unit tests
func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildwindowsworkdirprocessing"
_, err := buildImage(name,
`FROM busybox
WORKDIR C:\\foo
WORKDIR bar
RUN sh -c "[ "$PWD" = "C:/foo/bar" ]"
`,
true)
if err != nil {
c.Fatal(err)
}
}
// #22181 Regression test. Most paths handling verifications are in unit test.
// One functional test for end-to-end
func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildwindowsaddcopypathprocessing"
// TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to
// support backslash such as .\\ being equivalent to ./ and c:\\ being
// equivalent to c:/. This is not currently (nor ever has been) supported
// by docker on the Windows platform.
dockerfile := `
FROM busybox
# No trailing slash on COPY/ADD
# Results in dir being changed to a file
WORKDIR /wc1
COPY wc1 c:/wc1
WORKDIR /wc2
ADD wc2 c:/wc2
WORKDIR c:/
RUN sh -c "[ $(cat c:/wc1) = 'hellowc1' ]"
RUN sh -c "[ $(cat c:/wc2) = 'worldwc2' ]"
# Trailing slash on COPY/ADD, Windows-style path.
WORKDIR /wd1
COPY wd1 c:/wd1/
WORKDIR /wd2
ADD wd2 c:/wd2/
RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]"
RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]"
`
ctx, err := fakeContext(dockerfile, map[string]string{
"wc1": "hellowc1",
"wc2": "worldwc2",
"wd1": "hellowd1",
"wd2": "worldwd2",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, false)
if err != nil {
c.Fatal(err)
}
}
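// WORKDIR should expand declared ENV variables; an undeclared variable such as $MISSING_VAR expands to the empty string.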
func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) {
name := "testbuildworkdirwithenvvariables"
var expected string
if daemonPlatform == "windows" {
expected = `C:\test1\test2`
} else {
expected = `/test1/test2`
}
_, err := buildImage(name,
`FROM busybox
ENV DIRPATH /test1
ENV SUBDIRNAME test2
WORKDIR $DIRPATH
WORKDIR $SUBDIRNAME/$MISSING_VAR`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.WorkingDir")
if res != expected {
c.Fatalf("Workdir %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) {
// cat /test1/test2/foo gets permission denied for the user
testRequires(c, NotUserNamespace)
var expected string
if daemonPlatform == "windows" {
expected = `C:/test1/test2`
} else {
expected = `/test1/test2`
}
name := "testbuildrelativecopy"
dockerfile := `
FROM busybox
WORKDIR /test1
WORKDIR test2
RUN sh -c "[ "$PWD" = '` + expected + `' ]"
COPY foo ./
RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]"
ADD foo ./bar/baz
RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]"
COPY foo ./bar/baz2
RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]"
WORKDIR ..
COPY foo ./
RUN sh -c "[ $(cat /test1/foo) = 'hello' ]"
COPY foo /test3/
RUN sh -c "[ $(cat /test3/foo) = 'hello' ]"
WORKDIR /test4
COPY . .
RUN sh -c "[ $(cat /test4/foo) = 'hello' ]"
WORKDIR /test5/test6
COPY foo ../
RUN sh -c "[ $(cat /test5/foo) = 'hello' ]"
`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, false)
if err != nil {
c.Fatal(err)
}
}
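// ENV, LABEL and ARG must each reject a blank name with a dedicated error message.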
func (s *DockerSuite) TestBuildBlankName(c *check.C) {
name := "testbuildblankname"
_, _, stderr, err := buildImageWithStdoutStderr(name,
`FROM busybox
ENV =`,
true)
if err == nil {
c.Fatal("Build was supposed to fail but didn't")
}
if !strings.Contains(stderr, "ENV names can not be blank") {
c.Fatalf("Missing error message, got: %s", stderr)
}
_, _, stderr, err = buildImageWithStdoutStderr(name,
`FROM busybox
LABEL =`,
true)
if err == nil {
c.Fatal("Build was supposed to fail but didn't")
}
if !strings.Contains(stderr, "LABEL names can not be blank") {
c.Fatalf("Missing error message, got: %s", stderr)
}
_, _, stderr, err = buildImageWithStdoutStderr(name,
`FROM busybox
ARG =foo`,
true)
if err == nil {
c.Fatal("Build was supposed to fail but didn't")
}
if !strings.Contains(stderr, "ARG names can not be blank") {
c.Fatalf("Missing error message, got: %s", stderr)
}
}
func (s *DockerSuite) TestBuildEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows
name := "testbuildenv"
expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]"
_, err := buildImage(name,
`FROM busybox
ENV PATH /test:$PATH
ENV PORT 2375
RUN [ $(env | grep PORT) = 'PORT=2375' ]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Env")
if res != expected {
c.Fatalf("Env %s, expected %s", res, expected)
}
}
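// The default PATH should be injected even for images built FROM scratch, unless the Dockerfile sets PATH itself (including setting it to an empty value).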
func (s *DockerSuite) TestBuildPATH(c *check.C) {
testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows
defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
fn := func(dockerfile string, exp string) {
_, err := buildImage("testbldpath", dockerfile, true)
c.Assert(err, check.IsNil)
res := inspectField(c, "testbldpath", "Config.Env")
if res != exp {
c.Fatalf("Env %q, expected %q for dockerfile:%q", res, exp, dockerfile)
}
}
tests := []struct{ dockerfile, exp string }{
{"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"},
{"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"},
{"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"},
{"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"},
{"FROM scratch\nENV PATH=/test", "[PATH=/test]"},
{"FROM busybox\nENV PATH=/test", "[PATH=/test]"},
{"FROM scratch\nENV PATH=''", "[PATH=]"},
{"FROM busybox\nENV PATH=''", "[PATH=]"},
}
for _, test := range tests {
fn(test.dockerfile, test.exp)
}
}
func (s *DockerSuite) TestBuildContextCleanup(c *check.C) {
testRequires(c, SameHostDaemon)
name := "testbuildcontextcleanup"
entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
_, err = buildImage(name,
`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
if err = compareDirectoryEntries(entries, entriesFinal); err != nil {
c.Fatalf("context should have been deleted, but wasn't")
}
}
func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) {
testRequires(c, SameHostDaemon)
name := "testbuildcontextcleanup"
entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
_, err = buildImage(name,
`FROM `+minimalBaseImage()+`
RUN /non/existing/command`,
true)
if err == nil {
c.Fatalf("expected build to fail, but it didn't")
}
entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
if err = compareDirectoryEntries(entries, entriesFinal); err != nil {
c.Fatalf("context should have been deleted, but wasn't")
}
}
func (s *DockerSuite) TestBuildCmd(c *check.C) {
name := "testbuildcmd"
expected := "[/bin/echo Hello World]"
_, err := buildImage(name,
`FROM `+minimalBaseImage()+`
CMD ["/bin/echo", "Hello World"]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Cmd")
if res != expected {
c.Fatalf("Cmd %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildExpose(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildexpose"
expected := "map[2375/tcp:{}]"
_, err := buildImage(name,
`FROM scratch
EXPOSE 2375`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.ExposedPorts")
if res != expected {
c.Fatalf("Exposed ports %s, expected %s", res, expected)
}
}
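// EXPOSE with a very long, line-continued list (5000 ports) must record every port in Config.ExposedPorts.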
func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
// start building docker file with a large number of ports
portList := make([]string, 50)
line := make([]string, 100)
expectedPorts := make([]int, len(portList)*len(line))
for i := 0; i < len(portList); i++ {
for j := 0; j < len(line); j++ {
p := i*len(line) + j + 1
line[j] = strconv.Itoa(p)
expectedPorts[p-1] = p
}
if i == len(portList)-1 {
portList[i] = strings.Join(line, " ")
} else {
portList[i] = strings.Join(line, " ") + ` \`
}
}
dockerfile := `FROM scratch
EXPOSE {{range .}} {{.}}
{{end}}`
tmpl := template.Must(template.New("dockerfile").Parse(dockerfile))
buf := bytes.NewBuffer(nil)
tmpl.Execute(buf, portList)
name := "testbuildexpose"
_, err := buildImage(name, buf.String(), true)
if err != nil {
c.Fatal(err)
}
// check if all the ports are saved inside Config.ExposedPorts
res := inspectFieldJSON(c, name, "Config.ExposedPorts")
var exposedPorts map[string]interface{}
if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil {
c.Fatal(err)
}
for _, p := range expectedPorts {
ep := fmt.Sprintf("%d/tcp", p)
if _, ok := exposedPorts[ep]; !ok {
c.Errorf("Port(%s) is not exposed", ep)
} else {
delete(exposedPorts, ep)
}
}
if len(exposedPorts) != 0 {
c.Errorf("Unexpected extra exposed ports %v", exposedPorts)
}
}
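// Two EXPOSE lines listing the same ports in a different order should still produce the same (cached) image ID.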
func (s *DockerSuite) TestBuildExposeOrder(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
buildID := func(name, exposed string) string {
_, err := buildImage(name, fmt.Sprintf(`FROM scratch
EXPOSE %s`, exposed), true)
if err != nil {
c.Fatal(err)
}
id := inspectField(c, name, "Id")
return id
}
id1 := buildID("testbuildexpose1", "80 2375")
id2 := buildID("testbuildexpose2", "2375 80")
if id1 != id2 {
c.Errorf("EXPOSE should invalidate the cache only when ports actually changed")
}
}
func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildexposeuppercaseproto"
expected := "map[5678/udp:{}]"
_, err := buildImage(name,
`FROM scratch
EXPOSE 5678/UDP`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.ExposedPorts")
if res != expected {
c.Fatalf("Exposed ports %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) {
name := "testbuildentrypointinheritance"
name2 := "testbuildentrypointinheritance2"
_, err := buildImage(name,
`FROM busybox
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Entrypoint")
expected := "[/bin/echo]"
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
_, err = buildImage(name2,
fmt.Sprintf(`FROM %s
ENTRYPOINT []`, name),
true)
if err != nil {
c.Fatal(err)
}
res = inspectField(c, name2, "Config.Entrypoint")
expected = "[]"
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) {
name := "testbuildentrypoint"
expected := "[]"
_, err := buildImage(name,
`FROM busybox
ENTRYPOINT []`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Entrypoint")
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEntrypoint(c *check.C) {
name := "testbuildentrypoint"
expected := "[/bin/echo]"
_, err := buildImage(name,
`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Entrypoint")
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
// #6445 ensure ONBUILD triggers aren't committed to grandchildren
func (s *DockerSuite) TestBuildOnBuildLimitedInheritance(c *check.C) {
var (
out2, out3 string
)
{
name1 := "testonbuildtrigger1"
dockerfile1 := `
FROM busybox
RUN echo "GRANDPARENT"
ONBUILD RUN echo "ONBUILD PARENT"
`
ctx, err := fakeContext(dockerfile1, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".")
if err != nil {
c.Fatalf("build failed to complete: %s, %v", out1, err)
}
}
{
name2 := "testonbuildtrigger2"
dockerfile2 := `
FROM testonbuildtrigger1
`
ctx, err := fakeContext(dockerfile2, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".")
if err != nil {
c.Fatalf("build failed to complete: %s, %v", out2, err)
}
}
{
name3 := "testonbuildtrigger3"
dockerfile3 := `
FROM testonbuildtrigger2
`
ctx, err := fakeContext(dockerfile3, nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".")
if err != nil {
c.Fatalf("build failed to complete: %s, %v", out3, err)
}
}
// ONBUILD should be run in second build.
if !strings.Contains(out2, "ONBUILD PARENT") {
c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent")
}
// ONBUILD should *not* be run in third build.
if strings.Contains(out3, "ONBUILD PARENT") {
c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent")
}
}
func (s *DockerSuite) TestBuildWithCache(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildwithcache"
id1, err := buildImage(name,
`FROM scratch
MAINTAINER dockerio
EXPOSE 5432
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImage(name,
`FROM scratch
MAINTAINER dockerio
EXPOSE 5432
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
}
func (s *DockerSuite) TestBuildWithoutCache(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildwithoutcache"
name2 := "testbuildwithoutcache2"
id1, err := buildImage(name,
`FROM scratch
MAINTAINER dockerio
EXPOSE 5432
ENTRYPOINT ["/bin/echo"]`,
true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImage(name2,
`FROM scratch
MAINTAINER dockerio
EXPOSE 5432
ENTRYPOINT ["/bin/echo"]`,
false)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildConditionalCache(c *check.C) {
name := "testbuildconditionalcache"
dockerfile := `
FROM busybox
ADD foo /tmp/`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatalf("Error building #1: %s", err)
}
if err := ctx.Add("foo", "bye"); err != nil {
c.Fatalf("Error modifying foo: %s", err)
}
id2, err := buildImageFromContext(name, ctx, false)
if err != nil {
c.Fatalf("Error building #2: %s", err)
}
if id2 == id1 {
c.Fatal("Should not have used the cache")
}
id3, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatalf("Error building #3: %s", err)
}
if id3 != id2 {
c.Fatal("Should have used the cache")
}
}
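// ADD of an unchanged local file should hit the build cache on a second build.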
func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) {
// local files are not owned by the correct user
testRequires(c, NotUserNamespace)
name := "testbuildaddlocalfilewithcache"
name2 := "testbuildaddlocalfilewithcache2"
dockerfile := `
FROM busybox
MAINTAINER dockerio
ADD foo /usr/lib/bla/bar
RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) {
name := "testbuildaddmultiplelocalfilewithcache"
name2 := "testbuildaddmultiplelocalfilewithcache2"
dockerfile := `
FROM busybox
MAINTAINER dockerio
ADD foo Dockerfile /usr/lib/bla/
RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) {
// local files are not owned by the correct user
testRequires(c, NotUserNamespace)
name := "testbuildaddlocalfilewithoutcache"
name2 := "testbuildaddlocalfilewithoutcache2"
dockerfile := `
FROM busybox
MAINTAINER dockerio
ADD foo /usr/lib/bla/bar
RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, false)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) {
name := "testbuildcopydirbutnotfile"
name2 := "testbuildcopydirbutnotfile2"
dockerfile := `
FROM ` + minimalBaseImage() + `
COPY dir /tmp/`
ctx, err := fakeContext(dockerfile, map[string]string{
"dir/foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
// Check that adding file with similar name doesn't mess with cache
if err := ctx.Add("dir_file", "hello2"); err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but wasn't")
}
}
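// ADD of the whole build context is cached on content: adding or changing a file invalidates the cache, while rewriting a file with identical content (an mtime-only change) does not.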
func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) {
name := "testbuildaddcurrentdirwithcache"
name2 := name + "2"
name3 := name + "3"
name4 := name + "4"
dockerfile := `
FROM ` + minimalBaseImage() + `
MAINTAINER dockerio
ADD . /usr/lib/bla`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
// Check that adding file invalidate cache of "ADD ."
if err := ctx.Add("bar", "hello2"); err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
// Check that changing file invalidate cache of "ADD ."
if err := ctx.Add("foo", "hello1"); err != nil {
c.Fatal(err)
}
id3, err := buildImageFromContext(name3, ctx, true)
if err != nil {
c.Fatal(err)
}
if id2 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
// Check that changing file to same content with different mtime does not
// invalidate cache of "ADD ."
time.Sleep(1 * time.Second) // wait second because of mtime precision
if err := ctx.Add("foo", "hello1"); err != nil {
c.Fatal(err)
}
id4, err := buildImageFromContext(name4, ctx, true)
if err != nil {
c.Fatal(err)
}
if id3 != id4 {
c.Fatal("The cache should have been used but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) {
name := "testbuildaddcurrentdirwithoutcache"
name2 := "testbuildaddcurrentdirwithoutcache2"
dockerfile := `
FROM ` + minimalBaseImage() + `
MAINTAINER dockerio
ADD . /usr/lib/bla`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, false)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) {
name := "testbuildaddremotefilewithcache"
server, err := fakeStorage(map[string]string{
"baz": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
id1, err := buildImage(name,
fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImage(name,
fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) {
name := "testbuildaddremotefilewithoutcache"
name2 := "testbuildaddremotefilewithoutcache2"
server, err := fakeStorage(map[string]string{
"baz": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
id1, err := buildImage(name,
fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImage(name2,
fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
false)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
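// ADD of a remote file is cached on its content, not on the remote mtime: the same bytes served by a fresh server must still hit the cache.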
func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) {
name := "testbuildaddremotefilemtime"
name2 := name + "2"
name3 := name + "3"
files := map[string]string{"baz": "hello"}
server, err := fakeStorage(files)
if err != nil {
c.Fatal(err)
}
defer server.Close()
ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but wasn't - #1")
}
// Now create a different server with same contents (causes different mtime)
// The cache should still be used
// allow some time for clock to pass as mtime precision is only 1s
time.Sleep(2 * time.Second)
server2, err := fakeStorage(files)
if err != nil {
c.Fatal(err)
}
defer server2.Close()
ctx2, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil)
if err != nil {
c.Fatal(err)
}
defer ctx2.Close()
id3, err := buildImageFromContext(name3, ctx2, true)
if err != nil {
c.Fatal(err)
}
if id1 != id3 {
c.Fatal("The cache should have been used but wasn't")
}
}
func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) {
name := "testbuildaddlocalandremotefilewithcache"
server, err := fakeStorage(map[string]string{
"baz": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD foo /usr/lib/bla/bar
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
map[string]string{
"foo": "hello world",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
}
func testContextTar(c *check.C, compression archive.Compression) {
ctx, err := fakeContext(
`FROM busybox
ADD foo /foo
CMD ["cat", "/foo"]`,
map[string]string{
"foo": "bar",
},
)
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
context, err := archive.Tar(ctx.Dir, compression)
if err != nil {
c.Fatalf("failed to build context tar: %v", err)
}
name := "contexttar"
buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-")
buildCmd.Stdin = context
if out, _, err := runCommandWithOutput(buildCmd); err != nil {
c.Fatalf("build failed to complete: %v %v", out, err)
}
}
func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) {
testContextTar(c, archive.Gzip)
}
func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) {
testContextTar(c, archive.Uncompressed)
}
func (s *DockerSuite) TestBuildNoContext(c *check.C) {
buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-")
buildCmd.Stdin = strings.NewReader(
`FROM busybox
CMD ["echo", "ok"]`)
if out, _, err := runCommandWithOutput(buildCmd); err != nil {
c.Fatalf("build failed to complete: %v %v", out, err)
}
if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" {
c.Fatalf("run produced invalid output: %q, expected %q", out, "ok")
}
}
// TODO: TestCaching
func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) {
name := "testbuildaddlocalandremotefilewithoutcache"
name2 := "testbuildaddlocalandremotefilewithoutcache2"
server, err := fakeStorage(map[string]string{
"baz": "hello",
})
if err != nil {
c.Fatal(err)
}
defer server.Close()
ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD foo /usr/lib/bla/bar
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
map[string]string{
"foo": "hello world",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
id2, err := buildImageFromContext(name2, ctx, false)
if err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("The cache should have been invalidated but hasn't.")
}
}
func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildimg"
_, err := buildImage(name,
`FROM busybox:latest
RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test
VOLUME /test`,
true)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test")
if expected := "drw-------"; !strings.Contains(out, expected) {
c.Fatalf("expected %s received %s", expected, out)
}
if expected := "daemon daemon"; !strings.Contains(out, expected) {
c.Fatalf("expected %s received %s", expected, out)
}
}
// testing #1405 - config.Cmd does not get cleaned up if
// utilizing cache
func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) {
name := "testbuildcmdcleanup"
if _, err := buildImage(name,
`FROM busybox
RUN echo "hello"`,
true); err != nil {
c.Fatal(err)
}
ctx, err := fakeContext(`FROM busybox
RUN echo "hello"
ADD foo /foo
ENTRYPOINT ["/bin/echo"]`,
map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Cmd")
// Cmd must be cleaned up
if res != "[]" {
c.Fatalf("Cmd %s, expected nil", res)
}
}
func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) {
name := "testbuildaddnotfound"
expected := "foo: no such file or directory"
if daemonPlatform == "windows" {
expected = "foo: The system cannot find the file specified"
}
ctx, err := fakeContext(`FROM `+minimalBaseImage()+`
ADD foo /usr/local/bar`,
map[string]string{"bar": "hello"})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
if !strings.Contains(err.Error(), expected) {
c.Fatalf("Wrong error %v, must be about missing foo file or directory", err)
}
} else {
c.Fatal("Error must not be nil")
}
}
func (s *DockerSuite) TestBuildInheritance(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildinheritance"
_, err := buildImage(name,
`FROM scratch
EXPOSE 2375`,
true)
if err != nil {
c.Fatal(err)
}
ports1 := inspectField(c, name, "Config.ExposedPorts")
_, err = buildImage(name,
fmt.Sprintf(`FROM %s
ENTRYPOINT ["/bin/echo"]`, name),
true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Entrypoint")
if expected := "[/bin/echo]"; res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
ports2 := inspectField(c, name, "Config.ExposedPorts")
if ports1 != ports2 {
c.Fatalf("Ports must be same: %s != %s", ports1, ports2)
}
}
func (s *DockerSuite) TestBuildFails(c *check.C) {
name := "testbuildfails"
_, err := buildImage(name,
`FROM busybox
RUN sh -c "exit 23"`,
true)
if err != nil {
if !strings.Contains(err.Error(), "returned a non-zero code: 23") {
c.Fatalf("Wrong error %v, must be about non-zero code 23", err)
}
} else {
c.Fatal("Error must not be nil")
}
}
func (s *DockerSuite) TestBuildOnBuild(c *check.C) {
name := "testbuildonbuild"
_, err := buildImage(name,
`FROM busybox
ONBUILD RUN touch foobar`,
true)
if err != nil {
c.Fatal(err)
}
_, err = buildImage(name,
fmt.Sprintf(`FROM %s
RUN [ -f foobar ]`, name),
true)
if err != nil {
c.Fatal(err)
}
}
// gh #2446
func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) {
makeLink := `ln -s /foo /bar`
if daemonPlatform == "windows" {
makeLink = `mklink /D C:\bar C:\foo`
}
name := "testbuildaddtosymlinkdest"
ctx, err := fakeContext(`FROM busybox
RUN sh -c "mkdir /foo"
RUN `+makeLink+`
ADD foo /bar/
RUN sh -c "[ -f /bar/foo ]"
RUN sh -c "[ -f /foo/foo ]"`,
map[string]string{
"foo": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
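// Backslash line continuations inside a MAINTAINER value are joined into a single string in the stored Author field.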
func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) {
name := "testbuildescapewhitespace"
_, err := buildImage(name, `
# ESCAPE=\
FROM busybox
MAINTAINER "Docker \
IO <io@\
docker.com>"
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Author")
if res != "\"Docker IO <[email protected]>\"" {
c.Fatalf("Parsed string did not match the escaped string. Got: %q", res)
}
}
func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) {
// Verify that strings that look like ints are still passed as strings
name := "testbuildstringing"
_, err := buildImage(name, `
FROM busybox
MAINTAINER 123
`, true)
if err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "inspect", name)
if !strings.Contains(out, "\"123\"") {
c.Fatalf("Output does not contain the int as a string:\n%s", out)
}
}
func (s *DockerSuite) TestBuildDockerignore(c *check.C) {
name := "testbuilddockerignore"
dockerfile := `
FROM busybox
ADD . /bla
RUN sh -c "[[ -f /bla/src/x.go ]]"
RUN sh -c "[[ -f /bla/Makefile ]]"
RUN sh -c "[[ ! -e /bla/src/_vendor ]]"
RUN sh -c "[[ ! -e /bla/.gitignore ]]"
RUN sh -c "[[ ! -e /bla/README.md ]]"
RUN sh -c "[[ ! -e /bla/dir/foo ]]"
RUN sh -c "[[ ! -e /bla/foo ]]"
RUN sh -c "[[ ! -e /bla/.git ]]"
RUN sh -c "[[ ! -e v.cc ]]"
RUN sh -c "[[ ! -e src/v.cc ]]"
RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"Makefile": "all:",
".git/HEAD": "ref: foo",
"src/x.go": "package main",
"src/_vendor/v.go": "package main",
"src/_vendor/v.cc": "package main",
"src/v.cc": "package main",
"v.cc": "package main",
"dir/foo": "",
".gitignore": "",
"README.md": "readme",
".dockerignore": `
.git
pkg
.gitignore
src/_vendor
*.md
**/*.cc
dir`,
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) {
name := "testbuilddockerignorecleanpaths"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "foo",
"foo2": "foo2",
"dir1/foo": "foo in dir1",
".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) {
name := "testbuilddockerignoreexceptions"
dockerfile := `
FROM busybox
ADD . /bla
RUN sh -c "[[ -f /bla/src/x.go ]]"
RUN sh -c "[[ -f /bla/Makefile ]]"
RUN sh -c "[[ ! -e /bla/src/_vendor ]]"
RUN sh -c "[[ ! -e /bla/.gitignore ]]"
RUN sh -c "[[ ! -e /bla/README.md ]]"
RUN sh -c "[[ -e /bla/dir/dir/foo ]]"
RUN sh -c "[[ ! -e /bla/dir/foo1 ]]"
RUN sh -c "[[ -f /bla/dir/e ]]"
RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]"
RUN sh -c "[[ ! -e /bla/foo ]]"
RUN sh -c "[[ ! -e /bla/.git ]]"
RUN sh -c "[[ -e /bla/dir/a.cc ]]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"Makefile": "all:",
".git/HEAD": "ref: foo",
"src/x.go": "package main",
"src/_vendor/v.go": "package main",
"dir/foo": "",
"dir/foo1": "",
"dir/dir/f1": "",
"dir/dir/foo": "",
"dir/e": "",
"dir/e-dir/foo": "",
".gitignore": "",
"README.md": "readme",
"dir/a.cc": "hello",
".dockerignore": `
.git
pkg
.gitignore
src/_vendor
*.md
dir
!dir/e*
!dir/dir/foo
**/*.cc
!**/*.cc`,
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) {
name := "testbuilddockerignoredockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "! ls /tmp/Dockerfile"
RUN ls /tmp/.dockerignore`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": dockerfile,
".dockerignore": "Dockerfile\n",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't ignore Dockerfile correctly:%s", err)
}
// now try it with ./Dockerfile
ctx.Add(".dockerignore", "./Dockerfile\n")
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err)
}
}
func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) {
name := "testbuilddockerignoredockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN ls /tmp/Dockerfile
RUN sh -c "! ls /tmp/MyDockerfile"
RUN ls /tmp/.dockerignore`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": "Should not use me",
"MyDockerfile": dockerfile,
".dockerignore": "MyDockerfile\n",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err)
}
// now try it with ./MyDockerfile
ctx.Add(".dockerignore", "./MyDockerfile\n")
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err)
}
}
func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) {
name := "testbuilddockerignoredockerignore"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "! ls /tmp/.dockerignore"
RUN ls /tmp/Dockerfile`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": dockerfile,
".dockerignore": ".dockerignore\n",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't ignore .dockerignore correctly:%s", err)
}
}
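// When the Dockerfile itself is listed in .dockerignore it is excluded from the ADD context, so touching or editing it must not invalidate the cache.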
func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) {
var id1 string
var id2 string
name := "testbuilddockerignoretouchdockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": dockerfile,
".dockerignore": "Dockerfile\n",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if id1, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't build it correctly:%s", err)
}
if id2, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't build it correctly:%s", err)
}
if id1 != id2 {
c.Fatalf("Didn't use the cache - 1")
}
// Now make sure touching Dockerfile doesn't invalidate the cache
if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
c.Fatalf("Didn't add Dockerfile: %s", err)
}
if id2, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't build it correctly:%s", err)
}
if id1 != id2 {
c.Fatalf("Didn't use the cache - 2")
}
// One more time but just 'touch' it instead of changing the content
if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
c.Fatalf("Didn't add Dockerfile: %s", err)
}
if id2, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("Didn't build it correctly:%s", err)
}
if id1 != id2 {
c.Fatalf("Didn't use the cache - 3")
}
}
func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) {
name := "testbuilddockerignorewholedir"
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ -f /Makefile ]]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": "FROM scratch",
"Makefile": "all:",
".gitignore": "",
".dockerignore": ".*\n",
})
c.Assert(err, check.IsNil)
defer ctx.Close()
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
c.Assert(ctx.Add(".dockerfile", "*"), check.IsNil)
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
c.Assert(ctx.Add(".dockerfile", "."), check.IsNil)
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
c.Assert(ctx.Add(".dockerfile", "?"), check.IsNil)
if _, err = buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) {
name := "testbuilddockerignorebadexclusion"
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ -f /Makefile ]]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": "FROM scratch",
"Makefile": "all:",
".gitignore": "",
".dockerignore": "!\n",
})
c.Assert(err, check.IsNil)
defer ctx.Close()
if _, err = buildImageFromContext(name, ctx, true); err == nil {
c.Fatalf("Build was supposed to fail but didn't")
}
if err.Error() != "failed to build the image: Error checking context: 'Illegal exclusion pattern: !'.\n" {
c.Fatalf("Incorrect output, got:%q", err.Error())
}
}
func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) {
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.dockerignore ]]"
RUN sh -c "[[ ! -e /Dockerfile ]]"
RUN sh -c "[[ ! -e /file1 ]]"
RUN sh -c "[[ ! -e /dir ]]"`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": "FROM scratch",
"file1": "",
"dir/dfile1": "",
})
c.Assert(err, check.IsNil)
defer ctx.Close()
// All of these should result in ignoring all files
for _, variant := range []string{"**", "**/", "**/**", "*"} {
ctx.Add(".dockerignore", variant)
_, err = buildImageFromContext("noname", ctx, true)
c.Assert(err, check.IsNil, check.Commentf("variant: %s", variant))
}
}
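// Verify '**' and escaped wildcard patterns in .dockerignore against nested directories.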
func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) {
dockerfile := `
FROM busybox
COPY . /
#RUN sh -c "[[ -e /.dockerignore ]]"
RUN sh -c "[[ -e /Dockerfile ]] && \
[[ ! -e /file0 ]] && \
[[ ! -e /dir1/file0 ]] && \
[[ ! -e /dir2/file0 ]] && \
[[ ! -e /file1 ]] && \
[[ ! -e /dir1/file1 ]] && \
[[ ! -e /dir1/dir2/file1 ]] && \
[[ ! -e /dir1/file2 ]] && \
[[ -e /dir1/dir2/file2 ]] && \
[[ ! -e /dir1/dir2/file4 ]] && \
[[ ! -e /dir1/dir2/file5 ]] && \
[[ ! -e /dir1/dir2/file6 ]] && \
[[ ! -e /dir1/dir3/file7 ]] && \
[[ ! -e /dir1/dir3/file8 ]] && \
[[ -e /dir1/dir3 ]] && \
[[ -e /dir1/dir4 ]] && \
[[ ! -e 'dir1/dir5/fileAA' ]] && \
[[ -e 'dir1/dir5/fileAB' ]] && \
[[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing
RUN echo all done!`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": "FROM scratch",
"file0": "",
"dir1/file0": "",
"dir1/dir2/file0": "",
"file1": "",
"dir1/file1": "",
"dir1/dir2/file1": "",
"dir1/file2": "",
"dir1/dir2/file2": "", // remains
"dir1/dir2/file4": "",
"dir1/dir2/file5": "",
"dir1/dir2/file6": "",
"dir1/dir3/file7": "",
"dir1/dir3/file8": "",
"dir1/dir4/file9": "",
"dir1/dir5/fileAA": "",
"dir1/dir5/fileAB": "",
"dir1/dir5/fileB": "",
".dockerignore": `
**/file0
**/*file1
**/dir1/file2
dir1/**/file4
**/dir2/file5
**/dir1/dir2/file6
dir1/dir3/**
**/dir4/**
**/file?A
**/file\?B
**/dir5/file.
`,
})
c.Assert(err, check.IsNil)
defer ctx.Close()
_, err = buildImageFromContext("noname", ctx, true)
c.Assert(err, check.IsNil)
}
func (s *DockerSuite) TestBuildLineBreak(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildlinebreak"
_, err := buildImage(name,
`FROM busybox
RUN sh -c 'echo root:testpass \
> /tmp/passwd'
RUN mkdir -p /var/run/sshd
RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]"
RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`,
true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildEOLInLine(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildeolinline"
_, err := buildImage(name,
`FROM busybox
RUN sh -c 'echo root:testpass > /tmp/passwd'
RUN echo "foo \n bar"; echo "baz"
RUN mkdir -p /var/run/sshd
RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]"
RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`,
true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildcomments"
_, err := buildImage(name,
`FROM busybox
# This is an ordinary comment.
RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh
RUN [ ! -x /hello.sh ]
# comment with line break \
RUN chmod +x /hello.sh
RUN [ -x /hello.sh ]
RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ]
RUN [ "$(/hello.sh)" = "hello world" ]`,
true)
if err != nil {
c.Fatal(err)
}
}
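// Verify USER with names, numeric IDs and the user:group syntax, including supplementary groups and unknown uid/gid.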
func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildusers"
_, err := buildImage(name,
`FROM busybox
# Make sure our defaults work
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ]
# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0)
USER root
RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ]
# Setup dockerio user and group
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \
echo 'dockerio:x:1001:' >> /etc/group
# Make sure we can switch to our user and all the information is exactly as we expect it to be
USER dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
# Switch back to root and double check that worked exactly as we might expect it to
USER root
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \
# Add a "supplementary" group for our dockerio user \
echo 'supplementary:x:1002:dockerio' >> /etc/group
# ... and then go verify that we get it like we expect
USER dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
USER 1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
# super test the new "user:group" syntax
USER dockerio:dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER 1001:dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER dockerio:1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER 1001:1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER dockerio:supplementary
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER dockerio:1002
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER 1001:supplementary
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER 1001:1002
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
# make sure unknown uid/gid still works properly
USER 1042:1043
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`,
true)
if err != nil {
c.Fatal(err)
}
}
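// Verify ENV expansion in subsequent ENV, ADD and RUN instructions.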
func (s *DockerSuite) TestBuildEnvUsage(c *check.C) {
// /docker/world/hello is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildenvusage"
dockerfile := `FROM busybox
ENV HOME /root
ENV PATH $HOME/bin:$PATH
ENV PATH /tmp:$PATH
RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ]
ENV FOO /foo/baz
ENV BAR /bar
ENV BAZ $BAR
ENV FOOPATH $PATH:$FOO
RUN [ "$BAR" = "$BAZ" ]
RUN [ "$FOOPATH" = "$PATH:/foo/baz" ]
ENV FROM hello/docker/world
ENV TO /docker/world/hello
ADD $FROM $TO
RUN [ "$(cat $TO)" = "hello" ]
ENV abc=def
ENV ghi=$abc
RUN [ "$ghi" = "def" ]
`
ctx, err := fakeContext(dockerfile, map[string]string{
"hello/docker/world": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) {
// /docker/world/hello is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildenvusage2"
dockerfile := `FROM busybox
ENV abc=def def="hello world"
RUN [ "$abc,$def" = "def,hello world" ]
ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too"
RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ]
ENV abc=zzz FROM=hello/docker/world
ENV abc=zzz TO=/docker/world/hello
ADD $FROM $TO
RUN [ "$abc,$(cat $TO)" = "zzz,hello" ]
ENV abc 'yyy'
RUN [ $abc = 'yyy' ]
ENV abc=
RUN [ "$abc" = "" ]
# use grep to make sure if the builder substitutes \$foo by mistake
# we don't get a false positive
ENV abc=\$foo
RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo)
ENV abc \$foo
RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo)
ENV abc=\'foo\' abc2=\"foo\"
RUN [ "$abc,$abc2" = "'foo',\"foo\"" ]
ENV abc "foo"
RUN [ "$abc" = "foo" ]
ENV abc 'foo'
RUN [ "$abc" = 'foo' ]
ENV abc \'foo\'
RUN [ "$abc" = "'foo'" ]
ENV abc \"foo\"
RUN [ "$abc" = '"foo"' ]
ENV abc=ABC
RUN [ "$abc" = "ABC" ]
ENV def1=${abc:-DEF} def2=${ccc:-DEF}
ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:}
RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ]
ENV mypath=${mypath:+$mypath:}/home
ENV mypath=${mypath:+$mypath:}/away
RUN [ "$mypath" = '/home:/away' ]
ENV e1=bar
ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11
RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ]
ENV ee1 bar
ENV ee2 $ee1
ENV ee3 $ee11
ENV ee4 \$ee1
ENV ee5 \$ee11
RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ]
ENV eee1="foo" eee2='foo'
ENV eee3 "foo"
ENV eee4 'foo'
RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ]
`
ctx, err := fakeContext(dockerfile, map[string]string{
"hello/docker/world": "hello",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildAddScript(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildaddscript"
dockerfile := `
FROM busybox
ADD test /test
RUN ["chmod","+x","/test"]
RUN ["/test"]
RUN [ "$(cat /testfile)" = 'test!' ]`
ctx, err := fakeContext(dockerfile, map[string]string{
"test": "#!/bin/sh\necho 'test!' > /testfile",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
}
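// Verify that ADD extracts a local tar archive into existing and non-existing destination directories.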
func (s *DockerSuite) TestBuildAddTar(c *check.C) {
// /test/foo is not owned by the correct user
testRequires(c, NotUserNamespace)
name := "testbuildaddtar"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar /
RUN cat /test/foo | grep Hi
ADD test.tar /test.tar
RUN cat /test.tar/test/foo | grep Hi
ADD test.tar /unlikely-to-exist
RUN cat /unlikely-to-exist/test/foo | grep Hi
ADD test.tar /unlikely-to-exist-trailing-slash/
RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi
RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir
ADD test.tar /existing-directory
RUN cat /existing-directory/test/foo | grep Hi
ADD test.tar /existing-directory-trailing-slash/
RUN cat /existing-directory-trailing-slash/test/foo | grep Hi`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("build failed to complete for TestBuildAddTar: %v", err)
}
}
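// Verify that ADD fails when the source tar archive is corrupted (truncated by one byte).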
func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) {
name := "testbuildaddbrokentar"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar /`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
// Corrupt the tar by removing one byte off the end
stat, err := testTar.Stat()
if err != nil {
c.Fatalf("failed to stat tar archive: %v", err)
}
if err := testTar.Truncate(stat.Size() - 1); err != nil {
c.Fatalf("failed to truncate tar archive: %v", err)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err == nil {
c.Fatalf("build should have failed for TestBuildAddBrokenTar")
}
}
func (s *DockerSuite) TestBuildAddNonTar(c *check.C) {
name := "testbuildaddnontar"
// Should not try to extract test.tar
ctx, err := fakeContext(`
FROM busybox
ADD test.tar /
RUN test -f /test.tar`,
map[string]string{"test.tar": "not_a_tar_file"})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("build failed for TestBuildAddNonTar")
}
}
func (s *DockerSuite) TestBuildAddTarXz(c *check.C) {
// /test/foo is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildaddtarxz"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar.xz /
RUN cat /test/foo | grep Hi`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
xzCompressCmd := exec.Command("xz", "-k", "test.tar")
xzCompressCmd.Dir = tmpDir
out, _, err := runCommandWithOutput(xzCompressCmd)
if err != nil {
c.Fatal(err, out)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
}
}
func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildaddtarxzgz"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar.xz.gz /
RUN ls /test.tar.xz.gz`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
xzCompressCmd := exec.Command("xz", "-k", "test.tar")
xzCompressCmd.Dir = tmpDir
out, _, err := runCommandWithOutput(xzCompressCmd)
if err != nil {
c.Fatal(err, out)
}
gzipCompressCmd := exec.Command("gzip", "test.tar.xz")
gzipCompressCmd.Dir = tmpDir
out, _, err = runCommandWithOutput(gzipCompressCmd)
if err != nil {
c.Fatal(err, out)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
}
}
func (s *DockerSuite) TestBuildFromGit(c *check.C) {
name := "testbuildfromgit"
git, err := newFakeGit("repo", map[string]string{
"Dockerfile": `FROM busybox
ADD first /first
RUN [ -f /first ]
MAINTAINER docker`,
"first": "test git data",
}, true)
if err != nil {
c.Fatal(err)
}
defer git.Close()
_, err = buildImageFromPath(name, git.RepoURL, true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) {
name := "testbuildfromgit"
git, err := newFakeGit("repo", map[string]string{
"docker/Dockerfile": `FROM busybox
ADD first /first
RUN [ -f /first ]
MAINTAINER docker`,
"docker/first": "test git data",
}, true)
if err != nil {
c.Fatal(err)
}
defer git.Close()
u := fmt.Sprintf("%s#master:docker", git.RepoURL)
_, err = buildImageFromPath(name, u, true)
if err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
func (s *DockerSuite) TestBuildFromGitwithF(c *check.C) {
name := "testbuildfromgitwithf"
git, err := newFakeGit("repo", map[string]string{
"myApp/myDockerfile": `FROM busybox
RUN echo hi from Dockerfile`,
}, true)
if err != nil {
c.Fatal(err)
}
defer git.Close()
out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "myApp/myDockerfile", git.RepoURL)
if err != nil {
c.Fatalf("Error on build. Out: %s\nErr: %v", out, err)
}
if !strings.Contains(out, "hi from Dockerfile") {
c.Fatalf("Missing expected output, got:\n%s", out)
}
}
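// Verify building from a remote tarball URL that contains the Dockerfile.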
func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) {
name := "testbuildfromremotetarball"
buffer := new(bytes.Buffer)
tw := tar.NewWriter(buffer)
defer tw.Close()
dockerfile := []byte(`FROM busybox
MAINTAINER docker`)
if err := tw.WriteHeader(&tar.Header{
Name: "Dockerfile",
Size: int64(len(dockerfile)),
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write(dockerfile); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
server, err := fakeBinaryStorage(map[string]*bytes.Buffer{
"testT.tar": buffer,
})
c.Assert(err, check.IsNil)
defer server.Close()
_, err = buildImageFromPath(name, server.URL()+"/testT.tar", true)
c.Assert(err, check.IsNil)
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
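// Verify that setting a new ENTRYPOINT in a child image clears the CMD inherited from the parent.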
func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) {
name := "testbuildcmdcleanuponentrypoint"
if _, err := buildImage(name,
`FROM `+minimalBaseImage()+`
CMD ["test"]
ENTRYPOINT ["echo"]`,
true); err != nil {
c.Fatal(err)
}
if _, err := buildImage(name,
fmt.Sprintf(`FROM %s
ENTRYPOINT ["cat"]`, name),
true); err != nil {
c.Fatal(err)
}
res := inspectField(c, name, "Config.Cmd")
if res != "[]" {
c.Fatalf("Cmd %s, expected nil", res)
}
res = inspectField(c, name, "Config.Entrypoint")
if expected := "[cat]"; res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildClearCmd(c *check.C) {
name := "testbuildclearcmd"
_, err := buildImage(name,
`From `+minimalBaseImage()+`
ENTRYPOINT ["/bin/bash"]
CMD []`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Cmd")
if res != "[]" {
c.Fatalf("Cmd %s, expected %s", res, "[]")
}
}
func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) {
// Skip on Windows. Base image on Windows has a CMD set in the image.
testRequires(c, DaemonIsLinux)
name := "testbuildemptycmd"
if _, err := buildImage(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true); err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Cmd")
if res != "null" {
c.Fatalf("Cmd %s, expected %s", res, "null")
}
}
func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) {
name := "testbuildonbuildparent"
if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil {
c.Fatal(err)
}
_, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true)
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "# Executing 1 build trigger") {
c.Fatal("failed to find the build trigger output", out)
}
}
func (s *DockerSuite) TestBuildInvalidTag(c *check.C) {
name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200)
_, out, err := buildImageWithOut(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true)
// if the error doesn't check for illegal tag name, or the image is built
// then this should fail
if !strings.Contains(out, "Error parsing reference") || strings.Contains(out, "Sending build context to Docker daemon") {
c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out)
}
}
func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) {
name := "testbuildcmdshc"
if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Cmd")
expected := `["/bin/sh","-c","echo cmd"]`
if daemonPlatform == "windows" {
expected = `["cmd","/S","/C","echo cmd"]`
}
if res != expected {
c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
}
}
func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) {
// Test to make sure that when we concatenate arrays we take into account
// the arg separator to make sure ["echo","hi"] and ["echo hi"] don't
// look the same
name := "testbuildcmdspaces"
var id1 string
var id2 string
var err error
if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil {
c.Fatal(err)
}
if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("Should not have resulted in the same CMD")
}
// Now do the same with ENTRYPOINT
if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil {
c.Fatal(err)
}
if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil {
c.Fatal(err)
}
if id1 == id2 {
c.Fatal("Should not have resulted in the same ENTRYPOINT")
}
}
func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) {
name := "testbuildcmdjson"
if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Cmd")
expected := `["echo","cmd"]`
if res != expected {
c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
}
}
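// Verify that a child image's ENTRYPOINT replaces the ENTRYPOINT inherited from its parent image.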
func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) {
if _, err := buildImage("parent", `
FROM busybox
ENTRYPOINT exit 130
`, true); err != nil {
c.Fatal(err)
}
if _, status, _ := dockerCmdWithError("run", "parent"); status != 130 {
c.Fatalf("expected exit code 130 but received %d", status)
}
if _, err := buildImage("child", `
FROM parent
ENTRYPOINT exit 5
`, true); err != nil {
c.Fatal(err)
}
if _, status, _ := dockerCmdWithError("run", "child"); status != 5 {
c.Fatalf("expected exit code 5 but received %d", status)
}
}
func (s *DockerSuite) TestBuildEntrypointInheritanceInspect(c *check.C) {
var (
name = "testbuildepinherit"
name2 = "testbuildepinherit2"
expected = `["/bin/sh","-c","echo quux"]`
)
if daemonPlatform == "windows" {
expected = `["cmd","/S","/C","echo quux"]`
}
if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil {
c.Fatal(err)
}
if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name2, "Config.Entrypoint")
if res != expected {
c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res)
}
out, _ := dockerCmd(c, "run", name2)
expected = "quux"
if strings.TrimSpace(out) != expected {
c.Fatalf("Expected output is %s, got %s", expected, out)
}
}
func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) {
name := "testbuildentrypoint"
_, err := buildImage(name,
`FROM busybox
ENTRYPOINT echo`,
true)
if err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "--rm", name)
}
func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildexoticshellinterpolation"
_, err := buildImage(name, `
FROM busybox
ENV SOME_VAR a.b.c
RUN [ "$SOME_VAR" = 'a.b.c' ]
RUN [ "${SOME_VAR}" = 'a.b.c' ]
RUN [ "${SOME_VAR%.*}" = 'a.b' ]
RUN [ "${SOME_VAR%%.*}" = 'a' ]
RUN [ "${SOME_VAR#*.}" = 'b.c' ]
RUN [ "${SOME_VAR##*.}" = 'c' ]
RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ]
RUN [ "${#SOME_VAR}" = '5' ]
RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ]
RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ]
RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ]
RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ]
`, false)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) {
// This testcase is supposed to generate an error because the
// JSON array we're passing in on the CMD uses single quotes instead
// of double quotes (per the JSON spec). This means we interpret it
// as a "string" instead of "JSON array" and pass it on to "sh -c" and
// it should barf on it.
name := "testbuildsinglequotefails"
if _, err := buildImage(name,
`FROM busybox
CMD [ '/bin/sh', '-c', 'echo hi' ]`,
true); err != nil {
c.Fatal(err)
}
if _, _, err := dockerCmdWithError("run", "--rm", name); err == nil {
c.Fatal("The image was not supposed to be able to run")
}
}
func (s *DockerSuite) TestBuildVerboseOut(c *check.C) {
name := "testbuildverboseout"
expected := "\n123\n"
if daemonPlatform == "windows" {
expected = "\n123\r\n"
}
_, out, err := buildImageWithOut(name,
`FROM busybox
RUN echo 123`,
false)
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, expected) {
c.Fatalf("Output should contain %q: %q", "123", out)
}
}
func (s *DockerSuite) TestBuildWithTabs(c *check.C) {
name := "testbuildwithtabs"
_, err := buildImage(name,
"FROM busybox\nRUN echo\tone\t\ttwo", true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "ContainerConfig.Cmd")
expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]`
expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
if daemonPlatform == "windows" {
expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]`
expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
}
if res != expected1 && res != expected2 {
c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2)
}
}
func (s *DockerSuite) TestBuildLabels(c *check.C) {
name := "testbuildlabel"
expected := `{"License":"GPL","Vendor":"Acme"}`
_, err := buildImage(name,
`FROM busybox
LABEL Vendor=Acme
LABEL License GPL`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildLabelsCache(c *check.C) {
name := "testbuildlabelcache"
id1, err := buildImage(name,
`FROM busybox
LABEL Vendor=Acme`, false)
if err != nil {
c.Fatalf("Build 1 should have worked: %v", err)
}
id2, err := buildImage(name,
`FROM busybox
LABEL Vendor=Acme`, true)
if err != nil || id1 != id2 {
c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err)
}
id2, err = buildImage(name,
`FROM busybox
LABEL Vendor=Acme1`, true)
if err != nil || id1 == id2 {
c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err)
}
id2, err = buildImage(name,
`FROM busybox
LABEL Vendor Acme`, true) // Note: " " and "=" should be same
if err != nil || id1 != id2 {
c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err)
}
// Now make sure the cache isn't used by mistake
id1, err = buildImage(name,
`FROM busybox
LABEL f1=b1 f2=b2`, false)
if err != nil {
c.Fatalf("Build 5 should have worked: %q", err)
}
id2, err = buildImage(name,
`FROM busybox
LABEL f1="b1 f2=b2"`, true)
if err != nil || id1 == id2 {
c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err)
}
}
func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) {
// This test makes sure that -q works correctly when build is successful:
// stdout has only the image ID (long image ID) and stderr is empty.
var stdout, stderr string
var err error
outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$")
tt := []struct {
Name string
BuildFunc func(string)
}{
{
Name: "quiet_build_stdin_success",
BuildFunc: func(name string) {
_, stdout, stderr, err = buildImageWithStdoutStderr(name, "FROM busybox", true, "-q", "--force-rm", "--rm")
},
},
{
Name: "quiet_build_ctx_success",
BuildFunc: func(name string) {
ctx, err := fakeContext("FROM busybox", map[string]string{
"quiet_build_success_fctx": "test",
})
if err != nil {
c.Fatalf("Failed to create context: %s", err.Error())
}
defer ctx.Close()
_, stdout, stderr, err = buildImageFromContextWithStdoutStderr(name, ctx, true, "-q", "--force-rm", "--rm")
},
},
{
Name: "quiet_build_git_success",
BuildFunc: func(name string) {
git, err := newFakeGit("repo", map[string]string{
"Dockerfile": "FROM busybox",
}, true)
if err != nil {
c.Fatalf("Failed to create the git repo: %s", err.Error())
}
defer git.Close()
_, stdout, stderr, err = buildImageFromGitWithStdoutStderr(name, git, true, "-q", "--force-rm", "--rm")
},
},
}
for _, te := range tt {
te.BuildFunc(te.Name)
if err != nil {
c.Fatalf("Test %s shouldn't fail, but got the following error: %s", te.Name, err.Error())
}
if outRegexp.Find([]byte(stdout)) == nil {
c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, stdout)
}
if stderr != "" {
c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, stderr)
}
}
}
func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) {
// This test makes sure that -q works correctly when build fails by
// comparing between the stderr output in quiet mode and in stdout
// and stderr output in verbose mode
testRequires(c, Network)
testName := "quiet_build_not_exists_image"
buildCmd := "FROM busybox11"
_, _, qstderr, qerr := buildImageWithStdoutStderr(testName, buildCmd, false, "-q", "--force-rm", "--rm")
_, vstdout, vstderr, verr := buildImageWithStdoutStderr(testName, buildCmd, false, "--force-rm", "--rm")
if verr == nil || qerr == nil {
c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", testName))
}
if qstderr != vstdout+vstderr {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, qstderr, vstdout+vstderr))
}
}
func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) {
// This test makes sure that -q works correctly when build fails by
// comparing between the stderr output in quiet mode and in stdout
// and stderr output in verbose mode
tt := []struct {
TestName string
BuildCmds string
}{
{"quiet_build_no_from_at_the_beginning", "RUN whoami"},
{"quiet_build_unknown_instr", "FROMD busybox"},
}
for _, te := range tt {
_, _, qstderr, qerr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "-q", "--force-rm", "--rm")
_, vstdout, vstderr, verr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "--force-rm", "--rm")
if verr == nil || qerr == nil {
c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", te.TestName))
}
if qstderr != vstdout+vstderr {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", te.TestName, qstderr, vstdout+vstderr))
}
}
}
func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) {
// This test ensures that when given a wrong URL, stderr in quiet mode and
// stderr in verbose mode are identical.
// TODO(vdemeester) with cobra, stdout has a carriage return too much so this test should not check stdout
URL := "http://something.invalid"
Name := "quiet_build_wrong_remote"
_, _, qstderr, qerr := buildImageWithStdoutStderr(Name, "", false, "-q", "--force-rm", "--rm", URL)
_, _, vstderr, verr := buildImageWithStdoutStderr(Name, "", false, "--force-rm", "--rm", URL)
if qerr == nil || verr == nil {
c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", Name))
}
if qstderr != vstderr {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", Name, qstderr, vstderr))
}
}
func (s *DockerSuite) TestBuildStderr(c *check.C) {
// This test just makes sure that no non-error output goes
// to stderr
name := "testbuildstderr"
_, _, stderr, err := buildImageWithStdoutStderr(name,
"FROM busybox\nRUN echo one", true)
if err != nil {
c.Fatal(err)
}
if runtime.GOOS == "windows" &&
daemonPlatform != "windows" {
// Windows to non-Windows should have a security warning
if !strings.Contains(stderr, "SECURITY WARNING:") {
c.Fatalf("Stderr contains unexpected output: %q", stderr)
}
} else {
// Other platform combinations should have no stderr written too
if stderr != "" {
c.Fatalf("Stderr should have been empty, instead it's: %q", stderr)
}
}
}
func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) {
testRequires(c, UnixCli) // test uses chown: not available on windows
testRequires(c, DaemonIsLinux)
name := "testbuildchownsinglefile"
ctx, err := fakeContext(`
FROM busybox
COPY test /
RUN ls -l /test
RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ]
`, map[string]string{
"test": "test",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil {
c.Fatal(err)
}
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
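// Verify that a symlink pointing outside the build root cannot be used to write files onto the host (symlink breakout).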
func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) {
name := "testbuildsymlinkbreakout"
tmpdir, err := ioutil.TempDir("", name)
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpdir)
ctx := filepath.Join(tmpdir, "context")
if err := os.MkdirAll(ctx, 0755); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(`
from busybox
add symlink.tar /
add inject /symlink/
`), 0644); err != nil {
c.Fatal(err)
}
inject := filepath.Join(ctx, "inject")
if err := ioutil.WriteFile(inject, nil, 0644); err != nil {
c.Fatal(err)
}
f, err := os.Create(filepath.Join(ctx, "symlink.tar"))
if err != nil {
c.Fatal(err)
}
w := tar.NewWriter(f)
w.WriteHeader(&tar.Header{
Name: "symlink2",
Typeflag: tar.TypeSymlink,
Linkname: "/../../../../../../../../../../../../../../",
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.WriteHeader(&tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: filepath.Join("symlink2", tmpdir),
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.Close()
f.Close()
if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil {
c.Fatal(err)
}
if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil {
c.Fatal("symlink breakout - inject")
} else if !os.IsNotExist(err) {
c.Fatalf("unexpected error: %v", err)
}
}
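// Verify that decompressing an ADDed .xz file does not execute an xz binary that was installed into the image earlier in the build.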
func (s *DockerSuite) TestBuildXZHost(c *check.C) {
// /usr/local/sbin/xz gets permission denied for the user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildxzhost"
ctx, err := fakeContext(`
FROM busybox
ADD xz /usr/local/sbin/
RUN chmod 755 /usr/local/sbin/xz
ADD test.xz /
RUN [ ! -e /injected ]`,
map[string]string{
"test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" +
"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" +
"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21",
"xz": "#!/bin/sh\ntouch /injected",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) {
// /foo/file gets permission denied for the user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127
var (
name = "testbuildvolumescontent"
expected = "some text"
volName = "/foo"
)
if daemonPlatform == "windows" {
volName = "C:/foo"
}
ctx, err := fakeContext(`
FROM busybox
COPY content /foo/file
VOLUME `+volName+`
CMD cat /foo/file`,
map[string]string{
"content": expected,
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, false); err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "run", "--rm", name)
if out != expected {
c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out)
}
}
func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) {
ctx, err := fakeContext(`FROM busybox
RUN echo from Dockerfile`,
map[string]string{
"Dockerfile": "FROM busybox\nRUN echo from Dockerfile",
"files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile",
"files/dFile": "FROM busybox\nRUN echo from files/dFile",
"dFile": "FROM busybox\nRUN echo from dFile",
"files/dFile2": "FROM busybox\nRUN echo from files/dFile2",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".")
if err != nil {
c.Fatalf("Failed to build: %s\n%s", out, err)
}
if !strings.Contains(out, "from Dockerfile") {
c.Fatalf("test1 should have used Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".")
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "from files/Dockerfile") {
c.Fatalf("test2 should have used files/Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".")
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "from files/dFile") {
c.Fatalf("test3 should have used files/dFile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".")
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "from dFile") {
c.Fatalf("test4 should have used dFile, output:%s", out)
}
dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5")
c.Assert(err, check.IsNil)
nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile")
if _, err = os.Create(nonDockerfileFile); err != nil {
c.Fatal(err)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".")
if err == nil {
c.Fatalf("test5 was supposed to fail to find passwd")
}
if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) {
c.Fatalf("wrong error message:%v\nexpected to contain=%v", out, expected)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..")
if err != nil {
c.Fatalf("test6 failed: %s", err)
}
if !strings.Contains(out, "from Dockerfile") {
c.Fatalf("test6 should have used root Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..")
if err != nil {
c.Fatalf("test7 failed: %s", err)
}
if !strings.Contains(out, "from files/Dockerfile") {
c.Fatalf("test7 should have used files Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".")
if err == nil || !strings.Contains(out, "must be within the build context") {
c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err)
}
tmpDir := os.TempDir()
out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir)
if err != nil {
c.Fatalf("test9 - failed: %s", err)
}
if !strings.Contains(out, "from Dockerfile") {
c.Fatalf("test9 should have used root Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".")
if err != nil {
c.Fatalf("test10 should have worked: %s", err)
}
if !strings.Contains(out, "from files/dFile2") {
c.Fatalf("test10 should have used files/dFile2, output:%s", out)
}
}
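// Verify that a build succeeds when both 'Dockerfile' and 'dockerfile' exist in the context (case-sensitive filesystems only).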
func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) {
testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows
testRequires(c, DaemonIsLinux)
ctx, err := fakeContext(`FROM busybox
RUN echo from dockerfile`,
map[string]string{
"dockerfile": "FROM busybox\nRUN echo from dockerfile",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".")
if err != nil {
c.Fatalf("Failed to build: %s\n%s", out, err)
}
if !strings.Contains(out, "from dockerfile") {
c.Fatalf("Missing proper output: %s", out)
}
}
func (s *DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) {
testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows
testRequires(c, DaemonIsLinux)
ctx, err := fakeContext(`FROM busybox
RUN echo from Dockerfile`,
map[string]string{
"dockerfile": "FROM busybox\nRUN echo from dockerfile",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".")
if err != nil {
c.Fatalf("Failed to build: %s\n%s", out, err)
}
if !strings.Contains(out, "from Dockerfile") {
c.Fatalf("Missing proper output: %s", out)
}
}
func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) {
server, err := fakeStorage(map[string]string{"baz": `FROM busybox
RUN echo from baz
COPY * /tmp/
RUN find /tmp/`})
if err != nil {
c.Fatal(err)
}
defer server.Close()
ctx, err := fakeContext(`FROM busybox
RUN echo from Dockerfile`,
map[string]string{})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
// Make sure that -f is ignored and that we don't use the Dockerfile
// that's in the current dir
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz")
if err != nil {
c.Fatalf("Failed to build: %s\n%s", out, err)
}
if !strings.Contains(out, "from baz") ||
strings.Contains(out, "/tmp/baz") ||
!strings.Contains(out, "/tmp/Dockerfile") {
c.Fatalf("Missing proper output: %s", out)
}
}
func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why
ctx, err := fakeContext(`FROM busybox
RUN echo "from Dockerfile"`,
map[string]string{})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
// Make sure that -f is ignored and that we don't use the Dockerfile
// that's in the current dir
dockerCommand := exec.Command(dockerBinary, "build", "-f", "baz", "-t", "test1", "-")
dockerCommand.Dir = ctx.Dir
dockerCommand.Stdin = strings.NewReader(`FROM busybox
RUN echo "from baz"
COPY * /tmp/
RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`)
out, status, err := runCommandWithOutput(dockerCommand)
if err != nil || status != 0 {
c.Fatalf("Error building: %s", err)
}
if !strings.Contains(out, "from baz") ||
strings.Contains(out, "/tmp/baz") ||
!strings.Contains(out, "/tmp/Dockerfile") {
c.Fatalf("Missing proper output: %s", out)
}
}
func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) {
name := "testbuildfromofficial"
fromNames := []string{
"busybox",
"docker.io/busybox",
"index.docker.io/busybox",
"library/busybox",
"docker.io/library/busybox",
"index.docker.io/library/busybox",
}
for idx, fromName := range fromNames {
imgName := fmt.Sprintf("%s%d", name, idx)
_, err := buildImage(imgName, "FROM "+fromName, true)
if err != nil {
c.Errorf("Build failed using FROM %s: %s", fromName, err)
}
deleteImages(imgName)
}
}
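// Verify that a Dockerfile outside the build context is rejected, whether referenced by a relative path or via a symlink inside the context.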
func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) {
testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2)
testRequires(c, DaemonIsLinux)
name := "testbuilddockerfileoutsidecontext"
tmpdir, err := ioutil.TempDir("", name)
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpdir)
ctx := filepath.Join(tmpdir, "context")
if err := os.MkdirAll(ctx, 0755); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil {
c.Fatal(err)
}
wd, err := os.Getwd()
if err != nil {
c.Fatal(err)
}
defer os.Chdir(wd)
if err := os.Chdir(ctx); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil {
c.Fatal(err)
}
if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil {
c.Fatal(err)
}
if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil {
c.Fatal(err)
}
for _, dockerfilePath := range []string{
filepath.Join("..", "outsideDockerfile"),
filepath.Join(ctx, "dockerfile1"),
filepath.Join(ctx, "dockerfile2"),
} {
result := dockerCmdWithResult("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".")
c.Assert(result, icmd.Matches, icmd.Expected{
Err: "must be within the build context",
ExitCode: 1,
})
deleteImages(name)
}
os.Chdir(tmpdir)
// Path to Dockerfile should be resolved relative to working directory, not relative to context.
// There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail
out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx)
if err == nil {
c.Fatalf("Expected error. Out: %s", out)
}
}
func (s *DockerSuite) TestBuildSpaces(c *check.C) {
// Test to make sure that leading/trailing spaces on a command
// doesn't change the error msg we get
var (
err1 error
err2 error
)
name := "testspaces"
ctx, err := fakeContext("FROM busybox\nCOPY\n",
map[string]string{
"Dockerfile": "FROM busybox\nCOPY\n",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil {
c.Fatal("Build 1 was supposed to fail, but didn't")
}
ctx.Add("Dockerfile", "FROM busybox\nCOPY ")
if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil {
c.Fatal("Build 2 was supposed to fail, but didn't")
}
removeLogTimestamps := func(s string) string {
return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`)
}
// Skip over the times
e1 := removeLogTimestamps(err1.Error())
e2 := removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2)
}
ctx.Add("Dockerfile", "FROM busybox\n COPY")
if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil {
c.Fatal("Build 3 was supposed to fail, but didn't")
}
// Skip over the times
e1 = removeLogTimestamps(err1.Error())
e2 = removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2)
}
ctx.Add("Dockerfile", "FROM busybox\n COPY ")
if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil {
c.Fatal("Build 4 was supposed to fail, but didn't")
}
// Skip over the times
e1 = removeLogTimestamps(err1.Error())
e2 = removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2)
}
}
func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) {
// Test to make sure that spaces in quotes aren't lost
name := "testspacesquotes"
dockerfile := `FROM busybox
RUN echo " \
foo "`
_, out, err := buildImageWithOut(name, dockerfile, false)
if err != nil {
c.Fatal("Build failed:", err)
}
expecting := "\n foo \n"
// Windows uses the builtin echo, which preserves quotes
if daemonPlatform == "windows" {
expecting = "\" foo \""
}
if !strings.Contains(out, expecting) {
c.Fatalf("Bad output: %q expecting to contain %q", out, expecting)
}
}
// #4393
func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This should error out
buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-")
buildCmd.Stdin = strings.NewReader(`
FROM busybox
RUN touch /foo
VOLUME /foo
`)
out, _, err := runCommandWithOutput(buildCmd)
if err == nil || !strings.Contains(out, "file exists") {
c.Fatalf("expected build to fail when file exists in container at requested volume path")
}
}
func (s *DockerSuite) TestBuildMissingArgs(c *check.C) {
// Test to make sure that all Dockerfile commands (except the ones listed
// in skipCmds) will generate an error if no args are provided.
// Note: INSERT is deprecated so we exclude it because of that.
skipCmds := map[string]struct{}{
"CMD": {},
"RUN": {},
"ENTRYPOINT": {},
"INSERT": {},
}
if daemonPlatform == "windows" {
skipCmds = map[string]struct{}{
"CMD": {},
"RUN": {},
"ENTRYPOINT": {},
"INSERT": {},
"STOPSIGNAL": {},
"ARG": {},
"USER": {},
"EXPOSE": {},
}
}
for cmd := range command.Commands {
cmd = strings.ToUpper(cmd)
if _, ok := skipCmds[cmd]; ok {
continue
}
var dockerfile string
if cmd == "FROM" {
dockerfile = cmd
} else {
// Add FROM to make sure we don't complain about it missing
dockerfile = "FROM busybox\n" + cmd
}
ctx, err := fakeContext(dockerfile, map[string]string{})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
var out string
if out, err = buildImageFromContext("args", ctx, true); err == nil {
c.Fatalf("%s was supposed to fail. Out:%s", cmd, out)
}
if !strings.Contains(err.Error(), cmd+" requires") {
c.Fatalf("%s returned the wrong type of error:%s", cmd, err)
}
}
}
func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) {
testRequires(c, DaemonIsLinux)
_, out, err := buildImageWithOut("sc", "FROM scratch", true)
if err == nil {
c.Fatalf("Build was supposed to fail")
}
if !strings.Contains(out, "No image was generated") {
c.Fatalf("Wrong error message: %v", out)
}
}
func (s *DockerSuite) TestBuildDotDotFile(c *check.C) {
ctx, err := fakeContext("FROM busybox\n",
map[string]string{
"..gitme": "",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err = buildImageFromContext("sc", ctx, false); err != nil {
c.Fatalf("Build was supposed to work: %s", err)
}
}
func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) {
testRequires(c, DaemonIsLinux) // No hello-world Windows image
name := "testbuildrunonejson"
ctx, err := fakeContext(`FROM hello-world:frozen
RUN [ "/hello" ]`, map[string]string{})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-t", name, ".")
if err != nil {
c.Fatalf("failed to build the image: %s, %v", out, err)
}
if !strings.Contains(out, "Hello from Docker") {
c.Fatalf("bad output: %s", out)
}
}
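// Verify that VOLUME with a path that expands to an empty string fails the build.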
func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) {
name := "testbuildemptystringvolume"
_, err := buildImage(name, `
FROM busybox
ENV foo=""
VOLUME $foo
`, false)
if err == nil {
c.Fatal("Should have failed to build")
}
}
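// Verify that intermediate build containers run under the cgroup parent passed via --cgroup-parent.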
func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) {
testRequires(c, SameHostDaemon)
testRequires(c, DaemonIsLinux)
cgroupParent := "test"
data, err := ioutil.ReadFile("/proc/self/cgroup")
if err != nil {
c.Fatalf("failed to read '/proc/self/cgroup - %v", err)
}
selfCgroupPaths := parseCgroupPaths(string(data))
_, found := selfCgroupPaths["memory"]
if !found {
c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths)
}
cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-")
cmd.Stdin = strings.NewReader(`
FROM busybox
RUN cat /proc/self/cgroup
`)
out, _, err := runCommandWithOutput(cmd)
if err != nil {
c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), out)
c.Assert(err, check.IsNil)
if !m {
c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, out)
}
}
func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) {
// Check to make sure our build output prints the Dockerfile cmd
// properly - there was a bug that caused it to be duplicated on the
// Step X line
name := "testbuildnodupoutput"
_, out, err := buildImageWithOut(name, `
FROM busybox
RUN env`, false)
if err != nil {
c.Fatalf("Build should have worked: %q", err)
}
exp := "\nStep 2/2 : RUN env\n"
if !strings.Contains(out, exp) {
c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp)
}
}
// GH15826
func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) {
// Explicit check to ensure that build starts from step 1 rather than 0
name := "testbuildstartsfromone"
_, out, err := buildImageWithOut(name, `
FROM busybox`, false)
if err != nil {
c.Fatalf("Build should have worked: %q", err)
}
exp := "\nStep 1/1 : FROM busybox\n"
if !strings.Contains(out, exp) {
c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp)
}
}
func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) {
// Test to make sure the bad command is quoted with just "s and
// not as a Go []string
name := "testbuildbadrunerrmsg"
_, out, err := buildImageWithOut(name, `
FROM busybox
RUN badEXE a1 \& a2 a3`, false) // tab between a2 and a3
if err == nil {
c.Fatal("Should have failed to build")
}
shell := "/bin/sh -c"
exitCode := "127"
if daemonPlatform == "windows" {
shell = "cmd /S /C"
// architectural - Windows has to start the container to determine the exe is bad, Linux does not
exitCode = "1"
}
exp := `The command '` + shell + ` badEXE a1 \& a2 a3' returned a non-zero code: ` + exitCode
if !strings.Contains(out, exp) {
c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp)
}
}
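// Verify that building FROM a trusted tag resolves it to a signed digest reference (FROM <repo>@sha...).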
func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) {
repoName := s.setupTrustedImage(c, "trusted-build")
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, repoName)
name := "testtrustedbuild"
buildCmd := buildImageCmd(name, dockerFile, true)
s.trustedCmd(buildCmd)
out, _, err := runCommandWithOutput(buildCmd)
if err != nil {
c.Fatalf("Error running trusted build: %s\n%s", err, out)
}
if !strings.Contains(out, fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7])) {
c.Fatalf("Unexpected output on trusted build:\n%s", out)
}
// We should also have a tag reference for the image.
if out, exitCode := dockerCmd(c, "inspect", repoName); exitCode != 0 {
c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out)
}
// We should now be able to remove the tag reference.
if out, exitCode := dockerCmd(c, "rmi", repoName); exitCode != 0 {
c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out)
}
}
func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL)
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, repoName)
name := "testtrustedbuilduntrustedtag"
buildCmd := buildImageCmd(name, dockerFile, true)
s.trustedCmd(buildCmd)
out, _, err := runCommandWithOutput(buildCmd)
if err == nil {
c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out)
}
if !strings.Contains(out, "does not have trust data for") {
c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out)
}
}
func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) {
testRequires(c, DaemonIsLinux)
tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tempDir)
// Make a real context directory in this temp directory with a simple
// Dockerfile.
realContextDirname := filepath.Join(tempDir, "context")
if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil {
c.Fatal(err)
}
if err = ioutil.WriteFile(
filepath.Join(realContextDirname, "Dockerfile"),
[]byte(`
FROM busybox
RUN echo hello world
`),
os.FileMode(0644),
); err != nil {
c.Fatal(err)
}
// Make a symlink to the real context directory.
contextSymlinkName := filepath.Join(tempDir, "context_link")
if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil {
c.Fatal(err)
}
// Executing the build with the symlink as the specified context should
// *not* fail.
if out, exitStatus := dockerCmd(c, "build", contextSymlinkName); exitStatus != 0 {
c.Fatalf("build failed with exit status %d: %s", exitStatus, out)
}
}
func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) {
testRequires(c, NotaryHosting)
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
repoName := strings.TrimSuffix(latestTag, ":latest")
// Now create the releases role
s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public)
s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private)
s.notaryPublish(c, repoName)
// push a different tag to the releases role
otherTag := fmt.Sprintf("%s:other", repoName)
dockerCmd(c, "tag", "busybox", otherTag)
pushCmd := exec.Command(dockerBinary, "push", otherTag)
s.trustedCmd(pushCmd)
out, _, err := runCommandWithOutput(pushCmd)
c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out))
s.assertTargetInRoles(c, repoName, "other", "targets/releases")
s.assertTargetNotInRoles(c, repoName, "other", "targets")
out, status := dockerCmd(c, "rmi", otherTag)
c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out))
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, otherTag)
name := "testtrustedbuildreleasesrole"
buildCmd := buildImageCmd(name, dockerFile, true)
s.trustedCmd(buildCmd)
out, _, err = runCommandWithOutput(buildCmd)
c.Assert(err, check.IsNil, check.Commentf("Trusted build failed: %s", out))
c.Assert(out, checker.Contains, fmt.Sprintf("FROM %s@sha", repoName))
}
func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) {
testRequires(c, NotaryHosting)
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
repoName := strings.TrimSuffix(latestTag, ":latest")
// Now create a non-releases delegation role
s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public)
s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private)
s.notaryPublish(c, repoName)
// push a different tag to the other role
otherTag := fmt.Sprintf("%s:other", repoName)
dockerCmd(c, "tag", "busybox", otherTag)
pushCmd := exec.Command(dockerBinary, "push", otherTag)
s.trustedCmd(pushCmd)
out, _, err := runCommandWithOutput(pushCmd)
c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out))
s.assertTargetInRoles(c, repoName, "other", "targets/other")
s.assertTargetNotInRoles(c, repoName, "other", "targets")
out, status := dockerCmd(c, "rmi", otherTag)
c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out))
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, otherTag)
name := "testtrustedbuildotherrole"
buildCmd := buildImageCmd(name, dockerFile, true)
s.trustedCmd(buildCmd)
out, _, err = runCommandWithOutput(buildCmd)
c.Assert(err, check.NotNil, check.Commentf("Trusted build expected to fail: %s", out))
}
// Issue #15634: COPY fails when path starts with "null"
func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) {
name := "testbuildnullstringinaddcopyvolume"
volName := "nullvolume"
if daemonPlatform == "windows" {
volName = `C:\\nullvolume`
}
ctx, err := fakeContext(`
FROM busybox
ADD null /
COPY nullfile /
VOLUME `+volName+`
`,
map[string]string{
"null": "test1",
"nullfile": "test2",
},
)
c.Assert(err, check.IsNil)
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true)
c.Assert(err, check.IsNil)
}
func (s *DockerSuite) TestBuildStopSignal(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support STOPSIGNAL yet
imgName := "test_build_stop_signal"
_, err := buildImage(imgName,
`FROM busybox
STOPSIGNAL SIGKILL`,
true)
c.Assert(err, check.IsNil)
res := inspectFieldJSON(c, imgName, "Config.StopSignal")
if res != `"SIGKILL"` {
c.Fatalf("Signal %s, expected SIGKILL", res)
}
containerName := "test-container-stop-signal"
dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top")
res = inspectFieldJSON(c, containerName, "Config.StopSignal")
if res != `"SIGKILL"` {
c.Fatalf("Signal %s, expected SIGKILL", res)
}
}
func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
args := []string{"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)}
var dockerfile string
if daemonPlatform == "windows" {
// Bugs in Windows busybox port - use the default base image and native cmd stuff
dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+`
ARG %s
RUN echo %%%s%%
CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey)
} else {
dockerfile = fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s
CMD echo $%s`, envKey, envKey, envKey)
}
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal)
}
containerName := "bldargCont"
out, _ := dockerCmd(c, "run", "--name", containerName, imgName)
out = strings.Trim(out, " \r\n'")
if out != "" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
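// A rough sketch of what the test above drives programmatically, assuming a
// Linux daemon with a local busybox image (the names mirror the ones used in
// the test and are illustrative only):
//
//	# Dockerfile
//	FROM busybox
//	ARG foo
//	RUN echo $foo        # prints "bar" at build time
//	CMD echo $foo        # prints an empty line at run time; ARG values are not persisted
//
//	docker build --build-arg foo=bar -t bldargtest .
//	docker run --rm bldargtest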
func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envDef := "bar1"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=%s`, envKey, envDef)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal)
}
out, _ := dockerCmd(c, "history", "--no-trunc", imgName)
outputTabs := strings.Split(out, "\n")[1]
if !strings.Contains(outputTabs, envDef) {
c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s`, envKey, envKey)
origImgID := ""
var err error
if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil {
c.Fatal(err)
}
imgNameCache := "bldargtestcachehit"
if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID != origImgID {
if err != nil {
c.Fatal(err)
}
c.Fatalf("build didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
extraEnvKey := "foo1"
extraEnvVal := "bar1"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ARG %s
RUN echo $%s`, envKey, extraEnvKey, envKey)
origImgID := ""
var err error
if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil {
c.Fatal(err)
}
imgNameCache := "bldargtestcachemiss"
args = append(args, "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal))
if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID {
if err != nil {
c.Fatal(err)
}
c.Fatalf("build used cache, expected a miss!")
}
}
func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
newEnvVal := "bar1"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s`, envKey, envKey)
origImgID := ""
var err error
if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil {
c.Fatal(err)
}
imgNameCache := "bldargtestcachemiss"
args = []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal),
}
if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID {
if err != nil {
c.Fatal(err)
}
c.Fatalf("build used cache, expected a miss!")
}
}
func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ENV %s %s
RUN echo $%s
CMD echo $%s
`, envKey, envKey, envValOveride, envKey, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ENV %s %s
ARG %s
RUN echo $%s
CMD echo $%s
`, envKey, envValOveride, envKey, envKey, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldvarstest"
wdVar := "WDIR"
wdVal := "/tmp/"
addVar := "AFILE"
addVal := "addFile"
copyVar := "CFILE"
copyVal := "copyFile"
envVar := "foo"
envVal := "bar"
exposeVar := "EPORT"
exposeVal := "9999"
userVar := "USER"
userVal := "testUser"
volVar := "VOL"
volVal := "/testVol/"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal),
"--build-arg", fmt.Sprintf("%s=%s", addVar, addVal),
"--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal),
"--build-arg", fmt.Sprintf("%s=%s", envVar, envVal),
"--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal),
"--build-arg", fmt.Sprintf("%s=%s", userVar, userVal),
"--build-arg", fmt.Sprintf("%s=%s", volVar, volVal),
}
ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
ARG %s
WORKDIR ${%s}
ARG %s
ADD ${%s} testDir/
ARG %s
COPY $%s testDir/
ARG %s
ENV %s=${%s}
ARG %s
EXPOSE $%s
ARG %s
USER $%s
ARG %s
VOLUME ${%s}`,
wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar,
envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar),
map[string]string{
addVal: "some stuff",
copyVal: "some stuff",
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(imgName, ctx, true, args...); err != nil {
c.Fatal(err)
}
var resMap map[string]interface{}
var resArr []string
res := ""
res = inspectField(c, imgName, "Config.WorkingDir")
if res != filepath.ToSlash(filepath.Clean(wdVal)) {
c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res)
}
inspectFieldAndMarshall(c, imgName, "Config.Env", &resArr)
found := false
for _, v := range resArr {
if fmt.Sprintf("%s=%s", envVar, envVal) == v {
found = true
break
}
}
if !found {
c.Fatalf("Config.Env value mismatch. Expected <key=value> to exist: %s=%s, got: %v",
envVar, envVal, resArr)
}
inspectFieldAndMarshall(c, imgName, "Config.ExposedPorts", &resMap)
if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok {
c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap)
}
res = inspectField(c, imgName, "Config.User")
if res != userVal {
c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res)
}
inspectFieldAndMarshall(c, imgName, "Config.Volumes", &resMap)
if _, ok := resMap[volVal]; !ok {
c.Fatalf("Config.Volumes value mismatch. Expected volume: %s, got: %v", volVal, resMap)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldvarstest"
envKey := "foo"
envVal := "bar"
envKey1 := "foo1"
envValOveride := "barOverride"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ENV %s %s
ENV %s ${%s}
RUN echo $%s
CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
ARG %s
CMD echo $%s`, envKey, envKey, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Contains(out, envVal) {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("able to access environment variable in output: %q expected to be missing", out)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support --build-arg
imgName := "bldargtest"
envKey := "HTTP_PROXY"
envVal := "bar"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
CMD echo $%s`, envKey, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride),
}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=%s
ENV %s $%s
RUN echo $%s
CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 1 {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
args := []string{
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
}
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
CMD echo $%s`, envKey, envKey)
errStr := "One or more build-args"
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err == nil {
c.Fatalf("build succeeded, expected to fail. Output: %v", out)
} else if !strings.Contains(out, errStr) {
c.Fatalf("Unexpected error. output: %q, expected error: %q", out, errStr)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envKey1 := "foo1"
envKey2 := "foo2"
envKey3 := "foo3"
args := []string{}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=""
ARG %s=''
ARG %s="''"
ARG %s='""'
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3,
envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3,
envKey2, envKey3)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envKey1 := "foo1"
envKey2 := "foo2"
args := []string{}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=
ARG %s=""
ARG %s=''
RUN [ "$%s" == "$%s" ]
RUN [ "$%s" == "$%s" ]
RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgDefinitionWithNoEnvInjection(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
args := []string{}
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN env`, envKey)
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envKey) != 1 {
if err != nil {
c.Fatalf("build failed to complete: %q %q", out, err)
}
c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", out)
}
}
func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) {
volName := "testname:/foo"
if daemonPlatform == "windows" {
volName = "testname:C:\\foo"
}
dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops")
dockerFile := `FROM busybox
VOLUME ` + volName + `
RUN ls /foo/oops
`
_, err := buildImage("test", dockerFile, false)
c.Assert(err, check.NotNil, check.Commentf("image build should have failed"))
}
func (s *DockerSuite) TestBuildTagEvent(c *check.C) {
since := daemonUnixTime(c)
dockerFile := `FROM busybox
RUN echo events
`
_, err := buildImage("test", dockerFile, false)
c.Assert(err, check.IsNil)
until := daemonUnixTime(c)
out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image")
events := strings.Split(strings.TrimSpace(out), "\n")
actions := eventActionsByIDAndType(c, events, "test:latest", "image")
var foundTag bool
for _, a := range actions {
if a == "tag" {
foundTag = true
break
}
}
c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out))
}
// #15780
func (s *DockerSuite) TestBuildMultipleTags(c *check.C) {
dockerfile := `
FROM busybox
MAINTAINER test-15780
`
cmd := exec.Command(dockerBinary, "build", "-t", "tag1", "-t", "tag2:v2",
"-t", "tag1:latest", "-t", "tag1", "--no-cache", "-")
cmd.Stdin = strings.NewReader(dockerfile)
_, err := runCommand(cmd)
c.Assert(err, check.IsNil)
id1, err := getIDByName("tag1")
c.Assert(err, check.IsNil)
id2, err := getIDByName("tag2:v2")
c.Assert(err, check.IsNil)
c.Assert(id1, check.Equals, id2)
}
// #17290
func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) {
name := "testbuildbrokensymlink"
ctx, err := fakeContext(`
FROM busybox
COPY . ./`,
map[string]string{
"foo": "bar",
})
c.Assert(err, checker.IsNil)
defer ctx.Close()
err = os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
// warm up cache
_, err = buildImageFromContext(name, ctx, true)
c.Assert(err, checker.IsNil)
// add new file to context, should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644)
c.Assert(err, checker.IsNil)
_, out, err := buildImageFromContextWithOut(name, ctx, true)
c.Assert(err, checker.IsNil)
c.Assert(out, checker.Not(checker.Contains), "Using cache")
}
func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) {
name := "testbuildbrokensymlink"
ctx, err := fakeContext(`
FROM busybox
COPY asymlink target`,
map[string]string{
"foo": "bar",
})
c.Assert(err, checker.IsNil)
defer ctx.Close()
err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
id, err := buildImageFromContext(name, ctx, true)
c.Assert(err, checker.IsNil)
out, _ := dockerCmd(c, "run", "--rm", id, "cat", "target")
c.Assert(out, checker.Matches, "bar")
// change target file should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644)
c.Assert(err, checker.IsNil)
id, out, err = buildImageFromContextWithOut(name, ctx, true)
c.Assert(err, checker.IsNil)
c.Assert(out, checker.Not(checker.Contains), "Using cache")
out, _ = dockerCmd(c, "run", "--rm", id, "cat", "target")
c.Assert(out, checker.Matches, "baz")
}
func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) {
name := "testbuildbrokensymlink"
ctx, err := fakeContext(`
FROM busybox
COPY asymlink /`,
map[string]string{
"foo/abc": "bar",
"foo/def": "baz",
})
c.Assert(err, checker.IsNil)
defer ctx.Close()
err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
id, err := buildImageFromContext(name, ctx, true)
c.Assert(err, checker.IsNil)
out, _ := dockerCmd(c, "run", "--rm", id, "cat", "abc", "def")
c.Assert(out, checker.Matches, "barbaz")
// change target file should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644)
c.Assert(err, checker.IsNil)
id, out, err = buildImageFromContextWithOut(name, ctx, true)
c.Assert(err, checker.IsNil)
c.Assert(out, checker.Not(checker.Contains), "Using cache")
out, _ = dockerCmd(c, "run", "--rm", id, "cat", "abc", "def")
c.Assert(out, checker.Matches, "barbax")
}
// TestBuildSymlinkBasename tests that target file gets basename from symlink,
// not from the target file.
func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) {
name := "testbuildbrokensymlink"
ctx, err := fakeContext(`
FROM busybox
COPY asymlink /`,
map[string]string{
"foo": "bar",
})
c.Assert(err, checker.IsNil)
defer ctx.Close()
err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
id, err := buildImageFromContext(name, ctx, true)
c.Assert(err, checker.IsNil)
out, _ := dockerCmd(c, "run", "--rm", id, "cat", "asymlink")
c.Assert(out, checker.Matches, "bar")
}
// #17827
func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) {
name := "testbuildrootsource"
ctx, err := fakeContext(`
FROM busybox
COPY / /data`,
map[string]string{
"foo": "bar",
})
c.Assert(err, checker.IsNil)
defer ctx.Close()
// warm up cache
_, err = buildImageFromContext(name, ctx, true)
c.Assert(err, checker.IsNil)
// change file, should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644)
c.Assert(err, checker.IsNil)
_, out, err := buildImageFromContextWithOut(name, ctx, true)
c.Assert(err, checker.IsNil)
c.Assert(out, checker.Not(checker.Contains), "Using cache")
}
// #19375
func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) {
cmd := exec.Command(dockerBinary, "build", "github.com/docker/v1.10-migrator.git")
cmd.Env = append(cmd.Env, "PATH=")
out, _, err := runCommandWithOutput(cmd)
c.Assert(err, checker.NotNil)
c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ")
cmd = exec.Command(dockerBinary, "build", "https://github.com/docker/v1.10-migrator.git")
cmd.Env = append(cmd.Env, "PATH=")
out, _, err = runCommandWithOutput(cmd)
c.Assert(err, checker.NotNil)
c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ")
}
// TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir
func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildworkdirwindowspath"
_, err := buildImage(name, `
FROM `+WindowsBaseImage+`
RUN mkdir C:\\work
WORKDIR C:\\work
RUN if "%CD%" NEQ "C:\work" exit -1
`, true)
if err != nil {
c.Fatal(err)
}
}
func (s *DockerSuite) TestBuildLabel(c *check.C) {
name := "testbuildlabel"
testLabel := "foo"
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
LABEL default foo
`, false, "--label", testLabel)
c.Assert(err, checker.IsNil)
res := inspectFieldJSON(c, name, "Config.Labels")
var labels map[string]string
if err := json.Unmarshal([]byte(res), &labels); err != nil {
c.Fatal(err)
}
if _, ok := labels[testLabel]; !ok {
c.Fatal("label not found in image")
}
}
func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) {
name := "testbuildlabel"
_, err := buildImage(name, "FROM busybox", false, "--label", "foo=bar")
c.Assert(err, checker.IsNil)
res, err := inspectImage(name, "json .Config.Labels")
c.Assert(err, checker.IsNil)
var labels map[string]string
if err := json.Unmarshal([]byte(res), &labels); err != nil {
c.Fatal(err)
}
v, ok := labels["foo"]
if !ok {
c.Fatal("label `foo` not found in image")
}
c.Assert(v, checker.Equals, "bar")
}
func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) {
name := "testbuildlabelcachecommit"
testLabel := "foo"
if _, err := buildImage(name, `
FROM `+minimalBaseImage()+`
LABEL default foo
`, false); err != nil {
c.Fatal(err)
}
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
LABEL default foo
`, true, "--label", testLabel)
c.Assert(err, checker.IsNil)
res := inspectFieldJSON(c, name, "Config.Labels")
var labels map[string]string
if err := json.Unmarshal([]byte(res), &labels); err != nil {
c.Fatal(err)
}
if _, ok := labels[testLabel]; !ok {
c.Fatal("label not found in image")
}
}
func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) {
name := "testbuildlabelmultiple"
testLabels := map[string]string{
"foo": "bar",
"123": "456",
}
labelArgs := []string{}
for k, v := range testLabels {
labelArgs = append(labelArgs, "--label", k+"="+v)
}
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
LABEL default foo
`, false, labelArgs...)
if err != nil {
c.Fatal("error building image with labels", err)
}
res := inspectFieldJSON(c, name, "Config.Labels")
var labels map[string]string
if err := json.Unmarshal([]byte(res), &labels); err != nil {
c.Fatal(err)
}
for k, v := range testLabels {
if x, ok := labels[k]; !ok || x != v {
c.Fatalf("label %s=%s not found in image", k, v)
}
}
}
func (s *DockerSuite) TestBuildLabelOverwrite(c *check.C) {
name := "testbuildlabeloverwrite"
testLabel := "foo"
testValue := "bar"
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
  LABEL `+testLabel+` foo
`, false, []string{"--label", testLabel + "=" + testValue}...)
if err != nil {
c.Fatal("error building image with labels", err)
}
res := inspectFieldJSON(c, name, "Config.Labels")
var labels map[string]string
if err := json.Unmarshal([]byte(res), &labels); err != nil {
c.Fatal(err)
}
v, ok := labels[testLabel]
if !ok {
c.Fatal("label not found in image")
}
if v != testValue {
c.Fatal("label not overwritten")
}
}
func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) {
dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL)
baseImage := privateRegistryURL + "/baseimage"
_, err := buildImage(baseImage, `
FROM busybox
ENV env1 val1
`, true)
c.Assert(err, checker.IsNil)
dockerCmd(c, "push", baseImage)
dockerCmd(c, "rmi", baseImage)
_, err = buildImage(baseImage, fmt.Sprintf(`
FROM %s
ENV env2 val2
`, baseImage), true)
c.Assert(err, checker.IsNil)
}
func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) {
osPath := os.Getenv("PATH")
defer os.Setenv("PATH", osPath)
workingDir, err := os.Getwd()
c.Assert(err, checker.IsNil)
absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth"))
c.Assert(err, checker.IsNil)
testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute)
os.Setenv("PATH", testPath)
repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL)
tmp, err := ioutil.TempDir("", "integration-cli-")
c.Assert(err, checker.IsNil)
externalAuthConfig := `{ "credsStore": "shell-test" }`
configPath := filepath.Join(tmp, "config.json")
err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644)
c.Assert(err, checker.IsNil)
dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL)
b, err := ioutil.ReadFile(configPath)
c.Assert(err, checker.IsNil)
c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":")
dockerCmd(c, "--config", tmp, "tag", "busybox", repoName)
dockerCmd(c, "--config", tmp, "push", repoName)
// make sure the image is pulled when building
dockerCmd(c, "rmi", repoName)
buildCmd := exec.Command(dockerBinary, "--config", tmp, "build", "-")
buildCmd.Stdin = strings.NewReader(fmt.Sprintf("FROM %s", repoName))
out, _, err := runCommandWithOutput(buildCmd)
c.Assert(err, check.IsNil, check.Commentf(out))
}
// Test cases in #22036
func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) {
// Command line option labels will always override
name := "scratchy"
expected := `{"bar":"from-flag","foo":"from-flag"}`
_, err := buildImage(name,
`FROM `+minimalBaseImage()+`
LABEL foo=from-dockerfile`,
true, "--label", "foo=from-flag", "--label", "bar=from-flag")
c.Assert(err, check.IsNil)
res := inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
name = "from"
expected = `{"foo":"from-dockerfile"}`
_, err = buildImage(name,
`FROM `+minimalBaseImage()+`
LABEL foo from-dockerfile`,
true)
c.Assert(err, check.IsNil)
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option label will override even via `FROM`
name = "new"
expected = `{"bar":"from-dockerfile2","foo":"new"}`
_, err = buildImage(name,
`FROM from
LABEL bar from-dockerfile2`,
true, "--label", "foo=new")
c.Assert(err, check.IsNil)
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option without a value set (--label foo, --label bar=)
// will be treated as --label foo="", --label bar=""
name = "scratchy2"
expected = `{"bar":"","foo":""}`
_, err = buildImage(name,
`FROM `+minimalBaseImage()+`
LABEL foo=from-dockerfile`,
true, "--label", "foo", "--label", "bar=")
c.Assert(err, check.IsNil)
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option without a value set (--label foo, --label bar=)
// will be treated as --label foo="", --label bar=""
// This time is for inherited images
name = "new2"
expected = `{"bar":"","foo":""}`
_, err = buildImage(name,
`FROM from
LABEL bar from-dockerfile2`,
true, "--label", "foo=", "--label", "bar")
c.Assert(err, check.IsNil)
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option labels with only `FROM`
name = "scratchy"
expected = `{"bar":"from-flag","foo":"from-flag"}`
_, err = buildImage(name,
`FROM `+minimalBaseImage(),
true, "--label", "foo=from-flag", "--label", "bar=from-flag")
c.Assert(err, check.IsNil)
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option labels with env var
name = "scratchz"
expected = `{"bar":"$PATH"}`
_, err = buildImage(name,
`FROM `+minimalBaseImage(),
true, "--label", "bar=$PATH")
c.Assert(err, check.IsNil)
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
}
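// Condensed, the precedence rules exercised above boil down to the following
// sketch (flag and label names are illustrative):
//
//	docker build --label foo=from-flag .     # flag value wins over "LABEL foo=from-dockerfile"
//	docker build --label foo --label bar= .  # bare/empty flags are stored as foo="" and bar=""
//	docker build --label bar='$PATH' .       # no shell expansion; the literal string "$PATH" is stored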
// Test case for #22855
func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) {
name := "test-delete-committed-file"
_, err := buildImage(name,
`FROM busybox
RUN echo test > file
RUN test -e file
RUN rm file
RUN sh -c "! test -e file"`, false)
if err != nil {
c.Fatal(err)
}
}
// #20083
func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) {
// TODO Windows: Figure out why this test is flakey on TP5. If you add
// something like RUN sleep 5, or even RUN ls /tmp after the ADD line,
// it is more reliable, but that's not a good fix.
testRequires(c, DaemonIsLinux)
name := "testbuilddockerignorecleanpaths"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "(ls -la /tmp/#1)"
RUN sh -c "(! ls -la /tmp/#2)"
RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (ls /tmp/dir1/foo)"`
ctx, err := fakeContext(dockerfile, map[string]string{
"foo": "foo",
"foo2": "foo2",
"dir1/foo": "foo in dir1",
"#1": "# file 1",
"#2": "# file 2",
".dockerignore": `# Visual C++ cache files
# because we have git ;-)
# The above comment is from #20083
foo
#dir1/foo
foo2
# The following is considered as comment as # is at the beginning
#1
# The following is not considered as comment as # is not at the beginning
#2
`,
})
if err != nil {
c.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
c.Fatal(err)
}
}
// Test case for #23221
func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) {
name := "test-with-utf8-bom"
dockerfile := []byte(`FROM busybox`)
bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...)
ctx, err := fakeContextFromNewTempDir()
c.Assert(err, check.IsNil)
defer ctx.Close()
err = ctx.addFile("Dockerfile", bomDockerfile)
c.Assert(err, check.IsNil)
_, err = buildImageFromContext(name, ctx, true)
c.Assert(err, check.IsNil)
}
// Test case for UTF-8 BOM in .dockerignore, related to #23221
func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) {
name := "test-with-utf8-bom-dockerignore"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN ls -la /tmp
RUN sh -c "! ls /tmp/Dockerfile"
RUN ls /tmp/.dockerignore`
dockerignore := []byte("./Dockerfile\n")
bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...)
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": dockerfile,
})
c.Assert(err, check.IsNil)
defer ctx.Close()
err = ctx.addFile(".dockerignore", bomDockerignore)
c.Assert(err, check.IsNil)
_, err = buildImageFromContext(name, ctx, true)
if err != nil {
c.Fatal(err)
}
}
// #22489 Shell test to confirm config gets updated correctly
func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) {
name := "testbuildshellupdatesconfig"
expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]`
_, err := buildImage(name,
`FROM `+minimalBaseImage()+`
SHELL ["foo", "-bar"]`,
true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "ContainerConfig.Cmd")
if res != expected {
c.Fatalf("%s, expected %s", res, expected)
}
res = inspectFieldJSON(c, name, "ContainerConfig.Shell")
if res != `["foo","-bar"]` {
c.Fatalf(`%s, expected ["foo","-bar"]`, res)
}
}
// #22489 Changing the shell multiple times and CMD after.
func (s *DockerSuite) TestBuildShellMultiple(c *check.C) {
name := "testbuildshellmultiple"
_, out, _, err := buildImageWithStdoutStderr(name,
`FROM busybox
RUN echo defaultshell
SHELL ["echo"]
RUN echoshell
SHELL ["ls"]
RUN -l
CMD -l`,
true)
if err != nil {
c.Fatal(err)
}
// Must contain 'defaultshell' twice
if len(strings.Split(out, "defaultshell")) != 3 {
c.Fatalf("defaultshell should have appeared twice in %s", out)
}
// Must contain 'echoshell' twice
if len(strings.Split(out, "echoshell")) != 3 {
c.Fatalf("echoshell should have appeared twice in %s", out)
}
// Must contain "total " (part of ls -l)
if !strings.Contains(out, "total ") {
c.Fatalf("%s should have contained 'total '", out)
}
// A container started from the image uses the shell-form CMD.
// Last shell is ls. CMD is -l. So should contain 'total '.
outrun, _ := dockerCmd(c, "run", "--rm", name)
if !strings.Contains(outrun, "total ") {
c.Fatalf("Expected started container to run ls -l. %s", outrun)
}
}
// #22489. Changed SHELL with ENTRYPOINT
func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) {
name := "testbuildshellentrypoint"
_, err := buildImage(name,
`FROM busybox
SHELL ["ls"]
ENTRYPOINT -l`,
true)
if err != nil {
c.Fatal(err)
}
// A container started from the image uses the shell-form ENTRYPOINT.
// Shell is ls. ENTRYPOINT is -l. So should contain 'total '.
outrun, _ := dockerCmd(c, "run", "--rm", name)
if !strings.Contains(outrun, "total ") {
c.Fatalf("Expected started container to run ls -l. %s", outrun)
}
}
// #22489 Shell test to confirm shell is inherited in a subsequent build
func (s *DockerSuite) TestBuildShellInherited(c *check.C) {
name1 := "testbuildshellinherited1"
_, err := buildImage(name1,
`FROM busybox
SHELL ["ls"]`,
true)
if err != nil {
c.Fatal(err)
}
name2 := "testbuildshellinherited2"
_, out, _, err := buildImageWithStdoutStderr(name2,
`FROM `+name1+`
RUN -l`,
true)
if err != nil {
c.Fatal(err)
}
// ls -l has "total " followed by some number in it, ls without -l does not.
if !strings.Contains(out, "total ") {
c.Fatalf("Should have seen total in 'ls -l'.\n%s", out)
}
}
// #22489 Shell test to confirm non-JSON doesn't work
func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) {
name := "testbuildshellnotjson"
_, err := buildImage(name,
`FROM `+minimalBaseImage()+`
sHeLl exec -form`, // Casing explicit to ensure error is upper-cased.
true)
if err == nil {
c.Fatal("Image build should have failed")
}
if !strings.Contains(err.Error(), "SHELL requires the arguments to be in JSON form") {
c.Fatal("Error didn't indicate that arguments must be in JSON form")
}
}
// #22489 Windows shell test to confirm native is powershell if executing a PS command
// This would error if the default shell were still cmd.
func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildshellpowershell"
_, out, err := buildImageWithOut(name,
`FROM `+minimalBaseImage()+`
SHELL ["powershell", "-command"]
RUN Write-Host John`,
true)
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "\nJohn\n") {
c.Fatalf("Line with 'John' not found in output %q", out)
}
}
// Verify that escape is being correctly applied to words when escape directive is not \.
// Tests WORKDIR, ADD
func (s *DockerSuite) TestBuildEscapeNotBackslashWordTest(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildescapenotbackslashwordtesta"
_, out, err := buildImageWithOut(name,
`# escape= `+"`"+`
FROM `+minimalBaseImage()+`
WORKDIR c:\windows
RUN dir /w`,
true)
if err != nil {
c.Fatal(err)
}
if !strings.Contains(strings.ToLower(out), "[system32]") {
c.Fatalf("Line with '[windows]' not found in output %q", out)
}
name = "testbuildescapenotbackslashwordtestb"
_, out, err = buildImageWithOut(name,
`# escape= `+"`"+`
FROM `+minimalBaseImage()+`
SHELL ["powershell.exe"]
WORKDIR c:\foo
ADD Dockerfile c:\foo\
RUN dir Dockerfile`,
true)
if err != nil {
c.Fatal(err)
}
if !strings.Contains(strings.ToLower(out), "-a----") {
c.Fatalf("Line with '-a----' not found in output %q", out)
}
}
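// Both Dockerfiles above begin with the parser directive "# escape=`", which
// swaps the escape character from backslash to backtick so Windows paths such
// as c:\windows can be written without doubling backslashes. A minimal sketch
// (the base image name is illustrative):
//
//	# escape=`
//	FROM microsoft/nanoserver
//	COPY testfile.txt c:\
//	RUN dir c:\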
// #22868. Make sure shell-form CMD is marked as escaped in the config of the image
func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildcmdshellescaped"
_, err := buildImage(name, `
FROM `+minimalBaseImage()+`
CMD "ipconfig"
`, true)
if err != nil {
c.Fatal(err)
}
res := inspectFieldJSON(c, name, "Config.ArgsEscaped")
if res != "true" {
c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res)
}
dockerCmd(c, "run", "--name", "inspectme", name)
dockerCmd(c, "wait", "inspectme")
res = inspectFieldJSON(c, name, "Config.Cmd")
if res != `["cmd","/S","/C","\"ipconfig\""]` {
c.Fatalf("CMD was not escaped Config.Cmd: got %v", res)
}
}
func (s *DockerSuite) TestContinueCharSpace(c *check.C) {
// Test to make sure that we don't treat a \ as a continuation
// character IF there are spaces (or tabs) after it on the same line
name := "testbuildcont"
_, err := buildImage(name, "FROM busybox\nRUN echo hi \\\t\nbye", true)
c.Assert(err, check.NotNil, check.Commentf("Build 1 should fail - didn't"))
_, err = buildImage(name, "FROM busybox\nRUN echo hi \\ \nbye", true)
c.Assert(err, check.NotNil, check.Commentf("Build 2 should fail - didn't"))
}
// Test case for #24912.
func (s *DockerSuite) TestBuildStepsWithProgress(c *check.C) {
name := "testbuildstepswithprogress"
totalRun := 5
_, out, err := buildImageWithOut(name, "FROM busybox\n"+strings.Repeat("RUN echo foo\n", totalRun), true)
c.Assert(err, checker.IsNil)
c.Assert(out, checker.Contains, fmt.Sprintf("Step 1/%d : FROM busybox", 1+totalRun))
for i := 2; i <= 1+totalRun; i++ {
c.Assert(out, checker.Contains, fmt.Sprintf("Step %d/%d : RUN echo foo", i, 1+totalRun))
}
}
func (s *DockerSuite) TestBuildWithFailure(c *check.C) {
name := "testbuildwithfailure"
// First test case can only detect `nobody` in runtime so all steps will show up
buildCmd := "FROM busybox\nRUN nobody"
_, stdout, _, err := buildImageWithStdoutStderr(name, buildCmd, false, "--force-rm", "--rm")
c.Assert(err, checker.NotNil)
c.Assert(stdout, checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(stdout, checker.Contains, "Step 2/2 : RUN nobody")
// Second test case `FFOM` should have been detected before build runs so no steps
buildCmd = "FFOM nobody\nRUN nobody"
_, stdout, _, err = buildImageWithStdoutStderr(name, buildCmd, false, "--force-rm", "--rm")
c.Assert(err, checker.NotNil)
c.Assert(stdout, checker.Not(checker.Contains), "Step 1/2 : FROM busybox")
c.Assert(stdout, checker.Not(checker.Contains), "Step 2/2 : RUN nobody")
}
func (s *DockerSuite) TestBuildCacheFrom(c *check.C) {
testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows
dockerfile := `
FROM busybox
ENV FOO=bar
ADD baz /
RUN touch bax`
ctx, err := fakeContext(dockerfile, map[string]string{
"Dockerfile": dockerfile,
"baz": "baz",
})
c.Assert(err, checker.IsNil)
defer ctx.Close()
id1, err := buildImageFromContext("build1", ctx, true)
c.Assert(err, checker.IsNil)
// rebuild with cache-from
id2, out, err := buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1")
c.Assert(err, checker.IsNil)
c.Assert(id1, checker.Equals, id2)
c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3)
dockerCmd(c, "rmi", "build2")
// no cache match with unknown source
id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=nosuchtag")
c.Assert(err, checker.IsNil)
c.Assert(id1, checker.Not(checker.Equals), id2)
c.Assert(strings.Count(out, "Using cache"), checker.Equals, 0)
dockerCmd(c, "rmi", "build2")
// clear parent images
tempDir, err := ioutil.TempDir("", "test-build-cache-from-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
tempFile := filepath.Join(tempDir, "img.tar")
dockerCmd(c, "save", "-o", tempFile, "build1")
dockerCmd(c, "rmi", "build1")
dockerCmd(c, "load", "-i", tempFile)
parentID, _ := dockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1")
c.Assert(strings.TrimSpace(parentID), checker.Equals, "")
// cache still applies without parents
id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1")
c.Assert(err, checker.IsNil)
c.Assert(id1, checker.Equals, id2)
c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3)
history1, _ := dockerCmd(c, "history", "-q", "build2")
// Retry, no new intermediate images
id3, out, err := buildImageFromContextWithOut("build3", ctx, true, "--cache-from=build1")
c.Assert(err, checker.IsNil)
c.Assert(id1, checker.Equals, id3)
c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3)
history2, _ := dockerCmd(c, "history", "-q", "build3")
c.Assert(history1, checker.Equals, history2)
dockerCmd(c, "rmi", "build2")
dockerCmd(c, "rmi", "build3")
dockerCmd(c, "rmi", "build1")
dockerCmd(c, "load", "-i", tempFile)
// Modify file, everything up to last command and layers are reused
dockerfile = `
FROM busybox
ENV FOO=bar
ADD baz /
RUN touch newfile`
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644)
c.Assert(err, checker.IsNil)
id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1")
c.Assert(err, checker.IsNil)
c.Assert(id1, checker.Not(checker.Equals), id2)
c.Assert(strings.Count(out, "Using cache"), checker.Equals, 2)
layers1Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1")
layers2Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2")
var layers1 []string
var layers2 []string
c.Assert(json.Unmarshal([]byte(layers1Str), &layers1), checker.IsNil)
c.Assert(json.Unmarshal([]byte(layers2Str), &layers2), checker.IsNil)
c.Assert(len(layers1), checker.Equals, len(layers2))
for i := 0; i < len(layers1)-1; i++ {
c.Assert(layers1[i], checker.Equals, layers2[i])
}
c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1])
}
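// The save/load dance above matters because a loaded image has no parent chain;
// --cache-from makes its layers usable as a build cache anyway. A condensed
// manual version of the same workflow (image names are illustrative):
//
//	docker build -t build1 .
//	docker save -o img.tar build1 && docker rmi build1
//	docker load -i img.tar                        # {{.Parent}} is now empty
//	docker build -t build2 --cache-from=build1 .  # still reports "Using cache"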
func (s *DockerSuite) TestBuildNetNone(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildnetnone"
_, out, err := buildImageWithOut(name, `
FROM busybox
RUN ping -c 1 8.8.8.8
`, true, "--network=none")
c.Assert(err, checker.NotNil)
c.Assert(out, checker.Contains, "unreachable")
}
func (s *DockerSuite) TestBuildNetContainer(c *check.C) {
testRequires(c, DaemonIsLinux)
id, _ := dockerCmd(c, "run", "--hostname", "foobar", "-d", "busybox", "nc", "-ll", "-p", "1234", "-e", "hostname")
name := "testbuildnetcontainer"
out, err := buildImage(name, `
FROM busybox
RUN nc localhost 1234 > /otherhost
`, true, "--network=container:"+strings.TrimSpace(id))
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
host, _ := dockerCmd(c, "run", "testbuildnetcontainer", "cat", "/otherhost")
c.Assert(strings.TrimSpace(host), check.Equals, "foobar")
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
clients/google-api-services-cloudsearch/v1/1.31.0/com/google/api/services/cloudsearch/v1/CloudSearch.java | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.cloudsearch.v1;
/**
* Service definition for CloudSearch (v1).
*
* <p>
* Cloud Search provides cloud-based search capabilities over G Suite data. The Cloud Search API allows indexing of non-G Suite data into Cloud Search.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://developers.google.com/cloud-search/docs/guides/" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link CloudSearchRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class CloudSearch extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.31.0 of the Cloud Search API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://cloudsearch.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://cloudsearch.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public CloudSearch(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
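  /*
   * A minimal construction sketch (not part of the generated surface; it assumes
   * google-http-client and a JSON factory such as GsonFactory are on the classpath,
   * and it omits credential/HttpRequestInitializer setup, passing null instead):
   *
   *   CloudSearch client = new CloudSearch(
   *       com.google.api.client.googleapis.javanet.GoogleNetHttpTransport.newTrustedTransport(),
   *       new com.google.api.client.json.gson.GsonFactory(),
   *       null);
   *   // Requests are then built through the collection accessors, e.g. client.debug()...
   */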
/**
* @param builder builder
*/
CloudSearch(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the Debug collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Debug.List request = cloudsearch.debug().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Debug debug() {
return new Debug();
}
/**
* The "debug" collection of methods.
*/
public class Debug {
/**
* An accessor for creating requests from the Datasources collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Datasources.List request = cloudsearch.datasources().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Datasources datasources() {
return new Datasources();
}
/**
* The "datasources" collection of methods.
*/
public class Datasources {
/**
* An accessor for creating requests from the Items collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Items.List request = cloudsearch.items().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Items items() {
return new Items();
}
/**
* The "items" collection of methods.
*/
public class Items {
/**
* Checks whether an item is accessible by specified principal. **Note:** This API requires an admin
* account to execute.
*
* Create a request for the method "items.checkAccess".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link CheckAccess#execute()} method to invoke the remote operation.
*
* @param name Item name, format: datasources/{source_id}/items/{item_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.Principal}
* @return the request
*/
public CheckAccess checkAccess(java.lang.String name, com.google.api.services.cloudsearch.v1.model.Principal content) throws java.io.IOException {
CheckAccess result = new CheckAccess(name, content);
initialize(result);
return result;
}
public class CheckAccess extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.CheckAccessResponse> {
private static final String REST_PATH = "v1/debug/{+name}:checkAccess";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+/items/[^/]+$");
/**
* Checks whether an item is accessible by specified principal. **Note:** This API requires an
* admin account to execute.
*
* Create a request for the method "items.checkAccess".
*
           * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link CheckAccess#execute()} method to invoke the remote
* operation. <p> {@link
* CheckAccess#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Item name, format: datasources/{source_id}/items/{item_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.Principal}
* @since 1.13
*/
protected CheckAccess(java.lang.String name, com.google.api.services.cloudsearch.v1.model.Principal content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.CheckAccessResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
}
@Override
public CheckAccess set$Xgafv(java.lang.String $Xgafv) {
return (CheckAccess) super.set$Xgafv($Xgafv);
}
@Override
public CheckAccess setAccessToken(java.lang.String accessToken) {
return (CheckAccess) super.setAccessToken(accessToken);
}
@Override
public CheckAccess setAlt(java.lang.String alt) {
return (CheckAccess) super.setAlt(alt);
}
@Override
public CheckAccess setCallback(java.lang.String callback) {
return (CheckAccess) super.setCallback(callback);
}
@Override
public CheckAccess setFields(java.lang.String fields) {
return (CheckAccess) super.setFields(fields);
}
@Override
public CheckAccess setKey(java.lang.String key) {
return (CheckAccess) super.setKey(key);
}
@Override
public CheckAccess setOauthToken(java.lang.String oauthToken) {
return (CheckAccess) super.setOauthToken(oauthToken);
}
@Override
public CheckAccess setPrettyPrint(java.lang.Boolean prettyPrint) {
return (CheckAccess) super.setPrettyPrint(prettyPrint);
}
@Override
public CheckAccess setQuotaUser(java.lang.String quotaUser) {
return (CheckAccess) super.setQuotaUser(quotaUser);
}
@Override
public CheckAccess setUploadType(java.lang.String uploadType) {
return (CheckAccess) super.setUploadType(uploadType);
}
@Override
public CheckAccess setUploadProtocol(java.lang.String uploadProtocol) {
return (CheckAccess) super.setUploadProtocol(uploadProtocol);
}
/** Item name, format: datasources/{source_id}/items/{item_id} */
@com.google.api.client.util.Key
private java.lang.String name;
/** Item name, format: datasources/{source_id}/items/{item_id}
*/
public java.lang.String getName() {
return name;
}
/** Item name, format: datasources/{source_id}/items/{item_id} */
public CheckAccess setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
this.name = name;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
public CheckAccess setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
@Override
public CheckAccess set(String parameterName, Object value) {
return (CheckAccess) super.set(parameterName, value);
}
}
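        /*
         * Usage sketch (illustrative): checking whether a principal can see an indexed item.
         * The accessor chain, the resource names, and the Principal/CheckAccessResponse
         * accessors below are assumptions for this example; substitute an initialized
         * CloudSearch client and your own IDs.
         *
         *   Principal principal = new Principal()
         *       .setUserResourceName("identitysources/SOURCE_ID/users/jane@example.com"); // hypothetical user
         *   CheckAccessResponse response = cloudsearch.debug().datasources().items()
         *       .checkAccess("datasources/SOURCE_ID/items/ITEM_ID", principal)
         *       .execute();
         *   Boolean hasAccess = response.getHasAccess(); // getter assumed from the response model
         */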
/**
* Fetches the item whose viewUrl exactly matches that of the URL provided in the request. **Note:**
* This API requires an admin account to execute.
*
* Create a request for the method "items.searchByViewUrl".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link SearchByViewUrl#execute()} method to invoke the remote operation.
*
* @param name Source name, format: datasources/{source_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.SearchItemsByViewUrlRequest}
* @return the request
*/
public SearchByViewUrl searchByViewUrl(java.lang.String name, com.google.api.services.cloudsearch.v1.model.SearchItemsByViewUrlRequest content) throws java.io.IOException {
SearchByViewUrl result = new SearchByViewUrl(name, content);
initialize(result);
return result;
}
public class SearchByViewUrl extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.SearchItemsByViewUrlResponse> {
private static final String REST_PATH = "v1/debug/{+name}/items:searchByViewUrl";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Fetches the item whose viewUrl exactly matches that of the URL provided in the request.
* **Note:** This API requires an admin account to execute.
*
* Create a request for the method "items.searchByViewUrl".
*
     * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link SearchByViewUrl#execute()} method to invoke the remote
     * operation. <p> {@link
     * SearchByViewUrl#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
     * must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Source name, format: datasources/{source_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.SearchItemsByViewUrlRequest}
* @since 1.13
*/
protected SearchByViewUrl(java.lang.String name, com.google.api.services.cloudsearch.v1.model.SearchItemsByViewUrlRequest content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.SearchItemsByViewUrlResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public SearchByViewUrl set$Xgafv(java.lang.String $Xgafv) {
return (SearchByViewUrl) super.set$Xgafv($Xgafv);
}
@Override
public SearchByViewUrl setAccessToken(java.lang.String accessToken) {
return (SearchByViewUrl) super.setAccessToken(accessToken);
}
@Override
public SearchByViewUrl setAlt(java.lang.String alt) {
return (SearchByViewUrl) super.setAlt(alt);
}
@Override
public SearchByViewUrl setCallback(java.lang.String callback) {
return (SearchByViewUrl) super.setCallback(callback);
}
@Override
public SearchByViewUrl setFields(java.lang.String fields) {
return (SearchByViewUrl) super.setFields(fields);
}
@Override
public SearchByViewUrl setKey(java.lang.String key) {
return (SearchByViewUrl) super.setKey(key);
}
@Override
public SearchByViewUrl setOauthToken(java.lang.String oauthToken) {
return (SearchByViewUrl) super.setOauthToken(oauthToken);
}
@Override
public SearchByViewUrl setPrettyPrint(java.lang.Boolean prettyPrint) {
return (SearchByViewUrl) super.setPrettyPrint(prettyPrint);
}
@Override
public SearchByViewUrl setQuotaUser(java.lang.String quotaUser) {
return (SearchByViewUrl) super.setQuotaUser(quotaUser);
}
@Override
public SearchByViewUrl setUploadType(java.lang.String uploadType) {
return (SearchByViewUrl) super.setUploadType(uploadType);
}
@Override
public SearchByViewUrl setUploadProtocol(java.lang.String uploadProtocol) {
return (SearchByViewUrl) super.setUploadProtocol(uploadProtocol);
}
/** Source name, format: datasources/{source_id} */
@com.google.api.client.util.Key
private java.lang.String name;
/** Source name, format: datasources/{source_id}
*/
public java.lang.String getName() {
return name;
}
/** Source name, format: datasources/{source_id} */
public SearchByViewUrl setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
@Override
public SearchByViewUrl set(String parameterName, Object value) {
return (SearchByViewUrl) super.set(parameterName, value);
}
}
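        /*
         * Usage sketch (illustrative): fetching the item whose viewUrl matches a known URL.
         * The accessor chain and the setViewUrl() setter are assumptions; the data source ID
         * and URL are placeholders.
         *
         *   SearchItemsByViewUrlRequest request = new SearchItemsByViewUrlRequest()
         *       .setViewUrl("https://example.com/docs/item-1");
         *   SearchItemsByViewUrlResponse response = cloudsearch.debug().datasources().items()
         *       .searchByViewUrl("datasources/SOURCE_ID", request)
         *       .execute();
         */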
/**
* An accessor for creating requests from the Unmappedids collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Unmappedids.List request = cloudsearch.unmappedids().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Unmappedids unmappedids() {
return new Unmappedids();
}
/**
* The "unmappedids" collection of methods.
*/
public class Unmappedids {
/**
* List all unmapped identities for a specific item. **Note:** This API requires an admin account to
* execute.
*
* Create a request for the method "unmappedids.list".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent The name of the item, in the following format: datasources/{source_id}/items/{ID}
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
public class List extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.ListUnmappedIdentitiesResponse> {
private static final String REST_PATH = "v1/debug/{+parent}/unmappedids";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+/items/[^/]+$");
/**
* List all unmapped identities for a specific item. **Note:** This API requires an admin account
* to execute.
*
* Create a request for the method "unmappedids.list".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent The name of the item, in the following format: datasources/{source_id}/items/{ID}
* @since 1.13
*/
protected List(java.lang.String parent) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.ListUnmappedIdentitiesResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* The name of the item, in the following format: datasources/{source_id}/items/{ID}
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** The name of the item, in the following format: datasources/{source_id}/items/{ID}
*/
public java.lang.String getParent() {
return parent;
}
/**
* The name of the item, in the following format: datasources/{source_id}/items/{ID}
*/
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
public List setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
/** Maximum number of items to fetch in a request. Defaults to 100. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Maximum number of items to fetch in a request. Defaults to 100.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** Maximum number of items to fetch in a request. Defaults to 100. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The next_page_token value returned from a previous List request, if any. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The next_page_token value returned from a previous List request, if any.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The next_page_token value returned from a previous List request, if any. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
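          /*
           * Usage sketch (illustrative): paging through the unmapped identities of a single item.
           * The accessor chain and response getters are assumptions; the item name is a placeholder.
           *
           *   ListUnmappedIdentitiesResponse page = cloudsearch.debug().datasources().items()
           *       .unmappedids().list("datasources/SOURCE_ID/items/ITEM_ID")
           *       .setPageSize(50)
           *       .execute();
           *   String nextToken = page.getNextPageToken(); // feed back via setPageToken(...) to continue paging
           */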
}
}
}
/**
* An accessor for creating requests from the Identitysources collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Identitysources.List request = cloudsearch.identitysources().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Identitysources identitysources() {
return new Identitysources();
}
/**
* The "identitysources" collection of methods.
*/
public class Identitysources {
/**
* An accessor for creating requests from the Items collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Items.List request = cloudsearch.items().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Items items() {
return new Items();
}
/**
* The "items" collection of methods.
*/
public class Items {
/**
* Lists names of items associated with an unmapped identity. **Note:** This API requires an admin
* account to execute.
*
* Create a request for the method "items.listForunmappedidentity".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link ListForunmappedidentity#execute()} method to invoke the remote
* operation.
*
     * @param parent The name of the identity source, in the following format: identitysources/{source_id}
* @return the request
*/
public ListForunmappedidentity listForunmappedidentity(java.lang.String parent) throws java.io.IOException {
ListForunmappedidentity result = new ListForunmappedidentity(parent);
initialize(result);
return result;
}
public class ListForunmappedidentity extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.ListItemNamesForUnmappedIdentityResponse> {
private static final String REST_PATH = "v1/debug/{+parent}/items:forunmappedidentity";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^identitysources/[^/]+$");
/**
* Lists names of items associated with an unmapped identity. **Note:** This API requires an admin
* account to execute.
*
* Create a request for the method "items.listForunmappedidentity".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link ListForunmappedidentity#execute()} method to invoke the
       * remote operation. <p> {@link
       * ListForunmappedidentity#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
*
       * @param parent The name of the identity source, in the following format: identitysources/{source_id}
* @since 1.13
*/
protected ListForunmappedidentity(java.lang.String parent) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.ListItemNamesForUnmappedIdentityResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^identitysources/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public ListForunmappedidentity set$Xgafv(java.lang.String $Xgafv) {
return (ListForunmappedidentity) super.set$Xgafv($Xgafv);
}
@Override
public ListForunmappedidentity setAccessToken(java.lang.String accessToken) {
return (ListForunmappedidentity) super.setAccessToken(accessToken);
}
@Override
public ListForunmappedidentity setAlt(java.lang.String alt) {
return (ListForunmappedidentity) super.setAlt(alt);
}
@Override
public ListForunmappedidentity setCallback(java.lang.String callback) {
return (ListForunmappedidentity) super.setCallback(callback);
}
@Override
public ListForunmappedidentity setFields(java.lang.String fields) {
return (ListForunmappedidentity) super.setFields(fields);
}
@Override
public ListForunmappedidentity setKey(java.lang.String key) {
return (ListForunmappedidentity) super.setKey(key);
}
@Override
public ListForunmappedidentity setOauthToken(java.lang.String oauthToken) {
return (ListForunmappedidentity) super.setOauthToken(oauthToken);
}
@Override
public ListForunmappedidentity setPrettyPrint(java.lang.Boolean prettyPrint) {
return (ListForunmappedidentity) super.setPrettyPrint(prettyPrint);
}
@Override
public ListForunmappedidentity setQuotaUser(java.lang.String quotaUser) {
return (ListForunmappedidentity) super.setQuotaUser(quotaUser);
}
@Override
public ListForunmappedidentity setUploadType(java.lang.String uploadType) {
return (ListForunmappedidentity) super.setUploadType(uploadType);
}
@Override
public ListForunmappedidentity setUploadProtocol(java.lang.String uploadProtocol) {
return (ListForunmappedidentity) super.setUploadProtocol(uploadProtocol);
}
/**
         * The name of the identity source, in the following format: identitysources/{source_id}
*/
@com.google.api.client.util.Key
private java.lang.String parent;
        /** The name of the identity source, in the following format: identitysources/{source_id}
*/
public java.lang.String getParent() {
return parent;
}
/**
         * The name of the identity source, in the following format: identitysources/{source_id}
*/
public ListForunmappedidentity setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^identitysources/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
public ListForunmappedidentity setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
@com.google.api.client.util.Key
private java.lang.String groupResourceName;
/**
*/
public java.lang.String getGroupResourceName() {
return groupResourceName;
}
public ListForunmappedidentity setGroupResourceName(java.lang.String groupResourceName) {
this.groupResourceName = groupResourceName;
return this;
}
/** Maximum number of items to fetch in a request. Defaults to 100. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Maximum number of items to fetch in a request. Defaults to 100.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** Maximum number of items to fetch in a request. Defaults to 100. */
public ListForunmappedidentity setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The next_page_token value returned from a previous List request, if any. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The next_page_token value returned from a previous List request, if any.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The next_page_token value returned from a previous List request, if any. */
public ListForunmappedidentity setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@com.google.api.client.util.Key
private java.lang.String userResourceName;
/**
*/
public java.lang.String getUserResourceName() {
return userResourceName;
}
public ListForunmappedidentity setUserResourceName(java.lang.String userResourceName) {
this.userResourceName = userResourceName;
return this;
}
@Override
public ListForunmappedidentity set(String parameterName, Object value) {
return (ListForunmappedidentity) super.set(parameterName, value);
}
}
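          /*
           * Usage sketch (illustrative): listing item names tied to one unmapped external identity.
           * The userResourceName format and the accessor chain are assumptions; IDs are placeholders.
           *
           *   ListItemNamesForUnmappedIdentityResponse response = cloudsearch.debug().identitysources().items()
           *       .listForunmappedidentity("identitysources/SOURCE_ID")
           *       .setUserResourceName("identitysources/SOURCE_ID/users/external-user") // hypothetical identity
           *       .setPageSize(100)
           *       .execute();
           */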
}
/**
* An accessor for creating requests from the Unmappedids collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Unmappedids.List request = cloudsearch.unmappedids().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Unmappedids unmappedids() {
return new Unmappedids();
}
/**
* The "unmappedids" collection of methods.
*/
public class Unmappedids {
/**
* Lists unmapped user identities for an identity source. **Note:** This API requires an admin
* account to execute.
*
* Create a request for the method "unmappedids.list".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent The name of the identity source, in the following format: identitysources/{source_id}
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
public class List extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.ListUnmappedIdentitiesResponse> {
private static final String REST_PATH = "v1/debug/{+parent}/unmappedids";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^identitysources/[^/]+$");
/**
* Lists unmapped user identities for an identity source. **Note:** This API requires an admin
* account to execute.
*
* Create a request for the method "unmappedids.list".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent The name of the identity source, in the following format: identitysources/{source_id}
* @since 1.13
*/
protected List(java.lang.String parent) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.ListUnmappedIdentitiesResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^identitysources/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* The name of the identity source, in the following format: identitysources/{source_id}
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** The name of the identity source, in the following format: identitysources/{source_id}
*/
public java.lang.String getParent() {
return parent;
}
/**
* The name of the identity source, in the following format: identitysources/{source_id}
*/
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^identitysources/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
public List setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
/** Maximum number of items to fetch in a request. Defaults to 100. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Maximum number of items to fetch in a request. Defaults to 100.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** Maximum number of items to fetch in a request. Defaults to 100. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The next_page_token value returned from a previous List request, if any. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The next_page_token value returned from a previous List request, if any.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The next_page_token value returned from a previous List request, if any. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
/** Limit users selection to this status. */
@com.google.api.client.util.Key
private java.lang.String resolutionStatusCode;
/** Limit users selection to this status.
*/
public java.lang.String getResolutionStatusCode() {
return resolutionStatusCode;
}
/** Limit users selection to this status. */
public List setResolutionStatusCode(java.lang.String resolutionStatusCode) {
this.resolutionStatusCode = resolutionStatusCode;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
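          /*
           * Usage sketch (illustrative): filtering an identity source's unmapped identities by
           * resolution status. The accessor chain and the status value are assumptions.
           *
           *   ListUnmappedIdentitiesResponse response = cloudsearch.debug().identitysources()
           *       .unmappedids().list("identitysources/SOURCE_ID")
           *       .setResolutionStatusCode("IDENTITY_SOURCE_NOT_FOUND") // assumed enum value
           *       .execute();
           */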
}
}
}
/**
* An accessor for creating requests from the Indexing collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Indexing.List request = cloudsearch.indexing().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Indexing indexing() {
return new Indexing();
}
/**
* The "indexing" collection of methods.
*/
public class Indexing {
/**
* An accessor for creating requests from the Datasources collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Datasources.List request = cloudsearch.datasources().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Datasources datasources() {
return new Datasources();
}
/**
* The "datasources" collection of methods.
*/
public class Datasources {
/**
* Deletes the schema of a data source. **Note:** This API requires an admin or service account to
* execute.
*
* Create a request for the method "datasources.deleteSchema".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link DeleteSchema#execute()} method to invoke the remote operation.
*
* @param name Name of the data source to delete Schema. Format: datasources/{source_id}
* @return the request
*/
public DeleteSchema deleteSchema(java.lang.String name) throws java.io.IOException {
DeleteSchema result = new DeleteSchema(name);
initialize(result);
return result;
}
public class DeleteSchema extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/indexing/{+name}/schema";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Deletes the schema of a data source. **Note:** This API requires an admin or service account to
* execute.
*
* Create a request for the method "datasources.deleteSchema".
*
     * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link DeleteSchema#execute()} method to invoke the remote
* operation. <p> {@link
* DeleteSchema#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the data source to delete Schema. Format: datasources/{source_id}
* @since 1.13
*/
protected DeleteSchema(java.lang.String name) {
super(CloudSearch.this, "DELETE", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public DeleteSchema set$Xgafv(java.lang.String $Xgafv) {
return (DeleteSchema) super.set$Xgafv($Xgafv);
}
@Override
public DeleteSchema setAccessToken(java.lang.String accessToken) {
return (DeleteSchema) super.setAccessToken(accessToken);
}
@Override
public DeleteSchema setAlt(java.lang.String alt) {
return (DeleteSchema) super.setAlt(alt);
}
@Override
public DeleteSchema setCallback(java.lang.String callback) {
return (DeleteSchema) super.setCallback(callback);
}
@Override
public DeleteSchema setFields(java.lang.String fields) {
return (DeleteSchema) super.setFields(fields);
}
@Override
public DeleteSchema setKey(java.lang.String key) {
return (DeleteSchema) super.setKey(key);
}
@Override
public DeleteSchema setOauthToken(java.lang.String oauthToken) {
return (DeleteSchema) super.setOauthToken(oauthToken);
}
@Override
public DeleteSchema setPrettyPrint(java.lang.Boolean prettyPrint) {
return (DeleteSchema) super.setPrettyPrint(prettyPrint);
}
@Override
public DeleteSchema setQuotaUser(java.lang.String quotaUser) {
return (DeleteSchema) super.setQuotaUser(quotaUser);
}
@Override
public DeleteSchema setUploadType(java.lang.String uploadType) {
return (DeleteSchema) super.setUploadType(uploadType);
}
@Override
public DeleteSchema setUploadProtocol(java.lang.String uploadProtocol) {
return (DeleteSchema) super.setUploadProtocol(uploadProtocol);
}
/** Name of the data source to delete Schema. Format: datasources/{source_id} */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the data source to delete Schema. Format: datasources/{source_id}
*/
public java.lang.String getName() {
return name;
}
/** Name of the data source to delete Schema. Format: datasources/{source_id} */
public DeleteSchema setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
public DeleteSchema setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
@Override
public DeleteSchema set(String parameterName, Object value) {
return (DeleteSchema) super.set(parameterName, value);
}
}
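        /*
         * Usage sketch (illustrative): deleting a data source's schema and keeping the returned
         * long-running Operation for polling. The accessor chain is an assumption; the source ID
         * is a placeholder.
         *
         *   Operation op = cloudsearch.indexing().datasources()
         *       .deleteSchema("datasources/SOURCE_ID")
         *       .execute();
         *   // op.getName() identifies the operation; poll it until getDone() reports completion.
         */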
/**
* Gets the schema of a data source. **Note:** This API requires an admin or service account to
* execute.
*
* Create a request for the method "datasources.getSchema".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link GetSchema#execute()} method to invoke the remote operation.
*
* @param name Name of the data source to get Schema. Format: datasources/{source_id}
* @return the request
*/
public GetSchema getSchema(java.lang.String name) throws java.io.IOException {
GetSchema result = new GetSchema(name);
initialize(result);
return result;
}
public class GetSchema extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Schema> {
private static final String REST_PATH = "v1/indexing/{+name}/schema";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Gets the schema of a data source. **Note:** This API requires an admin or service account to
* execute.
*
* Create a request for the method "datasources.getSchema".
*
     * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link GetSchema#execute()} method to invoke the remote
* operation. <p> {@link
* GetSchema#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the data source to get Schema. Format: datasources/{source_id}
* @since 1.13
*/
protected GetSchema(java.lang.String name) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.Schema.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetSchema set$Xgafv(java.lang.String $Xgafv) {
return (GetSchema) super.set$Xgafv($Xgafv);
}
@Override
public GetSchema setAccessToken(java.lang.String accessToken) {
return (GetSchema) super.setAccessToken(accessToken);
}
@Override
public GetSchema setAlt(java.lang.String alt) {
return (GetSchema) super.setAlt(alt);
}
@Override
public GetSchema setCallback(java.lang.String callback) {
return (GetSchema) super.setCallback(callback);
}
@Override
public GetSchema setFields(java.lang.String fields) {
return (GetSchema) super.setFields(fields);
}
@Override
public GetSchema setKey(java.lang.String key) {
return (GetSchema) super.setKey(key);
}
@Override
public GetSchema setOauthToken(java.lang.String oauthToken) {
return (GetSchema) super.setOauthToken(oauthToken);
}
@Override
public GetSchema setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetSchema) super.setPrettyPrint(prettyPrint);
}
@Override
public GetSchema setQuotaUser(java.lang.String quotaUser) {
return (GetSchema) super.setQuotaUser(quotaUser);
}
@Override
public GetSchema setUploadType(java.lang.String uploadType) {
return (GetSchema) super.setUploadType(uploadType);
}
@Override
public GetSchema setUploadProtocol(java.lang.String uploadProtocol) {
return (GetSchema) super.setUploadProtocol(uploadProtocol);
}
/** Name of the data source to get Schema. Format: datasources/{source_id} */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the data source to get Schema. Format: datasources/{source_id}
*/
public java.lang.String getName() {
return name;
}
/** Name of the data source to get Schema. Format: datasources/{source_id} */
public GetSchema setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
public GetSchema setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
@Override
public GetSchema set(String parameterName, Object value) {
return (GetSchema) super.set(parameterName, value);
}
}
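        /*
         * Usage sketch (illustrative): reading the current schema, e.g. before building a
         * replacement for updateSchema. The accessor chain and the Schema getter mentioned in
         * the comment are assumptions.
         *
         *   Schema schema = cloudsearch.indexing().datasources()
         *       .getSchema("datasources/SOURCE_ID")
         *       .execute();
         *   // schema.getObjectDefinitions() would expose the registered object types, if any.
         */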
/**
* Updates the schema of a data source. This method does not perform incremental updates to the
* schema. Instead, this method updates the schema by overwriting the entire schema. **Note:** This
* API requires an admin or service account to execute.
*
* Create a request for the method "datasources.updateSchema".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link UpdateSchema#execute()} method to invoke the remote operation.
*
* @param name Name of the data source to update Schema. Format: datasources/{source_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.UpdateSchemaRequest}
* @return the request
*/
public UpdateSchema updateSchema(java.lang.String name, com.google.api.services.cloudsearch.v1.model.UpdateSchemaRequest content) throws java.io.IOException {
UpdateSchema result = new UpdateSchema(name, content);
initialize(result);
return result;
}
public class UpdateSchema extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/indexing/{+name}/schema";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Updates the schema of a data source. This method does not perform incremental updates to the
* schema. Instead, this method updates the schema by overwriting the entire schema. **Note:**
* This API requires an admin or service account to execute.
*
* Create a request for the method "datasources.updateSchema".
*
     * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link UpdateSchema#execute()} method to invoke the remote
* operation. <p> {@link
* UpdateSchema#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the data source to update Schema. Format: datasources/{source_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.UpdateSchemaRequest}
* @since 1.13
*/
protected UpdateSchema(java.lang.String name, com.google.api.services.cloudsearch.v1.model.UpdateSchemaRequest content) {
super(CloudSearch.this, "PUT", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public UpdateSchema set$Xgafv(java.lang.String $Xgafv) {
return (UpdateSchema) super.set$Xgafv($Xgafv);
}
@Override
public UpdateSchema setAccessToken(java.lang.String accessToken) {
return (UpdateSchema) super.setAccessToken(accessToken);
}
@Override
public UpdateSchema setAlt(java.lang.String alt) {
return (UpdateSchema) super.setAlt(alt);
}
@Override
public UpdateSchema setCallback(java.lang.String callback) {
return (UpdateSchema) super.setCallback(callback);
}
@Override
public UpdateSchema setFields(java.lang.String fields) {
return (UpdateSchema) super.setFields(fields);
}
@Override
public UpdateSchema setKey(java.lang.String key) {
return (UpdateSchema) super.setKey(key);
}
@Override
public UpdateSchema setOauthToken(java.lang.String oauthToken) {
return (UpdateSchema) super.setOauthToken(oauthToken);
}
@Override
public UpdateSchema setPrettyPrint(java.lang.Boolean prettyPrint) {
return (UpdateSchema) super.setPrettyPrint(prettyPrint);
}
@Override
public UpdateSchema setQuotaUser(java.lang.String quotaUser) {
return (UpdateSchema) super.setQuotaUser(quotaUser);
}
@Override
public UpdateSchema setUploadType(java.lang.String uploadType) {
return (UpdateSchema) super.setUploadType(uploadType);
}
@Override
public UpdateSchema setUploadProtocol(java.lang.String uploadProtocol) {
return (UpdateSchema) super.setUploadProtocol(uploadProtocol);
}
/** Name of the data source to update Schema. Format: datasources/{source_id} */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the data source to update Schema. Format: datasources/{source_id}
*/
public java.lang.String getName() {
return name;
}
/** Name of the data source to update Schema. Format: datasources/{source_id} */
public UpdateSchema setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
@Override
public UpdateSchema set(String parameterName, Object value) {
return (UpdateSchema) super.set(parameterName, value);
}
}
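        /*
         * Usage sketch (illustrative): replacing the whole schema. Because updateSchema overwrites
         * rather than patches, the request must carry the complete Schema. The setSchema() and
         * setValidateOnly() setters on UpdateSchemaRequest are assumptions.
         *
         *   UpdateSchemaRequest request = new UpdateSchemaRequest()
         *       .setSchema(newSchema)      // a fully populated Schema object
         *       .setValidateOnly(true);    // assumed dry-run flag
         *   Operation op = cloudsearch.indexing().datasources()
         *       .updateSchema("datasources/SOURCE_ID", request)
         *       .execute();
         */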
/**
* An accessor for creating requests from the Items collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Items.List request = cloudsearch.items().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Items items() {
return new Items();
}
/**
* The "items" collection of methods.
*/
public class Items {
/**
* Deletes Item resource for the specified resource name. This API requires an admin or service
* account to execute. The service account used is the one whitelisted in the corresponding data
* source.
*
* Create a request for the method "items.delete".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param name Required. Name of the item to delete. Format: datasources/{source_id}/items/{item_id}
* @return the request
*/
public Delete delete(java.lang.String name) throws java.io.IOException {
Delete result = new Delete(name);
initialize(result);
return result;
}
public class Delete extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/indexing/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+/items/[^/]+$");
/**
* Deletes Item resource for the specified resource name. This API requires an admin or service
* account to execute. The service account used is the one whitelisted in the corresponding data
* source.
*
* Create a request for the method "items.delete".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. Name of the item to delete. Format: datasources/{source_id}/items/{item_id}
* @since 1.13
*/
protected Delete(java.lang.String name) {
super(CloudSearch.this, "DELETE", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Name of the item to delete. Format: datasources/{source_id}/items/{item_id}
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. Name of the item to delete. Format: datasources/{source_id}/items/{item_id}
*/
public java.lang.String getName() {
return name;
}
/**
* Required. Name of the item to delete. Format: datasources/{source_id}/items/{item_id}
*/
public Delete setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
this.name = name;
return this;
}
/**
* Name of connector making this call. Format: datasources/{source_id}/connectors/{ID}
*/
@com.google.api.client.util.Key
private java.lang.String connectorName;
/** Name of connector making this call. Format: datasources/{source_id}/connectors/{ID}
*/
public java.lang.String getConnectorName() {
return connectorName;
}
/**
* Name of connector making this call. Format: datasources/{source_id}/connectors/{ID}
*/
public Delete setConnectorName(java.lang.String connectorName) {
this.connectorName = connectorName;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
public Delete setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
/** Required. The RequestMode for this request. */
@com.google.api.client.util.Key
private java.lang.String mode;
/** Required. The RequestMode for this request.
*/
public java.lang.String getMode() {
return mode;
}
/** Required. The RequestMode for this request. */
public Delete setMode(java.lang.String mode) {
this.mode = mode;
return this;
}
/**
* Required. The incremented version of the item to delete from the index. The indexing
* system stores the version from the datasource as a byte string and compares the Item
* version in the index to the version of the queued Item using lexical ordering. Cloud
* Search Indexing won't delete any queued item with a version value that is less than or
* equal to the version of the currently indexed item. The maximum length for this field
* is 1024 bytes.
*/
@com.google.api.client.util.Key
private java.lang.String version;
/** Required. The incremented version of the item to delete from the index. The indexing system stores
the version from the datasource as a byte string and compares the Item version in the index to the
version of the queued Item using lexical ordering. Cloud Search Indexing won't delete any queued
item with a version value that is less than or equal to the version of the currently indexed item.
The maximum length for this field is 1024 bytes.
*/
public java.lang.String getVersion() {
return version;
}
/**
* Required. The incremented version of the item to delete from the index. The indexing
* system stores the version from the datasource as a byte string and compares the Item
* version in the index to the version of the queued Item using lexical ordering. Cloud
* Search Indexing won't delete any queued item with a version value that is less than or
* equal to the version of the currently indexed item. The maximum length for this field
* is 1024 bytes.
*/
public Delete setVersion(java.lang.String version) {
this.version = version;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
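          /*
           * Usage sketch (illustrative): deleting an indexed item. The mode and connector name
           * values are assumptions; the version must compare greater than the currently indexed
           * version under the lexical byte-string ordering described above.
           *
           *   Operation op = cloudsearch.indexing().datasources().items()
           *       .delete("datasources/SOURCE_ID/items/ITEM_ID")
           *       .setMode("SYNCHRONOUS") // assumed RequestMode value
           *       .setVersion(java.util.Base64.getEncoder().encodeToString(versionBytes))
           *       .setConnectorName("datasources/SOURCE_ID/connectors/CONNECTOR_ID") // hypothetical
           *       .execute();
           */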
/**
* Deletes all items in a queue. This method is useful for deleting stale items. This API requires
* an admin or service account to execute. The service account used is the one whitelisted in the
* corresponding data source.
*
* Create a request for the method "items.deleteQueueItems".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link DeleteQueueItems#execute()} method to invoke the remote operation.
*
* @param name Name of the Data Source to delete items in a queue. Format: datasources/{source_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.DeleteQueueItemsRequest}
* @return the request
*/
public DeleteQueueItems deleteQueueItems(java.lang.String name, com.google.api.services.cloudsearch.v1.model.DeleteQueueItemsRequest content) throws java.io.IOException {
DeleteQueueItems result = new DeleteQueueItems(name, content);
initialize(result);
return result;
}
public class DeleteQueueItems extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/indexing/{+name}/items:deleteQueueItems";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Deletes all items in a queue. This method is useful for deleting stale items. This API requires
* an admin or service account to execute. The service account used is the one whitelisted in the
* corresponding data source.
*
* Create a request for the method "items.deleteQueueItems".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link DeleteQueueItems#execute()} method to invoke the remote
       * operation. <p> {@link
       * DeleteQueueItems#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the Data Source to delete items in a queue. Format: datasources/{source_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.DeleteQueueItemsRequest}
* @since 1.13
*/
protected DeleteQueueItems(java.lang.String name, com.google.api.services.cloudsearch.v1.model.DeleteQueueItemsRequest content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public DeleteQueueItems set$Xgafv(java.lang.String $Xgafv) {
return (DeleteQueueItems) super.set$Xgafv($Xgafv);
}
@Override
public DeleteQueueItems setAccessToken(java.lang.String accessToken) {
return (DeleteQueueItems) super.setAccessToken(accessToken);
}
@Override
public DeleteQueueItems setAlt(java.lang.String alt) {
return (DeleteQueueItems) super.setAlt(alt);
}
@Override
public DeleteQueueItems setCallback(java.lang.String callback) {
return (DeleteQueueItems) super.setCallback(callback);
}
@Override
public DeleteQueueItems setFields(java.lang.String fields) {
return (DeleteQueueItems) super.setFields(fields);
}
@Override
public DeleteQueueItems setKey(java.lang.String key) {
return (DeleteQueueItems) super.setKey(key);
}
@Override
public DeleteQueueItems setOauthToken(java.lang.String oauthToken) {
return (DeleteQueueItems) super.setOauthToken(oauthToken);
}
@Override
public DeleteQueueItems setPrettyPrint(java.lang.Boolean prettyPrint) {
return (DeleteQueueItems) super.setPrettyPrint(prettyPrint);
}
@Override
public DeleteQueueItems setQuotaUser(java.lang.String quotaUser) {
return (DeleteQueueItems) super.setQuotaUser(quotaUser);
}
@Override
public DeleteQueueItems setUploadType(java.lang.String uploadType) {
return (DeleteQueueItems) super.setUploadType(uploadType);
}
@Override
public DeleteQueueItems setUploadProtocol(java.lang.String uploadProtocol) {
return (DeleteQueueItems) super.setUploadProtocol(uploadProtocol);
}
/**
* Name of the Data Source to delete items in a queue. Format: datasources/{source_id}
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the Data Source to delete items in a queue. Format: datasources/{source_id}
*/
public java.lang.String getName() {
return name;
}
/**
* Name of the Data Source to delete items in a queue. Format: datasources/{source_id}
*/
public DeleteQueueItems setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
@Override
public DeleteQueueItems set(String parameterName, Object value) {
return (DeleteQueueItems) super.set(parameterName, value);
}
}
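          /*
           * Usage sketch (illustrative): clearing stale items from a named polling queue. The
           * setQueue() and setConnectorName() setters on DeleteQueueItemsRequest are assumptions;
           * IDs are placeholders.
           *
           *   DeleteQueueItemsRequest request = new DeleteQueueItemsRequest()
           *       .setQueue("default")
           *       .setConnectorName("datasources/SOURCE_ID/connectors/CONNECTOR_ID");
           *   Operation op = cloudsearch.indexing().datasources().items()
           *       .deleteQueueItems("datasources/SOURCE_ID", request)
           *       .execute();
           */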
/**
* Gets Item resource by item name. This API requires an admin or service account to execute. The
* service account used is the one whitelisted in the corresponding data source.
*
* Create a request for the method "items.get".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Name of the item to get info. Format: datasources/{source_id}/items/{item_id}
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
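      // Usage sketch (illustrative only, not generated code): fetching a single item by resource
      // name. Assumes an initialized client named "cloudsearch", that the enclosing accessor chain
      // is indexing().datasources().items(), and that the data source / item / connector IDs below
      // are examples.
      //
      //   com.google.api.services.cloudsearch.v1.model.Item item =
      //       cloudsearch.indexing().datasources().items()
      //           .get("datasources/example-source/items/example-item")
      //           .setConnectorName("datasources/example-source/connectors/example-connector")
      //           .execute();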
public class Get extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Item> {
private static final String REST_PATH = "v1/indexing/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+/items/[^/]+$");
/**
* Gets Item resource by item name. This API requires an admin or service account to execute. The
* service account used is the one whitelisted in the corresponding data source.
*
* Create a request for the method "items.get".
*
         * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the item to get info. Format: datasources/{source_id}/items/{item_id}
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.Item.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Name of the item to get info. Format: datasources/{source_id}/items/{item_id} */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the item to get info. Format: datasources/{source_id}/items/{item_id}
*/
public java.lang.String getName() {
return name;
}
/** Name of the item to get info. Format: datasources/{source_id}/items/{item_id} */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
this.name = name;
return this;
}
/**
* Name of connector making this call. Format: datasources/{source_id}/connectors/{ID}
*/
@com.google.api.client.util.Key
private java.lang.String connectorName;
/** Name of connector making this call. Format: datasources/{source_id}/connectors/{ID}
*/
public java.lang.String getConnectorName() {
return connectorName;
}
/**
* Name of connector making this call. Format: datasources/{source_id}/connectors/{ID}
*/
public Get setConnectorName(java.lang.String connectorName) {
this.connectorName = connectorName;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
public Get setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Updates Item ACL, metadata, and content. It will insert the Item if it does not exist. This
* method does not support partial updates. Fields with no provided values are cleared out in the
* Cloud Search index. This API requires an admin or service account to execute. The service account
* used is the one whitelisted in the corresponding data source.
*
* Create a request for the method "items.index".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Index#execute()} method to invoke the remote operation.
*
* @param name Name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The
* maximum length is 1536 characters.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.IndexItemRequest}
* @return the request
*/
public Index index(java.lang.String name, com.google.api.services.cloudsearch.v1.model.IndexItemRequest content) throws java.io.IOException {
Index result = new Index(name, content);
initialize(result);
return result;
}
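      // Usage sketch (illustrative only, not generated code): indexing an item asynchronously.
      // Assumes "cloudsearch" is an initialized client, "itemToIndex" is a fully populated
      // com.google.api.services.cloudsearch.v1.model.Item, the accessor chain is
      // indexing().datasources().items(), and the IDs and mode value are examples.
      //
      //   com.google.api.services.cloudsearch.v1.model.IndexItemRequest indexRequest =
      //       new com.google.api.services.cloudsearch.v1.model.IndexItemRequest()
      //           .setItem(itemToIndex)
      //           .setMode("ASYNCHRONOUS");
      //   com.google.api.services.cloudsearch.v1.model.Operation operation =
      //       cloudsearch.indexing().datasources().items()
      //           .index("datasources/example-source/items/example-item", indexRequest)
      //           .execute();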
public class Index extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/indexing/{+name}:index";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+/items/[^/]+$");
/**
* Updates Item ACL, metadata, and content. It will insert the Item if it does not exist. This
* method does not support partial updates. Fields with no provided values are cleared out in the
* Cloud Search index. This API requires an admin or service account to execute. The service
* account used is the one whitelisted in the corresponding data source.
*
* Create a request for the method "items.index".
*
         * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Index#execute()} method to invoke the remote operation.
* <p> {@link
* Index#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The
* maximum length is 1536 characters.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.IndexItemRequest}
* @since 1.13
*/
protected Index(java.lang.String name, com.google.api.services.cloudsearch.v1.model.IndexItemRequest content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
}
@Override
public Index set$Xgafv(java.lang.String $Xgafv) {
return (Index) super.set$Xgafv($Xgafv);
}
@Override
public Index setAccessToken(java.lang.String accessToken) {
return (Index) super.setAccessToken(accessToken);
}
@Override
public Index setAlt(java.lang.String alt) {
return (Index) super.setAlt(alt);
}
@Override
public Index setCallback(java.lang.String callback) {
return (Index) super.setCallback(callback);
}
@Override
public Index setFields(java.lang.String fields) {
return (Index) super.setFields(fields);
}
@Override
public Index setKey(java.lang.String key) {
return (Index) super.setKey(key);
}
@Override
public Index setOauthToken(java.lang.String oauthToken) {
return (Index) super.setOauthToken(oauthToken);
}
@Override
public Index setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Index) super.setPrettyPrint(prettyPrint);
}
@Override
public Index setQuotaUser(java.lang.String quotaUser) {
return (Index) super.setQuotaUser(quotaUser);
}
@Override
public Index setUploadType(java.lang.String uploadType) {
return (Index) super.setUploadType(uploadType);
}
@Override
public Index setUploadProtocol(java.lang.String uploadProtocol) {
return (Index) super.setUploadProtocol(uploadProtocol);
}
/**
* Name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required
* field. The maximum length is 1536 characters.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The
maximum length is 1536 characters.
*/
public java.lang.String getName() {
return name;
}
/**
* Name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required
* field. The maximum length is 1536 characters.
*/
public Index setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Index set(String parameterName, Object value) {
return (Index) super.set(parameterName, value);
}
}
/**
* Lists all or a subset of Item resources. This API requires an admin or service account to
* execute. The service account used is the one whitelisted in the corresponding data source.
*
* Create a request for the method "items.list".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param name Name of the Data Source to list Items. Format: datasources/{source_id}
* @return the request
*/
public List list(java.lang.String name) throws java.io.IOException {
List result = new List(name);
initialize(result);
return result;
}
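      // Usage sketch (illustrative only, not generated code): listing items one page at a time in
      // brief mode. Assumes "cloudsearch" is an initialized client, the accessor chain is
      // indexing().datasources().items(), and the data source ID is an example.
      //
      //   com.google.api.services.cloudsearch.v1.model.ListItemsResponse page =
      //       cloudsearch.indexing().datasources().items()
      //           .list("datasources/example-source")
      //           .setBrief(true)
      //           .setPageSize(100)
      //           .execute();
      //   java.lang.String nextPageToken = page.getNextPageToken(); // pass to setPageToken(...) for the next page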
public class List extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.ListItemsResponse> {
private static final String REST_PATH = "v1/indexing/{+name}/items";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Lists all or a subset of Item resources. This API requires an admin or service account to
* execute. The service account used is the one whitelisted in the corresponding data source.
*
* Create a request for the method "items.list".
*
         * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the Data Source to list Items. Format: datasources/{source_id}
* @since 1.13
*/
protected List(java.lang.String name) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.ListItemsResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Name of the Data Source to list Items. Format: datasources/{source_id} */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the Data Source to list Items. Format: datasources/{source_id}
*/
public java.lang.String getName() {
return name;
}
/** Name of the Data Source to list Items. Format: datasources/{source_id} */
public List setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
/**
* When set to true, the indexing system only populates the following fields: name,
* version, queue. metadata.hash, metadata.title, metadata.sourceRepositoryURL,
* metadata.objectType, metadata.createTime, metadata.updateTime,
* metadata.contentLanguage, metadata.mimeType, structured_data.hash, content.hash,
* itemType, itemStatus.code, itemStatus.processingError.code,
* itemStatus.repositoryError.type, If this value is false, then all the fields are
* populated in Item.
*/
@com.google.api.client.util.Key
private java.lang.Boolean brief;
/** When set to true, the indexing system only populates the following fields: name, version, queue.
metadata.hash, metadata.title, metadata.sourceRepositoryURL, metadata.objectType,
metadata.createTime, metadata.updateTime, metadata.contentLanguage, metadata.mimeType,
structured_data.hash, content.hash, itemType, itemStatus.code, itemStatus.processingError.code,
itemStatus.repositoryError.type, If this value is false, then all the fields are populated in Item.
*/
public java.lang.Boolean getBrief() {
return brief;
}
/**
* When set to true, the indexing system only populates the following fields: name,
* version, queue. metadata.hash, metadata.title, metadata.sourceRepositoryURL,
* metadata.objectType, metadata.createTime, metadata.updateTime,
* metadata.contentLanguage, metadata.mimeType, structured_data.hash, content.hash,
* itemType, itemStatus.code, itemStatus.processingError.code,
* itemStatus.repositoryError.type, If this value is false, then all the fields are
* populated in Item.
*/
public List setBrief(java.lang.Boolean brief) {
this.brief = brief;
return this;
}
/**
* Name of connector making this call. Format: datasources/{source_id}/connectors/{ID}
*/
@com.google.api.client.util.Key
private java.lang.String connectorName;
/** Name of connector making this call. Format: datasources/{source_id}/connectors/{ID}
*/
public java.lang.String getConnectorName() {
return connectorName;
}
/**
* Name of connector making this call. Format: datasources/{source_id}/connectors/{ID}
*/
public List setConnectorName(java.lang.String connectorName) {
this.connectorName = connectorName;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore
* this field.
*/
public List setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
/**
* Maximum number of items to fetch in a request. The max value is 1000 when brief is
* true. The max value is 10 if brief is false. The default value is 10
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Maximum number of items to fetch in a request. The max value is 1000 when brief is true. The max
value is 10 if brief is false. The default value is 10
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* Maximum number of items to fetch in a request. The max value is 1000 when brief is
* true. The max value is 10 if brief is false. The default value is 10
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The next_page_token value returned from a previous List request, if any. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The next_page_token value returned from a previous List request, if any.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The next_page_token value returned from a previous List request, if any. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Polls for unreserved items from the indexing queue and marks a set as reserved, starting with
* items that have the oldest timestamp from the highest priority ItemStatus. The priority order is
* as follows: ERROR MODIFIED NEW_ITEM ACCEPTED Reserving items ensures that polling from other
* threads cannot create overlapping sets. After handling the reserved items, the client should put
* items back into the unreserved state, either by calling index, or by calling push with the type
* REQUEUE. Items automatically become available (unreserved) after 4 hours even if no update or
* push method is called. This API requires an admin or service account to execute. The service
* account used is the one whitelisted in the corresponding data source.
*
* Create a request for the method "items.poll".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Poll#execute()} method to invoke the remote operation.
*
* @param name Name of the Data Source to poll items. Format: datasources/{source_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.PollItemsRequest}
* @return the request
*/
public Poll poll(java.lang.String name, com.google.api.services.cloudsearch.v1.model.PollItemsRequest content) throws java.io.IOException {
Poll result = new Poll(name, content);
initialize(result);
return result;
}
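      // Usage sketch (illustrative only, not generated code): reserving up to 10 queued items for
      // processing. Assumes "cloudsearch" is an initialized client, the accessor chain is
      // indexing().datasources().items(), and the connector / data source names are examples.
      //
      //   com.google.api.services.cloudsearch.v1.model.PollItemsRequest pollRequest =
      //       new com.google.api.services.cloudsearch.v1.model.PollItemsRequest()
      //           .setConnectorName("datasources/example-source/connectors/example-connector")
      //           .setLimit(10);
      //   com.google.api.services.cloudsearch.v1.model.PollItemsResponse reserved =
      //       cloudsearch.indexing().datasources().items()
      //           .poll("datasources/example-source", pollRequest)
      //           .execute();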
public class Poll extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.PollItemsResponse> {
private static final String REST_PATH = "v1/indexing/{+name}/items:poll";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Polls for unreserved items from the indexing queue and marks a set as reserved, starting with
* items that have the oldest timestamp from the highest priority ItemStatus. The priority order
* is as follows: ERROR MODIFIED NEW_ITEM ACCEPTED Reserving items ensures that polling from other
* threads cannot create overlapping sets. After handling the reserved items, the client should
* put items back into the unreserved state, either by calling index, or by calling push with the
* type REQUEUE. Items automatically become available (unreserved) after 4 hours even if no update
* or push method is called. This API requires an admin or service account to execute. The service
* account used is the one whitelisted in the corresponding data source.
*
* Create a request for the method "items.poll".
*
         * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Poll#execute()} method to invoke the remote operation. <p>
* {@link Poll#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the Data Source to poll items. Format: datasources/{source_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.PollItemsRequest}
* @since 1.13
*/
protected Poll(java.lang.String name, com.google.api.services.cloudsearch.v1.model.PollItemsRequest content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.PollItemsResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public Poll set$Xgafv(java.lang.String $Xgafv) {
return (Poll) super.set$Xgafv($Xgafv);
}
@Override
public Poll setAccessToken(java.lang.String accessToken) {
return (Poll) super.setAccessToken(accessToken);
}
@Override
public Poll setAlt(java.lang.String alt) {
return (Poll) super.setAlt(alt);
}
@Override
public Poll setCallback(java.lang.String callback) {
return (Poll) super.setCallback(callback);
}
@Override
public Poll setFields(java.lang.String fields) {
return (Poll) super.setFields(fields);
}
@Override
public Poll setKey(java.lang.String key) {
return (Poll) super.setKey(key);
}
@Override
public Poll setOauthToken(java.lang.String oauthToken) {
return (Poll) super.setOauthToken(oauthToken);
}
@Override
public Poll setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Poll) super.setPrettyPrint(prettyPrint);
}
@Override
public Poll setQuotaUser(java.lang.String quotaUser) {
return (Poll) super.setQuotaUser(quotaUser);
}
@Override
public Poll setUploadType(java.lang.String uploadType) {
return (Poll) super.setUploadType(uploadType);
}
@Override
public Poll setUploadProtocol(java.lang.String uploadProtocol) {
return (Poll) super.setUploadProtocol(uploadProtocol);
}
/** Name of the Data Source to poll items. Format: datasources/{source_id} */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the Data Source to poll items. Format: datasources/{source_id}
*/
public java.lang.String getName() {
return name;
}
/** Name of the Data Source to poll items. Format: datasources/{source_id} */
public Poll setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Poll set(String parameterName, Object value) {
return (Poll) super.set(parameterName, value);
}
}
/**
* Pushes an item onto a queue for later polling and updating. This API requires an admin or service
* account to execute. The service account used is the one whitelisted in the corresponding data
* source.
*
* Create a request for the method "items.push".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Push#execute()} method to invoke the remote operation.
*
* @param name Name of the item to push into the indexing queue. Format: datasources/{source_id}/items/{ID} This is
* a required field. The maximum length is 1536 characters.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.PushItemRequest}
* @return the request
*/
public Push push(java.lang.String name, com.google.api.services.cloudsearch.v1.model.PushItemRequest content) throws java.io.IOException {
Push result = new Push(name, content);
initialize(result);
return result;
}
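      // Usage sketch (illustrative only, not generated code): pushing a modification notification
      // onto the indexing queue. Assumes "cloudsearch" is an initialized client, the accessor
      // chain is indexing().datasources().items(), and the item ID and push type are examples.
      //
      //   com.google.api.services.cloudsearch.v1.model.PushItemRequest pushRequest =
      //       new com.google.api.services.cloudsearch.v1.model.PushItemRequest()
      //           .setItem(new com.google.api.services.cloudsearch.v1.model.PushItem().setType("MODIFIED"));
      //   cloudsearch.indexing().datasources().items()
      //       .push("datasources/example-source/items/example-item", pushRequest)
      //       .execute();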
public class Push extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Item> {
private static final String REST_PATH = "v1/indexing/{+name}:push";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+/items/[^/]+$");
/**
* Pushes an item onto a queue for later polling and updating. This API requires an admin or
* service account to execute. The service account used is the one whitelisted in the
* corresponding data source.
*
* Create a request for the method "items.push".
*
         * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Push#execute()} method to invoke the remote operation. <p>
* {@link Push#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the item to push into the indexing queue. Format: datasources/{source_id}/items/{ID} This is
* a required field. The maximum length is 1536 characters.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.PushItemRequest}
* @since 1.13
*/
protected Push(java.lang.String name, com.google.api.services.cloudsearch.v1.model.PushItemRequest content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Item.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
}
@Override
public Push set$Xgafv(java.lang.String $Xgafv) {
return (Push) super.set$Xgafv($Xgafv);
}
@Override
public Push setAccessToken(java.lang.String accessToken) {
return (Push) super.setAccessToken(accessToken);
}
@Override
public Push setAlt(java.lang.String alt) {
return (Push) super.setAlt(alt);
}
@Override
public Push setCallback(java.lang.String callback) {
return (Push) super.setCallback(callback);
}
@Override
public Push setFields(java.lang.String fields) {
return (Push) super.setFields(fields);
}
@Override
public Push setKey(java.lang.String key) {
return (Push) super.setKey(key);
}
@Override
public Push setOauthToken(java.lang.String oauthToken) {
return (Push) super.setOauthToken(oauthToken);
}
@Override
public Push setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Push) super.setPrettyPrint(prettyPrint);
}
@Override
public Push setQuotaUser(java.lang.String quotaUser) {
return (Push) super.setQuotaUser(quotaUser);
}
@Override
public Push setUploadType(java.lang.String uploadType) {
return (Push) super.setUploadType(uploadType);
}
@Override
public Push setUploadProtocol(java.lang.String uploadProtocol) {
return (Push) super.setUploadProtocol(uploadProtocol);
}
/**
* Name of the item to push into the indexing queue. Format:
* datasources/{source_id}/items/{ID} This is a required field. The maximum length is 1536
* characters.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the item to push into the indexing queue. Format: datasources/{source_id}/items/{ID} This
is a required field. The maximum length is 1536 characters.
*/
public java.lang.String getName() {
return name;
}
/**
* Name of the item to push into the indexing queue. Format:
* datasources/{source_id}/items/{ID} This is a required field. The maximum length is 1536
* characters.
*/
public Push setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Push set(String parameterName, Object value) {
return (Push) super.set(parameterName, value);
}
}
/**
* Unreserves all items from a queue, making them all eligible to be polled. This method is useful
* for resetting the indexing queue after a connector has been restarted. This API requires an admin
* or service account to execute. The service account used is the one whitelisted in the
* corresponding data source.
*
* Create a request for the method "items.unreserve".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Unreserve#execute()} method to invoke the remote operation.
*
* @param name Name of the Data Source to unreserve all items. Format: datasources/{source_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.UnreserveItemsRequest}
* @return the request
*/
public Unreserve unreserve(java.lang.String name, com.google.api.services.cloudsearch.v1.model.UnreserveItemsRequest content) throws java.io.IOException {
Unreserve result = new Unreserve(name, content);
initialize(result);
return result;
}
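      // Usage sketch (illustrative only, not generated code): unreserving every item in a queue,
      // for example after a connector restart. Assumes "cloudsearch" is an initialized client,
      // the accessor chain is indexing().datasources().items(), and the queue name is an example.
      //
      //   com.google.api.services.cloudsearch.v1.model.UnreserveItemsRequest unreserveRequest =
      //       new com.google.api.services.cloudsearch.v1.model.UnreserveItemsRequest()
      //           .setQueue("example-queue");
      //   com.google.api.services.cloudsearch.v1.model.Operation operation =
      //       cloudsearch.indexing().datasources().items()
      //           .unreserve("datasources/example-source", unreserveRequest)
      //           .execute();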
public class Unreserve extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/indexing/{+name}/items:unreserve";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Unreserves all items from a queue, making them all eligible to be polled. This method is useful
* for resetting the indexing queue after a connector has been restarted. This API requires an
* admin or service account to execute. The service account used is the one whitelisted in the
* corresponding data source.
*
* Create a request for the method "items.unreserve".
*
         * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Unreserve#execute()} method to invoke the remote
* operation. <p> {@link
* Unreserve#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the Data Source to unreserve all items. Format: datasources/{source_id}
* @param content the {@link com.google.api.services.cloudsearch.v1.model.UnreserveItemsRequest}
* @since 1.13
*/
protected Unreserve(java.lang.String name, com.google.api.services.cloudsearch.v1.model.UnreserveItemsRequest content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public Unreserve set$Xgafv(java.lang.String $Xgafv) {
return (Unreserve) super.set$Xgafv($Xgafv);
}
@Override
public Unreserve setAccessToken(java.lang.String accessToken) {
return (Unreserve) super.setAccessToken(accessToken);
}
@Override
public Unreserve setAlt(java.lang.String alt) {
return (Unreserve) super.setAlt(alt);
}
@Override
public Unreserve setCallback(java.lang.String callback) {
return (Unreserve) super.setCallback(callback);
}
@Override
public Unreserve setFields(java.lang.String fields) {
return (Unreserve) super.setFields(fields);
}
@Override
public Unreserve setKey(java.lang.String key) {
return (Unreserve) super.setKey(key);
}
@Override
public Unreserve setOauthToken(java.lang.String oauthToken) {
return (Unreserve) super.setOauthToken(oauthToken);
}
@Override
public Unreserve setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Unreserve) super.setPrettyPrint(prettyPrint);
}
@Override
public Unreserve setQuotaUser(java.lang.String quotaUser) {
return (Unreserve) super.setQuotaUser(quotaUser);
}
@Override
public Unreserve setUploadType(java.lang.String uploadType) {
return (Unreserve) super.setUploadType(uploadType);
}
@Override
public Unreserve setUploadProtocol(java.lang.String uploadProtocol) {
return (Unreserve) super.setUploadProtocol(uploadProtocol);
}
/** Name of the Data Source to unreserve all items. Format: datasources/{source_id} */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the Data Source to unreserve all items. Format: datasources/{source_id}
*/
public java.lang.String getName() {
return name;
}
/** Name of the Data Source to unreserve all items. Format: datasources/{source_id} */
public Unreserve setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Unreserve set(String parameterName, Object value) {
return (Unreserve) super.set(parameterName, value);
}
}
/**
* Creates an upload session for uploading item content. For items smaller than 100 KB, it's easier
* to embed the content inline within an index request. This API requires an admin or service
* account to execute. The service account used is the one whitelisted in the corresponding data
* source.
*
* Create a request for the method "items.upload".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Upload#execute()} method to invoke the remote operation.
*
* @param name Name of the Item to start a resumable upload. Format: datasources/{source_id}/items/{item_id}. The
* maximum length is 1536 bytes.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.StartUploadItemRequest}
* @return the request
*/
public Upload upload(java.lang.String name, com.google.api.services.cloudsearch.v1.model.StartUploadItemRequest content) throws java.io.IOException {
Upload result = new Upload(name, content);
initialize(result);
return result;
}
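      // Usage sketch (illustrative only, not generated code): starting a resumable upload session
      // for an item's content; the returned UploadItemRef is later passed to media.upload and to
      // ItemContent in the index call. Assumes "cloudsearch" is an initialized client, the
      // accessor chain is indexing().datasources().items(), and the item ID is an example.
      //
      //   com.google.api.services.cloudsearch.v1.model.UploadItemRef uploadRef =
      //       cloudsearch.indexing().datasources().items()
      //           .upload("datasources/example-source/items/example-item",
      //               new com.google.api.services.cloudsearch.v1.model.StartUploadItemRequest())
      //           .execute();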
public class Upload extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.UploadItemRef> {
private static final String REST_PATH = "v1/indexing/{+name}:upload";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+/items/[^/]+$");
/**
* Creates an upload session for uploading item content. For items smaller than 100 KB, it's
* easier to embed the content inline within an index request. This API requires an admin or
* service account to execute. The service account used is the one whitelisted in the
* corresponding data source.
*
* Create a request for the method "items.upload".
*
         * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Upload#execute()} method to invoke the remote operation.
* <p> {@link
* Upload#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the Item to start a resumable upload. Format: datasources/{source_id}/items/{item_id}. The
* maximum length is 1536 bytes.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.StartUploadItemRequest}
* @since 1.13
*/
protected Upload(java.lang.String name, com.google.api.services.cloudsearch.v1.model.StartUploadItemRequest content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.UploadItemRef.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
}
@Override
public Upload set$Xgafv(java.lang.String $Xgafv) {
return (Upload) super.set$Xgafv($Xgafv);
}
@Override
public Upload setAccessToken(java.lang.String accessToken) {
return (Upload) super.setAccessToken(accessToken);
}
@Override
public Upload setAlt(java.lang.String alt) {
return (Upload) super.setAlt(alt);
}
@Override
public Upload setCallback(java.lang.String callback) {
return (Upload) super.setCallback(callback);
}
@Override
public Upload setFields(java.lang.String fields) {
return (Upload) super.setFields(fields);
}
@Override
public Upload setKey(java.lang.String key) {
return (Upload) super.setKey(key);
}
@Override
public Upload setOauthToken(java.lang.String oauthToken) {
return (Upload) super.setOauthToken(oauthToken);
}
@Override
public Upload setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Upload) super.setPrettyPrint(prettyPrint);
}
@Override
public Upload setQuotaUser(java.lang.String quotaUser) {
return (Upload) super.setQuotaUser(quotaUser);
}
@Override
public Upload setUploadType(java.lang.String uploadType) {
return (Upload) super.setUploadType(uploadType);
}
@Override
public Upload setUploadProtocol(java.lang.String uploadProtocol) {
return (Upload) super.setUploadProtocol(uploadProtocol);
}
/**
* Name of the Item to start a resumable upload. Format:
* datasources/{source_id}/items/{item_id}. The maximum length is 1536 bytes.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the Item to start a resumable upload. Format: datasources/{source_id}/items/{item_id}. The
maximum length is 1536 bytes.
*/
public java.lang.String getName() {
return name;
}
/**
* Name of the Item to start a resumable upload. Format:
* datasources/{source_id}/items/{item_id}. The maximum length is 1536 bytes.
*/
public Upload setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+/items/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Upload set(String parameterName, Object value) {
return (Upload) super.set(parameterName, value);
}
}
}
}
}
/**
* An accessor for creating requests from the Media collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
   * {@code CloudSearch.Media.Upload request = cloudsearch.media().upload(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Media media() {
return new Media();
}
/**
* The "media" collection of methods.
*/
public class Media {
/**
* Uploads media for indexing. The upload endpoint supports direct and resumable upload protocols
* and is intended for large items that can not be [inlined during index
* requests](https://developers.google.com/cloud-
* search/docs/reference/rest/v1/indexing.datasources.items#itemcontent). To index large content: 1.
* Call indexing.datasources.items.upload with the item name to begin an upload session and retrieve
     * the UploadItemRef. 2. Call media.upload to upload the content, as a streaming request, using the
     * same resource name from the UploadItemRef from step 1. 3. Call indexing.datasources.items.index
* to index the item. Populate the [ItemContent](/cloud-
* search/docs/reference/rest/v1/indexing.datasources.items#ItemContent) with the UploadItemRef from
* step 1. For additional information, see [Create a content connector using the REST
* API](https://developers.google.com/cloud-search/docs/guides/content-connector#rest). **Note:**
* This API requires a service account to execute.
*
* Create a request for the method "media.upload".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Upload#execute()} method to invoke the remote operation.
*
* @param resourceName Name of the media that is being downloaded. See ReadRequest.resource_name.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.Media}
* @return the request
*/
public Upload upload(java.lang.String resourceName, com.google.api.services.cloudsearch.v1.model.Media content) throws java.io.IOException {
Upload result = new Upload(resourceName, content);
initialize(result);
return result;
}
/**
* Uploads media for indexing. The upload endpoint supports direct and resumable upload protocols
* and is intended for large items that can not be [inlined during index
* requests](https://developers.google.com/cloud-
* search/docs/reference/rest/v1/indexing.datasources.items#itemcontent). To index large content: 1.
* Call indexing.datasources.items.upload with the item name to begin an upload session and retrieve
     * the UploadItemRef. 2. Call media.upload to upload the content, as a streaming request, using the
     * same resource name from the UploadItemRef from step 1. 3. Call indexing.datasources.items.index
* to index the item. Populate the [ItemContent](/cloud-
* search/docs/reference/rest/v1/indexing.datasources.items#ItemContent) with the UploadItemRef from
* step 1. For additional information, see [Create a content connector using the REST
* API](https://developers.google.com/cloud-search/docs/guides/content-connector#rest). **Note:**
* This API requires a service account to execute.
*
* Create a request for the method "media.upload".
*
     * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Upload#execute()} method to invoke the remote operation.
*
* <p>
* This method should be used for uploading media content.
* </p>
*
* @param resourceName Name of the media that is being downloaded. See ReadRequest.resource_name.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.Media} media metadata or {@code null} if none
* @param mediaContent The media HTTP content or {@code null} if none.
* @return the request
* @throws java.io.IOException if the initialization of the request fails
*/
public Upload upload(java.lang.String resourceName, com.google.api.services.cloudsearch.v1.model.Media content, com.google.api.client.http.AbstractInputStreamContent mediaContent) throws java.io.IOException {
Upload result = new Upload(resourceName, content, mediaContent);
initialize(result);
return result;
}
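    // Usage sketch (illustrative only, not generated code): streaming content into an upload
    // session previously created with indexing.datasources.items.upload. Assumes "cloudsearch" is
    // an initialized client, "uploadRef" is the UploadItemRef returned by that earlier call, and
    // the local file path and MIME type are examples.
    //
    //   com.google.api.client.http.InputStreamContent mediaContent =
    //       new com.google.api.client.http.InputStreamContent("text/plain",
    //           new java.io.FileInputStream("/tmp/example-content.txt"));
    //   cloudsearch.media()
    //       .upload(uploadRef.getName(), new com.google.api.services.cloudsearch.v1.model.Media(), mediaContent)
    //       .execute();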
public class Upload extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Media> {
private static final String REST_PATH = "v1/media/{+resourceName}";
private final java.util.regex.Pattern RESOURCE_NAME_PATTERN =
java.util.regex.Pattern.compile("^.*$");
/**
* Uploads media for indexing. The upload endpoint supports direct and resumable upload protocols
* and is intended for large items that can not be [inlined during index
* requests](https://developers.google.com/cloud-
* search/docs/reference/rest/v1/indexing.datasources.items#itemcontent). To index large content:
* 1. Call indexing.datasources.items.upload with the item name to begin an upload session and
       * retrieve the UploadItemRef. 2. Call media.upload to upload the content, as a streaming request,
       * using the same resource name from the UploadItemRef from step 1. 3. Call
* indexing.datasources.items.index to index the item. Populate the [ItemContent](/cloud-
* search/docs/reference/rest/v1/indexing.datasources.items#ItemContent) with the UploadItemRef
* from step 1. For additional information, see [Create a content connector using the REST
* API](https://developers.google.com/cloud-search/docs/guides/content-connector#rest). **Note:**
* This API requires a service account to execute.
*
* Create a request for the method "media.upload".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Upload#execute()} method to invoke the remote operation.
* <p> {@link
* Upload#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resourceName Name of the media that is being downloaded. See ReadRequest.resource_name.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.Media}
* @since 1.13
*/
protected Upload(java.lang.String resourceName, com.google.api.services.cloudsearch.v1.model.Media content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Media.class);
this.resourceName = com.google.api.client.util.Preconditions.checkNotNull(resourceName, "Required parameter resourceName must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_NAME_PATTERN.matcher(resourceName).matches(),
"Parameter resourceName must conform to the pattern " +
"^.*$");
}
}
/**
* Uploads media for indexing. The upload endpoint supports direct and resumable upload protocols
* and is intended for large items that can not be [inlined during index
* requests](https://developers.google.com/cloud-
* search/docs/reference/rest/v1/indexing.datasources.items#itemcontent). To index large content:
* 1. Call indexing.datasources.items.upload with the item name to begin an upload session and
       * retrieve the UploadItemRef. 2. Call media.upload to upload the content, as a streaming request,
       * using the same resource name from the UploadItemRef from step 1. 3. Call
* indexing.datasources.items.index to index the item. Populate the [ItemContent](/cloud-
* search/docs/reference/rest/v1/indexing.datasources.items#ItemContent) with the UploadItemRef
* from step 1. For additional information, see [Create a content connector using the REST
* API](https://developers.google.com/cloud-search/docs/guides/content-connector#rest). **Note:**
* This API requires a service account to execute.
*
* Create a request for the method "media.upload".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Upload#execute()} method to invoke the remote operation.
* <p> {@link
* Upload#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* <p>
* This constructor should be used for uploading media content.
* </p>
*
* @param resourceName Name of the media that is being downloaded. See ReadRequest.resource_name.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.Media} media metadata or {@code null} if none
* @param mediaContent The media HTTP content or {@code null} if none.
* @since 1.13
*/
protected Upload(java.lang.String resourceName, com.google.api.services.cloudsearch.v1.model.Media content, com.google.api.client.http.AbstractInputStreamContent mediaContent) {
super(CloudSearch.this, "POST", "/upload/" + getServicePath() + REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Media.class);
this.resourceName = com.google.api.client.util.Preconditions.checkNotNull(resourceName, "Required parameter resourceName must be specified.");
initializeMediaUpload(mediaContent);
}
@Override
public Upload set$Xgafv(java.lang.String $Xgafv) {
return (Upload) super.set$Xgafv($Xgafv);
}
@Override
public Upload setAccessToken(java.lang.String accessToken) {
return (Upload) super.setAccessToken(accessToken);
}
@Override
public Upload setAlt(java.lang.String alt) {
return (Upload) super.setAlt(alt);
}
@Override
public Upload setCallback(java.lang.String callback) {
return (Upload) super.setCallback(callback);
}
@Override
public Upload setFields(java.lang.String fields) {
return (Upload) super.setFields(fields);
}
@Override
public Upload setKey(java.lang.String key) {
return (Upload) super.setKey(key);
}
@Override
public Upload setOauthToken(java.lang.String oauthToken) {
return (Upload) super.setOauthToken(oauthToken);
}
@Override
public Upload setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Upload) super.setPrettyPrint(prettyPrint);
}
@Override
public Upload setQuotaUser(java.lang.String quotaUser) {
return (Upload) super.setQuotaUser(quotaUser);
}
@Override
public Upload setUploadType(java.lang.String uploadType) {
return (Upload) super.setUploadType(uploadType);
}
@Override
public Upload setUploadProtocol(java.lang.String uploadProtocol) {
return (Upload) super.setUploadProtocol(uploadProtocol);
}
/** Name of the media that is being downloaded. See ReadRequest.resource_name. */
@com.google.api.client.util.Key
private java.lang.String resourceName;
/** Name of the media that is being downloaded. See ReadRequest.resource_name.
*/
public java.lang.String getResourceName() {
return resourceName;
}
/** Name of the media that is being downloaded. See ReadRequest.resource_name. */
public Upload setResourceName(java.lang.String resourceName) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_NAME_PATTERN.matcher(resourceName).matches(),
"Parameter resourceName must conform to the pattern " +
"^.*$");
}
this.resourceName = resourceName;
return this;
}
@Override
public Upload set(String parameterName, Object value) {
return (Upload) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Operations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
   * {@code CloudSearch.Operations.Get request = cloudsearch.operations().get(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Operations operations() {
return new Operations();
}
/**
* The "operations" collection of methods.
*/
public class Operations {
/**
* Gets the latest state of a long-running operation. Clients can use this method to poll the
* operation result at intervals as recommended by the API service.
*
* Create a request for the method "operations.get".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name The name of the operation resource.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
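    // Usage sketch (illustrative only, not generated code): polling a long-running operation, for
    // example one returned by an index call. Assumes "cloudsearch" is an initialized client and
    // the operation name is an example.
    //
    //   com.google.api.services.cloudsearch.v1.model.Operation operation =
    //       cloudsearch.operations()
    //           .get("operations/example-operation-id")
    //           .execute();
    //   boolean done = Boolean.TRUE.equals(operation.getDone());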
public class Get extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^operations/.*$");
/**
* Gets the latest state of a long-running operation. Clients can use this method to poll the
* operation result at intervals as recommended by the API service.
*
* Create a request for the method "operations.get".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the operation resource.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/.*$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** The name of the operation resource. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation resource.
*/
public java.lang.String getName() {
return name;
}
/** The name of the operation resource. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/.*$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* An accessor for creating requests from the Lro collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
     * {@code CloudSearch.Operations.Lro.List request = cloudsearch.operations().lro().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Lro lro() {
return new Lro();
}
/**
* The "lro" collection of methods.
*/
public class Lro {
/**
* Lists operations that match the specified filter in the request. If the server doesn't support
* this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override
* the binding to use different resource name schemes, such as `users/operations`. To override the
* binding, API services can add a binding such as `"/v1/{name=users}/operations"` to their service
* configuration. For backwards compatibility, the default name includes the operations collection
* id, however overriding users must ensure the name binding is the parent resource, without the
* operations collection id.
*
* Create a request for the method "lro.list".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param name The name of the operation's parent resource.
* @return the request
*/
public List list(java.lang.String name) throws java.io.IOException {
List result = new List(name);
initialize(result);
return result;
}
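      // Usage sketch (illustrative only, not generated code): listing the operations under a
      // parent operation resource. Assumes "cloudsearch" is an initialized client and the parent
      // resource name is an example.
      //
      //   com.google.api.services.cloudsearch.v1.model.ListOperationsResponse operations =
      //       cloudsearch.operations().lro()
      //           .list("operations/example-operation-id")
      //           .execute();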
public class List extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.ListOperationsResponse> {
private static final String REST_PATH = "v1/{+name}/lro";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^operations/.*$");
/**
* Lists operations that match the specified filter in the request. If the server doesn't support
* this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to
* override the binding to use different resource name schemes, such as `users/operations`. To
* override the binding, API services can add a binding such as `"/v1/{name=users}/operations"` to
* their service configuration. For backwards compatibility, the default name includes the
* operations collection id, however overriding users must ensure the name binding is the parent
* resource, without the operations collection id.
*
* Create a request for the method "lro.list".
*
     * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the operation's parent resource.
* @since 1.13
*/
protected List(java.lang.String name) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.ListOperationsResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/.*$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** The name of the operation's parent resource. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation's parent resource.
*/
public java.lang.String getName() {
return name;
}
/** The name of the operation's parent resource. */
public List setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/.*$");
}
this.name = name;
return this;
}
/** The standard list filter. */
@com.google.api.client.util.Key
private java.lang.String filter;
/** The standard list filter.
*/
public java.lang.String getFilter() {
return filter;
}
/** The standard list filter. */
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/** The standard list page size. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The standard list page size.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The standard list page size. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The standard list page token. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The standard list page token.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The standard list page token. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
}
/**
* An accessor for creating requests from the Query collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
   *   {@code CloudSearch.Query.Search request = cloudsearch.query().search(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Query query() {
return new Query();
}
/**
* The "query" collection of methods.
*/
public class Query {
/**
* The Cloud Search Query API provides the search method, which returns the most relevant results
* from a user query. The results can come from G Suite Apps, such as Gmail or Google Drive, or they
* can come from data that you have indexed from a third party. **Note:** This API requires a
* standard end user account to execute. A service account can't perform Query API requests
* directly; to use a service account to perform queries, set up [G Suite domain-wide delegation of
* authority](https://developers.google.com/cloud-search/docs/guides/delegation/).
*
* Create a request for the method "query.search".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Search#execute()} method to invoke the remote operation.
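     *
     * <p>A minimal usage sketch (assuming an already-built {@code CloudSearch} client named
     * {@code cloudsearch} authorized as an end user; the request body fields are placeholders):</p>
     * <pre>
     * {@code SearchRequest body = new SearchRequest(); // populate the query and request options}
     * {@code SearchResponse response = cloudsearch.query().search(body).execute();}
     * </pre>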
*
* @param content the {@link com.google.api.services.cloudsearch.v1.model.SearchRequest}
* @return the request
*/
public Search search(com.google.api.services.cloudsearch.v1.model.SearchRequest content) throws java.io.IOException {
Search result = new Search(content);
initialize(result);
return result;
}
public class Search extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.SearchResponse> {
private static final String REST_PATH = "v1/query/search";
/**
* The Cloud Search Query API provides the search method, which returns the most relevant results
* from a user query. The results can come from G Suite Apps, such as Gmail or Google Drive, or
* they can come from data that you have indexed from a third party. **Note:** This API requires a
* standard end user account to execute. A service account can't perform Query API requests
* directly; to use a service account to perform queries, set up [G Suite domain-wide delegation
* of authority](https://developers.google.com/cloud-search/docs/guides/delegation/).
*
* Create a request for the method "query.search".
*
     * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link Search#execute()} method to invoke the remote operation.
* <p> {@link
* Search#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param content the {@link com.google.api.services.cloudsearch.v1.model.SearchRequest}
* @since 1.13
*/
protected Search(com.google.api.services.cloudsearch.v1.model.SearchRequest content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.SearchResponse.class);
}
@Override
public Search set$Xgafv(java.lang.String $Xgafv) {
return (Search) super.set$Xgafv($Xgafv);
}
@Override
public Search setAccessToken(java.lang.String accessToken) {
return (Search) super.setAccessToken(accessToken);
}
@Override
public Search setAlt(java.lang.String alt) {
return (Search) super.setAlt(alt);
}
@Override
public Search setCallback(java.lang.String callback) {
return (Search) super.setCallback(callback);
}
@Override
public Search setFields(java.lang.String fields) {
return (Search) super.setFields(fields);
}
@Override
public Search setKey(java.lang.String key) {
return (Search) super.setKey(key);
}
@Override
public Search setOauthToken(java.lang.String oauthToken) {
return (Search) super.setOauthToken(oauthToken);
}
@Override
public Search setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Search) super.setPrettyPrint(prettyPrint);
}
@Override
public Search setQuotaUser(java.lang.String quotaUser) {
return (Search) super.setQuotaUser(quotaUser);
}
@Override
public Search setUploadType(java.lang.String uploadType) {
return (Search) super.setUploadType(uploadType);
}
@Override
public Search setUploadProtocol(java.lang.String uploadProtocol) {
return (Search) super.setUploadProtocol(uploadProtocol);
}
@Override
public Search set(String parameterName, Object value) {
return (Search) super.set(parameterName, value);
}
}
/**
* Provides suggestions for autocompleting the query. **Note:** This API requires a standard end
* user account to execute. A service account can't perform Query API requests directly; to use a
* service account to perform queries, set up [G Suite domain-wide delegation of
* authority](https://developers.google.com/cloud-search/docs/guides/delegation/).
*
* Create a request for the method "query.suggest".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Suggest#execute()} method to invoke the remote operation.
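     *
     * <p>A minimal usage sketch (assuming an already-built {@code CloudSearch} client named
     * {@code cloudsearch} authorized as an end user):</p>
     * <pre>
     * {@code SuggestRequest body = new SuggestRequest(); // populate the partial query to complete}
     * {@code SuggestResponse response = cloudsearch.query().suggest(body).execute();}
     * </pre>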
*
* @param content the {@link com.google.api.services.cloudsearch.v1.model.SuggestRequest}
* @return the request
*/
public Suggest suggest(com.google.api.services.cloudsearch.v1.model.SuggestRequest content) throws java.io.IOException {
Suggest result = new Suggest(content);
initialize(result);
return result;
}
public class Suggest extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.SuggestResponse> {
private static final String REST_PATH = "v1/query/suggest";
/**
* Provides suggestions for autocompleting the query. **Note:** This API requires a standard end
* user account to execute. A service account can't perform Query API requests directly; to use a
* service account to perform queries, set up [G Suite domain-wide delegation of
* authority](https://developers.google.com/cloud-search/docs/guides/delegation/).
*
* Create a request for the method "query.suggest".
*
     * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link Suggest#execute()} method to invoke the remote operation.
* <p> {@link
* Suggest#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param content the {@link com.google.api.services.cloudsearch.v1.model.SuggestRequest}
* @since 1.13
*/
protected Suggest(com.google.api.services.cloudsearch.v1.model.SuggestRequest content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.SuggestResponse.class);
}
@Override
public Suggest set$Xgafv(java.lang.String $Xgafv) {
return (Suggest) super.set$Xgafv($Xgafv);
}
@Override
public Suggest setAccessToken(java.lang.String accessToken) {
return (Suggest) super.setAccessToken(accessToken);
}
@Override
public Suggest setAlt(java.lang.String alt) {
return (Suggest) super.setAlt(alt);
}
@Override
public Suggest setCallback(java.lang.String callback) {
return (Suggest) super.setCallback(callback);
}
@Override
public Suggest setFields(java.lang.String fields) {
return (Suggest) super.setFields(fields);
}
@Override
public Suggest setKey(java.lang.String key) {
return (Suggest) super.setKey(key);
}
@Override
public Suggest setOauthToken(java.lang.String oauthToken) {
return (Suggest) super.setOauthToken(oauthToken);
}
@Override
public Suggest setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Suggest) super.setPrettyPrint(prettyPrint);
}
@Override
public Suggest setQuotaUser(java.lang.String quotaUser) {
return (Suggest) super.setQuotaUser(quotaUser);
}
@Override
public Suggest setUploadType(java.lang.String uploadType) {
return (Suggest) super.setUploadType(uploadType);
}
@Override
public Suggest setUploadProtocol(java.lang.String uploadProtocol) {
return (Suggest) super.setUploadProtocol(uploadProtocol);
}
@Override
public Suggest set(String parameterName, Object value) {
return (Suggest) super.set(parameterName, value);
}
}
/**
* An accessor for creating requests from the Sources collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
     *   {@code CloudSearch.Query.Sources.List request = cloudsearch.query().sources().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Sources sources() {
return new Sources();
}
/**
* The "sources" collection of methods.
*/
public class Sources {
/**
     * Returns the list of sources that the user can use for the Search and Suggest APIs. **Note:** This API
* requires a standard end user account to execute. A service account can't perform Query API
* requests directly; to use a service account to perform queries, set up [G Suite domain-wide
* delegation of authority](https://developers.google.com/cloud-search/docs/guides/delegation/).
*
* Create a request for the method "sources.list".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
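     *
     * <p>A minimal usage sketch (assuming an already-built {@code CloudSearch} client named
     * {@code cloudsearch} authorized as an end user):</p>
     * <pre>
     * {@code ListQuerySourcesResponse response = cloudsearch.query().sources().list().execute();}
     * </pre>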
*
* @return the request
*/
public List list() throws java.io.IOException {
List result = new List();
initialize(result);
return result;
}
public class List extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.ListQuerySourcesResponse> {
private static final String REST_PATH = "v1/query/sources";
/**
       * Returns the list of sources that the user can use for the Search and Suggest APIs. **Note:** This API
* requires a standard end user account to execute. A service account can't perform Query API
* requests directly; to use a service account to perform queries, set up [G Suite domain-wide
* delegation of authority](https://developers.google.com/cloud-search/docs/guides/delegation/).
*
* Create a request for the method "sources.list".
*
       * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected List() {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.ListQuerySourcesResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Number of sources to return in the response. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Number of sources to return in the response.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** Number of sources to return in the response. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
@com.google.api.client.util.Key("requestOptions.debugOptions.enableDebugging")
private java.lang.Boolean requestOptionsDebugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getRequestOptionsDebugOptionsEnableDebugging() {
return requestOptionsDebugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
public List setRequestOptionsDebugOptionsEnableDebugging(java.lang.Boolean requestOptionsDebugOptionsEnableDebugging) {
this.requestOptionsDebugOptionsEnableDebugging = requestOptionsDebugOptionsEnableDebugging;
return this;
}
/**
* The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see
             * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. For translations, set
             * this field using the language set in the browser or for the page. In the event that the
* user's language preference is known, set this field to the known user language. When
* specified, the documents in search results are biased towards the specified language. The
* suggest API does not use this parameter. Instead, suggest autocompletes only based on
* characters in the query.
*/
@com.google.api.client.util.Key("requestOptions.languageCode")
private java.lang.String requestOptionsLanguageCode;
/** The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see
       http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. For translations, set this field
       using the language set in the browser or for the page. In the event that the user's language preference
is known, set this field to the known user language. When specified, the documents in search
results are biased towards the specified language. The suggest API does not use this parameter.
Instead, suggest autocompletes only based on characters in the query.
*/
public java.lang.String getRequestOptionsLanguageCode() {
return requestOptionsLanguageCode;
}
/**
* The BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see
             * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. For translations, set
             * this field using the language set in the browser or for the page. In the event that the
* user's language preference is known, set this field to the known user language. When
* specified, the documents in search results are biased towards the specified language. The
* suggest API does not use this parameter. Instead, suggest autocompletes only based on
* characters in the query.
*/
public List setRequestOptionsLanguageCode(java.lang.String requestOptionsLanguageCode) {
this.requestOptionsLanguageCode = requestOptionsLanguageCode;
return this;
}
/**
* The ID generated when you create a search application using the [admin
* console](https://support.google.com/a/answer/9043922).
*/
@com.google.api.client.util.Key("requestOptions.searchApplicationId")
private java.lang.String requestOptionsSearchApplicationId;
/** The ID generated when you create a search application using the [admin
console](https://support.google.com/a/answer/9043922).
*/
public java.lang.String getRequestOptionsSearchApplicationId() {
return requestOptionsSearchApplicationId;
}
/**
* The ID generated when you create a search application using the [admin
* console](https://support.google.com/a/answer/9043922).
*/
public List setRequestOptionsSearchApplicationId(java.lang.String requestOptionsSearchApplicationId) {
this.requestOptionsSearchApplicationId = requestOptionsSearchApplicationId;
return this;
}
/**
* Current user's time zone id, such as "America/Los_Angeles" or "Australia/Sydney". These
* IDs are defined by [Unicode Common Locale Data Repository
* (CLDR)](http://cldr.unicode.org/) project, and currently available in the file
* [timezone.xml](http://unicode.org/repos/cldr/trunk/common/bcp47/timezone.xml). This field
* is used to correctly interpret date and time queries. If this field is not specified, the
* default time zone (UTC) is used.
*/
@com.google.api.client.util.Key("requestOptions.timeZone")
private java.lang.String requestOptionsTimeZone;
/** Current user's time zone id, such as "America/Los_Angeles" or "Australia/Sydney". These IDs are
defined by [Unicode Common Locale Data Repository (CLDR)](http://cldr.unicode.org/) project, and
currently available in the file
[timezone.xml](http://unicode.org/repos/cldr/trunk/common/bcp47/timezone.xml). This field is used
to correctly interpret date and time queries. If this field is not specified, the default time zone
(UTC) is used.
*/
public java.lang.String getRequestOptionsTimeZone() {
return requestOptionsTimeZone;
}
/**
* Current user's time zone id, such as "America/Los_Angeles" or "Australia/Sydney". These
* IDs are defined by [Unicode Common Locale Data Repository
* (CLDR)](http://cldr.unicode.org/) project, and currently available in the file
* [timezone.xml](http://unicode.org/repos/cldr/trunk/common/bcp47/timezone.xml). This field
* is used to correctly interpret date and time queries. If this field is not specified, the
* default time zone (UTC) is used.
*/
public List setRequestOptionsTimeZone(java.lang.String requestOptionsTimeZone) {
this.requestOptionsTimeZone = requestOptionsTimeZone;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
}
/**
* An accessor for creating requests from the Settings collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
   *   {@code CloudSearch.Settings.Datasources.List request = cloudsearch.settings().datasources().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Settings settings() {
return new Settings();
}
/**
* The "settings" collection of methods.
*/
public class Settings {
/**
* An accessor for creating requests from the Datasources collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
     *   {@code CloudSearch.Settings.Datasources.List request = cloudsearch.settings().datasources().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Datasources datasources() {
return new Datasources();
}
/**
* The "datasources" collection of methods.
*/
public class Datasources {
/**
* Creates a datasource. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "datasources.create".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
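     *
     * <p>A minimal usage sketch (assuming an already-built {@code CloudSearch} client named
     * {@code cloudsearch} with admin authorization; the body fields are placeholders):</p>
     * <pre>
     * {@code DataSource body = new DataSource(); // populate the datasource definition}
     * {@code Operation operation = cloudsearch.settings().datasources().create(body).execute();}
     * </pre>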
*
* @param content the {@link com.google.api.services.cloudsearch.v1.model.DataSource}
* @return the request
*/
public Create create(com.google.api.services.cloudsearch.v1.model.DataSource content) throws java.io.IOException {
Create result = new Create(content);
initialize(result);
return result;
}
public class Create extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/settings/datasources";
/**
* Creates a datasource. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "datasources.create".
*
       * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
* <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param content the {@link com.google.api.services.cloudsearch.v1.model.DataSource}
* @since 1.13
*/
protected Create(com.google.api.services.cloudsearch.v1.model.DataSource content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Operation.class);
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
* Deletes a datasource. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "datasources.delete".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
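     *
     * <p>A minimal usage sketch (assuming an already-built {@code CloudSearch} client named
     * {@code cloudsearch} with admin authorization; the datasource name is a placeholder):</p>
     * <pre>
     * {@code Operation operation = cloudsearch.settings().datasources().delete("datasources/my-source-id").execute();}
     * </pre>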
*
* @param name Name of the datasource. Format: datasources/{source_id}.
* @return the request
*/
public Delete delete(java.lang.String name) throws java.io.IOException {
Delete result = new Delete(name);
initialize(result);
return result;
}
public class Delete extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/settings/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Deletes a datasource. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "datasources.delete".
*
       * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the datasource. Format: datasources/{source_id}.
* @since 1.13
*/
protected Delete(java.lang.String name) {
super(CloudSearch.this, "DELETE", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Name of the datasource. Format: datasources/{source_id}. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the datasource. Format: datasources/{source_id}.
*/
public java.lang.String getName() {
return name;
}
/** Name of the datasource. Format: datasources/{source_id}. */
public Delete setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
public Delete setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets a datasource. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "datasources.get".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
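     *
     * <p>A minimal usage sketch (assuming an already-built {@code CloudSearch} client named
     * {@code cloudsearch} with admin authorization; the datasource name is a placeholder):</p>
     * <pre>
     * {@code DataSource dataSource = cloudsearch.settings().datasources().get("datasources/my-source-id").execute();}
     * </pre>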
*
* @param name Name of the datasource resource. Format: datasources/{source_id}.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.DataSource> {
private static final String REST_PATH = "v1/settings/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Gets a datasource. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "datasources.get".
*
       * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the datasource resource. Format: datasources/{source_id}.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.DataSource.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Name of the datasource resource. Format: datasources/{source_id}. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the datasource resource. Format: datasources/{source_id}.
*/
public java.lang.String getName() {
return name;
}
/** Name of the datasource resource. Format: datasources/{source_id}. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
public Get setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists datasources. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "datasources.list".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
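     *
     * <p>A minimal usage sketch (assuming an already-built {@code CloudSearch} client named
     * {@code cloudsearch} with admin authorization):</p>
     * <pre>
     * {@code ListDataSourceResponse response = cloudsearch.settings().datasources().list().setPageSize(10).execute();}
     * </pre>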
*
* @return the request
*/
public List list() throws java.io.IOException {
List result = new List();
initialize(result);
return result;
}
public class List extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.ListDataSourceResponse> {
private static final String REST_PATH = "v1/settings/datasources";
/**
* Lists datasources. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "datasources.list".
*
       * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected List() {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.ListDataSourceResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
public List setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
/**
* Maximum number of datasources to fetch in a request. The max value is 100. The default
         * value is 10.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
        /** Maximum number of datasources to fetch in a request. The max value is 100. The default value is 10.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* Maximum number of datasources to fetch in a request. The max value is 100. The default
         * value is 10.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** Starting index of the results. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Starting index of the results.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** Starting index of the results. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Updates a datasource. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "datasources.update".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Update#execute()} method to invoke the remote operation.
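     *
     * <p>A minimal usage sketch (assuming an already-built {@code CloudSearch} client named
     * {@code cloudsearch} with admin authorization; the name and body fields are placeholders):</p>
     * <pre>
     * {@code UpdateDataSourceRequest body = new UpdateDataSourceRequest(); // wrap the updated DataSource}
     * {@code Operation operation = cloudsearch.settings().datasources().update("datasources/my-source-id", body).execute();}
     * </pre>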
*
* @param name Name of the datasource resource. Format: datasources/{source_id}. The name is ignored when creating
* a datasource.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.UpdateDataSourceRequest}
* @return the request
*/
public Update update(java.lang.String name, com.google.api.services.cloudsearch.v1.model.UpdateDataSourceRequest content) throws java.io.IOException {
Update result = new Update(name, content);
initialize(result);
return result;
}
public class Update extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/settings/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Updates a datasource. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "datasources.update".
*
       * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link Update#execute()} method to invoke the remote operation.
* <p> {@link
* Update#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the datasource resource. Format: datasources/{source_id}. The name is ignored when creating
* a datasource.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.UpdateDataSourceRequest}
* @since 1.13
*/
protected Update(java.lang.String name, com.google.api.services.cloudsearch.v1.model.UpdateDataSourceRequest content) {
super(CloudSearch.this, "PUT", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public Update set$Xgafv(java.lang.String $Xgafv) {
return (Update) super.set$Xgafv($Xgafv);
}
@Override
public Update setAccessToken(java.lang.String accessToken) {
return (Update) super.setAccessToken(accessToken);
}
@Override
public Update setAlt(java.lang.String alt) {
return (Update) super.setAlt(alt);
}
@Override
public Update setCallback(java.lang.String callback) {
return (Update) super.setCallback(callback);
}
@Override
public Update setFields(java.lang.String fields) {
return (Update) super.setFields(fields);
}
@Override
public Update setKey(java.lang.String key) {
return (Update) super.setKey(key);
}
@Override
public Update setOauthToken(java.lang.String oauthToken) {
return (Update) super.setOauthToken(oauthToken);
}
@Override
public Update setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Update) super.setPrettyPrint(prettyPrint);
}
@Override
public Update setQuotaUser(java.lang.String quotaUser) {
return (Update) super.setQuotaUser(quotaUser);
}
@Override
public Update setUploadType(java.lang.String uploadType) {
return (Update) super.setUploadType(uploadType);
}
@Override
public Update setUploadProtocol(java.lang.String uploadProtocol) {
return (Update) super.setUploadProtocol(uploadProtocol);
}
/**
* Name of the datasource resource. Format: datasources/{source_id}. The name is ignored
* when creating a datasource.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the datasource resource. Format: datasources/{source_id}. The name is ignored when creating
a datasource.
*/
public java.lang.String getName() {
return name;
}
/**
* Name of the datasource resource. Format: datasources/{source_id}. The name is ignored
* when creating a datasource.
*/
public Update setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Update set(String parameterName, Object value) {
return (Update) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Searchapplications collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
     *   {@code CloudSearch.Settings.Searchapplications.List request = cloudsearch.settings().searchapplications().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Searchapplications searchapplications() {
return new Searchapplications();
}
/**
* The "searchapplications" collection of methods.
*/
public class Searchapplications {
/**
* Creates a search application. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.create".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
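     *
     * <p>A minimal usage sketch (assuming an already-built {@code CloudSearch} client named
     * {@code cloudsearch} with admin authorization; the body fields are placeholders):</p>
     * <pre>
     * {@code SearchApplication body = new SearchApplication(); // populate the search application definition}
     * {@code Operation operation = cloudsearch.settings().searchapplications().create(body).execute();}
     * </pre>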
*
* @param content the {@link com.google.api.services.cloudsearch.v1.model.SearchApplication}
* @return the request
*/
public Create create(com.google.api.services.cloudsearch.v1.model.SearchApplication content) throws java.io.IOException {
Create result = new Create(content);
initialize(result);
return result;
}
public class Create extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/settings/searchapplications";
/**
* Creates a search application. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.create".
*
       * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
* <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param content the {@link com.google.api.services.cloudsearch.v1.model.SearchApplication}
* @since 1.13
*/
protected Create(com.google.api.services.cloudsearch.v1.model.SearchApplication content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Operation.class);
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
* Deletes a search application. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.delete".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
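     *
     * <p>A minimal usage sketch (assuming an already-built {@code CloudSearch} client named
     * {@code cloudsearch} with admin authorization; the application name is a placeholder and uses the
     * {@code searchapplications/} prefix required by the client-side pattern check):</p>
     * <pre>
     * {@code Operation operation = cloudsearch.settings().searchapplications().delete("searchapplications/my-app-id").execute();}
     * </pre>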
*
* @param name The name of the search application to be deleted. Format: applications/{application_id}.
* @return the request
*/
public Delete delete(java.lang.String name) throws java.io.IOException {
Delete result = new Delete(name);
initialize(result);
return result;
}
public class Delete extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/settings/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^searchapplications/[^/]+$");
/**
* Deletes a search application. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.delete".
*
       * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the search application to be deleted. Format: applications/{application_id}.
* @since 1.13
*/
protected Delete(java.lang.String name) {
super(CloudSearch.this, "DELETE", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/**
* The name of the search application to be deleted. Format: applications/{application_id}.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the search application to be deleted. Format: applications/{application_id}.
*/
public java.lang.String getName() {
return name;
}
/**
* The name of the search application to be deleted. Format: applications/{application_id}.
*/
public Delete setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
this.name = name;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
public Delete setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets the specified search application. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.get".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
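     *
     * <p>A minimal usage sketch (assuming an already-built {@code CloudSearch} client named
     * {@code cloudsearch} with admin authorization; the application name is a placeholder):</p>
     * <pre>
     * {@code SearchApplication app = cloudsearch.settings().searchapplications().get("searchapplications/my-app-id").execute();}
     * </pre>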
*
* @param name Name of the search application. Format: searchapplications/{application_id}.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.SearchApplication> {
private static final String REST_PATH = "v1/settings/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^searchapplications/[^/]+$");
/**
* Gets the specified search application. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.get".
*
       * This request holds the parameters needed by the cloudsearch server.  After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the search application. Format: searchapplications/{application_id}.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.SearchApplication.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Name of the search application. Format: searchapplications/{application_id}. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the search application. Format: searchapplications/{application_id}.
*/
public java.lang.String getName() {
return name;
}
/** Name of the search application. Format: searchapplications/{application_id}. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
this.name = name;
return this;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
public Get setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
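      /*
       * Usage sketch (illustrative, not part of the generated API surface): how the Get request
       * above might be built and executed. The "cloudsearch" client instance, the
       * settings().searchapplications() accessor chain, and the application id "example-app"
       * are assumptions made for this example only.
       *
       *   com.google.api.services.cloudsearch.v1.model.SearchApplication app =
       *       cloudsearch.settings().searchapplications()
       *           .get("searchapplications/example-app")
       *           .execute();
       */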
/**
* Lists all search applications. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.list".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @return the request
*/
public List list() throws java.io.IOException {
List result = new List();
initialize(result);
return result;
}
public class List extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.ListSearchApplicationsResponse> {
private static final String REST_PATH = "v1/settings/searchapplications";
/**
* Lists all search applications. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.list".
*
         * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected List() {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.ListSearchApplicationsResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
@com.google.api.client.util.Key("debugOptions.enableDebugging")
private java.lang.Boolean debugOptionsEnableDebugging;
/** If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field.
*/
public java.lang.Boolean getDebugOptionsEnableDebugging() {
return debugOptionsEnableDebugging;
}
/**
* If you are asked by Google to help with debugging, set this field. Otherwise, ignore this
* field.
*/
public List setDebugOptionsEnableDebugging(java.lang.Boolean debugOptionsEnableDebugging) {
this.debugOptionsEnableDebugging = debugOptionsEnableDebugging;
return this;
}
/** The maximum number of items to return. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of items to return.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The maximum number of items to return. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* The next_page_token value returned from a previous List request, if any. The default
* value is 10
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The next_page_token value returned from a previous List request, if any. The default value is 10
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* The next_page_token value returned from a previous List request, if any. The default
* value is 10
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
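      /*
       * Usage sketch (illustrative only): listing search applications one page at a time. The
       * "cloudsearch" client, the settings().searchapplications() accessor chain, and the page
       * size of 10 are assumptions; setPageToken() would be fed from the previous response when
       * paging through results.
       *
       *   com.google.api.services.cloudsearch.v1.model.ListSearchApplicationsResponse page =
       *       cloudsearch.settings().searchapplications().list()
       *           .setPageSize(10)
       *           .execute();
       */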
/**
* Resets a search application to default settings. This will return an empty response. **Note:**
* This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.reset".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Reset#execute()} method to invoke the remote operation.
*
       * @param name The name of the search application to be reset. Format: searchapplications/{application_id}.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.ResetSearchApplicationRequest}
* @return the request
*/
public Reset reset(java.lang.String name, com.google.api.services.cloudsearch.v1.model.ResetSearchApplicationRequest content) throws java.io.IOException {
Reset result = new Reset(name, content);
initialize(result);
return result;
}
public class Reset extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/settings/{+name}:reset";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^searchapplications/[^/]+$");
/**
* Resets a search application to default settings. This will return an empty response. **Note:**
* This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.reset".
*
         * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Reset#execute()} method to invoke the remote operation.
* <p> {@link
* Reset#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
         * @param name The name of the search application to be reset. Format: searchapplications/{application_id}.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.ResetSearchApplicationRequest}
* @since 1.13
*/
protected Reset(java.lang.String name, com.google.api.services.cloudsearch.v1.model.ResetSearchApplicationRequest content) {
super(CloudSearch.this, "POST", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
}
@Override
public Reset set$Xgafv(java.lang.String $Xgafv) {
return (Reset) super.set$Xgafv($Xgafv);
}
@Override
public Reset setAccessToken(java.lang.String accessToken) {
return (Reset) super.setAccessToken(accessToken);
}
@Override
public Reset setAlt(java.lang.String alt) {
return (Reset) super.setAlt(alt);
}
@Override
public Reset setCallback(java.lang.String callback) {
return (Reset) super.setCallback(callback);
}
@Override
public Reset setFields(java.lang.String fields) {
return (Reset) super.setFields(fields);
}
@Override
public Reset setKey(java.lang.String key) {
return (Reset) super.setKey(key);
}
@Override
public Reset setOauthToken(java.lang.String oauthToken) {
return (Reset) super.setOauthToken(oauthToken);
}
@Override
public Reset setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Reset) super.setPrettyPrint(prettyPrint);
}
@Override
public Reset setQuotaUser(java.lang.String quotaUser) {
return (Reset) super.setQuotaUser(quotaUser);
}
@Override
public Reset setUploadType(java.lang.String uploadType) {
return (Reset) super.setUploadType(uploadType);
}
@Override
public Reset setUploadProtocol(java.lang.String uploadProtocol) {
return (Reset) super.setUploadProtocol(uploadProtocol);
}
/**
         * The name of the search application to be reset. Format: searchapplications/{application_id}.
*/
@com.google.api.client.util.Key
private java.lang.String name;
        /** The name of the search application to be reset. Format: searchapplications/{application_id}.
*/
public java.lang.String getName() {
return name;
}
/**
         * The name of the search application to be reset. Format: searchapplications/{application_id}.
*/
public Reset setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Reset set(String parameterName, Object value) {
return (Reset) super.set(parameterName, value);
}
}
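      /*
       * Usage sketch (illustrative only): resetting a search application to default settings.
       * The "cloudsearch" client, the settings().searchapplications() accessor chain, and the
       * application id "example-app" are assumptions; the empty ResetSearchApplicationRequest
       * body is the model type this method declares.
       *
       *   com.google.api.services.cloudsearch.v1.model.Operation op =
       *       cloudsearch.settings().searchapplications()
       *           .reset("searchapplications/example-app",
       *               new com.google.api.services.cloudsearch.v1.model.ResetSearchApplicationRequest())
       *           .execute();
       */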
/**
* Updates a search application. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.update".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Update#execute()} method to invoke the remote operation.
*
* @param name Name of the Search Application. Format: searchapplications/{application_id}.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.SearchApplication}
* @return the request
*/
public Update update(java.lang.String name, com.google.api.services.cloudsearch.v1.model.SearchApplication content) throws java.io.IOException {
Update result = new Update(name, content);
initialize(result);
return result;
}
public class Update extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.Operation> {
private static final String REST_PATH = "v1/settings/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^searchapplications/[^/]+$");
/**
* Updates a search application. **Note:** This API requires an admin account to execute.
*
* Create a request for the method "searchapplications.update".
*
         * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Update#execute()} method to invoke the remote operation.
* <p> {@link
* Update#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Name of the Search Application. Format: searchapplications/{application_id}.
* @param content the {@link com.google.api.services.cloudsearch.v1.model.SearchApplication}
* @since 1.13
*/
protected Update(java.lang.String name, com.google.api.services.cloudsearch.v1.model.SearchApplication content) {
super(CloudSearch.this, "PUT", REST_PATH, content, com.google.api.services.cloudsearch.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
}
@Override
public Update set$Xgafv(java.lang.String $Xgafv) {
return (Update) super.set$Xgafv($Xgafv);
}
@Override
public Update setAccessToken(java.lang.String accessToken) {
return (Update) super.setAccessToken(accessToken);
}
@Override
public Update setAlt(java.lang.String alt) {
return (Update) super.setAlt(alt);
}
@Override
public Update setCallback(java.lang.String callback) {
return (Update) super.setCallback(callback);
}
@Override
public Update setFields(java.lang.String fields) {
return (Update) super.setFields(fields);
}
@Override
public Update setKey(java.lang.String key) {
return (Update) super.setKey(key);
}
@Override
public Update setOauthToken(java.lang.String oauthToken) {
return (Update) super.setOauthToken(oauthToken);
}
@Override
public Update setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Update) super.setPrettyPrint(prettyPrint);
}
@Override
public Update setQuotaUser(java.lang.String quotaUser) {
return (Update) super.setQuotaUser(quotaUser);
}
@Override
public Update setUploadType(java.lang.String uploadType) {
return (Update) super.setUploadType(uploadType);
}
@Override
public Update setUploadProtocol(java.lang.String uploadProtocol) {
return (Update) super.setUploadProtocol(uploadProtocol);
}
/** Name of the Search Application. Format: searchapplications/{application_id}. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the Search Application. Format: searchapplications/{application_id}.
*/
public java.lang.String getName() {
return name;
}
/** Name of the Search Application. Format: searchapplications/{application_id}. */
public Update setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Update set(String parameterName, Object value) {
return (Update) super.set(parameterName, value);
}
}
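      /*
       * Usage sketch (illustrative only): updating a search application. The "cloudsearch"
       * client, the settings().searchapplications() accessor chain, and the application id
       * "example-app" are assumptions; the SearchApplication body would carry the fields being
       * changed.
       *
       *   com.google.api.services.cloudsearch.v1.model.SearchApplication body =
       *       new com.google.api.services.cloudsearch.v1.model.SearchApplication();
       *   com.google.api.services.cloudsearch.v1.model.Operation op =
       *       cloudsearch.settings().searchapplications()
       *           .update("searchapplications/example-app", body)
       *           .execute();
       */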
}
}
/**
* An accessor for creating requests from the Stats collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Stats.List request = cloudsearch.stats().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Stats stats() {
return new Stats();
}
/**
* The "stats" collection of methods.
*/
public class Stats {
/**
     * Gets indexed item statistics aggregated across all data sources. This API only returns
* statistics for previous dates; it doesn't return statistics for the current day. **Note:** This
* API requires a standard end user account to execute.
*
* Create a request for the method "stats.getIndex".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link GetIndex#execute()} method to invoke the remote operation.
*
* @return the request
*/
public GetIndex getIndex() throws java.io.IOException {
GetIndex result = new GetIndex();
initialize(result);
return result;
}
public class GetIndex extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.GetCustomerIndexStatsResponse> {
private static final String REST_PATH = "v1/stats/index";
/**
       * Gets indexed item statistics aggregated across all data sources. This API only returns
* statistics for previous dates; it doesn't return statistics for the current day. **Note:** This
* API requires a standard end user account to execute.
*
* Create a request for the method "stats.getIndex".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link GetIndex#execute()} method to invoke the remote operation.
* <p> {@link
* GetIndex#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected GetIndex() {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.GetCustomerIndexStatsResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetIndex set$Xgafv(java.lang.String $Xgafv) {
return (GetIndex) super.set$Xgafv($Xgafv);
}
@Override
public GetIndex setAccessToken(java.lang.String accessToken) {
return (GetIndex) super.setAccessToken(accessToken);
}
@Override
public GetIndex setAlt(java.lang.String alt) {
return (GetIndex) super.setAlt(alt);
}
@Override
public GetIndex setCallback(java.lang.String callback) {
return (GetIndex) super.setCallback(callback);
}
@Override
public GetIndex setFields(java.lang.String fields) {
return (GetIndex) super.setFields(fields);
}
@Override
public GetIndex setKey(java.lang.String key) {
return (GetIndex) super.setKey(key);
}
@Override
public GetIndex setOauthToken(java.lang.String oauthToken) {
return (GetIndex) super.setOauthToken(oauthToken);
}
@Override
public GetIndex setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetIndex) super.setPrettyPrint(prettyPrint);
}
@Override
public GetIndex setQuotaUser(java.lang.String quotaUser) {
return (GetIndex) super.setQuotaUser(quotaUser);
}
@Override
public GetIndex setUploadType(java.lang.String uploadType) {
return (GetIndex) super.setUploadType(uploadType);
}
@Override
public GetIndex setUploadProtocol(java.lang.String uploadProtocol) {
return (GetIndex) super.setUploadProtocol(uploadProtocol);
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("fromDate.day")
private java.lang.Integer fromDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getFromDateDay() {
return fromDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public GetIndex setFromDateDay(java.lang.Integer fromDateDay) {
this.fromDateDay = fromDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("fromDate.month")
private java.lang.Integer fromDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getFromDateMonth() {
return fromDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public GetIndex setFromDateMonth(java.lang.Integer fromDateMonth) {
this.fromDateMonth = fromDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("fromDate.year")
private java.lang.Integer fromDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getFromDateYear() {
return fromDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public GetIndex setFromDateYear(java.lang.Integer fromDateYear) {
this.fromDateYear = fromDateYear;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("toDate.day")
private java.lang.Integer toDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getToDateDay() {
return toDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public GetIndex setToDateDay(java.lang.Integer toDateDay) {
this.toDateDay = toDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("toDate.month")
private java.lang.Integer toDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getToDateMonth() {
return toDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public GetIndex setToDateMonth(java.lang.Integer toDateMonth) {
this.toDateMonth = toDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("toDate.year")
private java.lang.Integer toDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getToDateYear() {
return toDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public GetIndex setToDateYear(java.lang.Integer toDateYear) {
this.toDateYear = toDateYear;
return this;
}
@Override
public GetIndex set(String parameterName, Object value) {
return (GetIndex) super.set(parameterName, value);
}
}
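    /*
     * Usage sketch (illustrative only): fetching customer-wide index statistics for a date
     * range. The "cloudsearch" client instance and the concrete dates are assumptions; the
     * from/to setters mirror the optional query parameters declared on GetIndex above.
     *
     *   com.google.api.services.cloudsearch.v1.model.GetCustomerIndexStatsResponse stats =
     *       cloudsearch.stats().getIndex()
     *           .setFromDateYear(2023).setFromDateMonth(1).setFromDateDay(1)
     *           .setToDateYear(2023).setToDateMonth(1).setToDateDay(31)
     *           .execute();
     */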
/**
     * Gets the query statistics for the customer. **Note:** This API requires a standard end user account to
* execute.
*
* Create a request for the method "stats.getQuery".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link GetQuery#execute()} method to invoke the remote operation.
*
* @return the request
*/
public GetQuery getQuery() throws java.io.IOException {
GetQuery result = new GetQuery();
initialize(result);
return result;
}
public class GetQuery extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.GetCustomerQueryStatsResponse> {
private static final String REST_PATH = "v1/stats/query";
/**
       * Gets the query statistics for the customer. **Note:** This API requires a standard end user account
* to execute.
*
* Create a request for the method "stats.getQuery".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link GetQuery#execute()} method to invoke the remote operation.
* <p> {@link
* GetQuery#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected GetQuery() {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.GetCustomerQueryStatsResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetQuery set$Xgafv(java.lang.String $Xgafv) {
return (GetQuery) super.set$Xgafv($Xgafv);
}
@Override
public GetQuery setAccessToken(java.lang.String accessToken) {
return (GetQuery) super.setAccessToken(accessToken);
}
@Override
public GetQuery setAlt(java.lang.String alt) {
return (GetQuery) super.setAlt(alt);
}
@Override
public GetQuery setCallback(java.lang.String callback) {
return (GetQuery) super.setCallback(callback);
}
@Override
public GetQuery setFields(java.lang.String fields) {
return (GetQuery) super.setFields(fields);
}
@Override
public GetQuery setKey(java.lang.String key) {
return (GetQuery) super.setKey(key);
}
@Override
public GetQuery setOauthToken(java.lang.String oauthToken) {
return (GetQuery) super.setOauthToken(oauthToken);
}
@Override
public GetQuery setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetQuery) super.setPrettyPrint(prettyPrint);
}
@Override
public GetQuery setQuotaUser(java.lang.String quotaUser) {
return (GetQuery) super.setQuotaUser(quotaUser);
}
@Override
public GetQuery setUploadType(java.lang.String uploadType) {
return (GetQuery) super.setUploadType(uploadType);
}
@Override
public GetQuery setUploadProtocol(java.lang.String uploadProtocol) {
return (GetQuery) super.setUploadProtocol(uploadProtocol);
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("fromDate.day")
private java.lang.Integer fromDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getFromDateDay() {
return fromDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public GetQuery setFromDateDay(java.lang.Integer fromDateDay) {
this.fromDateDay = fromDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("fromDate.month")
private java.lang.Integer fromDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getFromDateMonth() {
return fromDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public GetQuery setFromDateMonth(java.lang.Integer fromDateMonth) {
this.fromDateMonth = fromDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("fromDate.year")
private java.lang.Integer fromDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getFromDateYear() {
return fromDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public GetQuery setFromDateYear(java.lang.Integer fromDateYear) {
this.fromDateYear = fromDateYear;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("toDate.day")
private java.lang.Integer toDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getToDateDay() {
return toDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public GetQuery setToDateDay(java.lang.Integer toDateDay) {
this.toDateDay = toDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("toDate.month")
private java.lang.Integer toDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getToDateMonth() {
return toDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public GetQuery setToDateMonth(java.lang.Integer toDateMonth) {
this.toDateMonth = toDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("toDate.year")
private java.lang.Integer toDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getToDateYear() {
return toDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public GetQuery setToDateYear(java.lang.Integer toDateYear) {
this.toDateYear = toDateYear;
return this;
}
@Override
public GetQuery set(String parameterName, Object value) {
return (GetQuery) super.set(parameterName, value);
}
}
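    /*
     * Usage sketch (illustrative only): fetching customer query statistics, analogous to the
     * index statistics example above. The "cloudsearch" client and the dates are assumptions.
     *
     *   com.google.api.services.cloudsearch.v1.model.GetCustomerQueryStatsResponse stats =
     *       cloudsearch.stats().getQuery()
     *           .setFromDateYear(2023).setFromDateMonth(1).setFromDateDay(1)
     *           .setToDateYear(2023).setToDateMonth(1).setToDateDay(31)
     *           .execute();
     */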
/**
     * Gets the number of search sessions and the percentage of successful sessions with a click
     * (query statistics) for the customer. **Note:** This API requires a standard end user account to execute.
*
* Create a request for the method "stats.getSession".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link GetSession#execute()} method to invoke the remote operation.
*
* @return the request
*/
public GetSession getSession() throws java.io.IOException {
GetSession result = new GetSession();
initialize(result);
return result;
}
public class GetSession extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.GetCustomerSessionStatsResponse> {
private static final String REST_PATH = "v1/stats/session";
/**
       * Gets the number of search sessions and the percentage of successful sessions with a click
       * (query statistics) for the customer. **Note:** This API requires a standard end user account to execute.
*
* Create a request for the method "stats.getSession".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link GetSession#execute()} method to invoke the remote
* operation. <p> {@link
* GetSession#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected GetSession() {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.GetCustomerSessionStatsResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetSession set$Xgafv(java.lang.String $Xgafv) {
return (GetSession) super.set$Xgafv($Xgafv);
}
@Override
public GetSession setAccessToken(java.lang.String accessToken) {
return (GetSession) super.setAccessToken(accessToken);
}
@Override
public GetSession setAlt(java.lang.String alt) {
return (GetSession) super.setAlt(alt);
}
@Override
public GetSession setCallback(java.lang.String callback) {
return (GetSession) super.setCallback(callback);
}
@Override
public GetSession setFields(java.lang.String fields) {
return (GetSession) super.setFields(fields);
}
@Override
public GetSession setKey(java.lang.String key) {
return (GetSession) super.setKey(key);
}
@Override
public GetSession setOauthToken(java.lang.String oauthToken) {
return (GetSession) super.setOauthToken(oauthToken);
}
@Override
public GetSession setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetSession) super.setPrettyPrint(prettyPrint);
}
@Override
public GetSession setQuotaUser(java.lang.String quotaUser) {
return (GetSession) super.setQuotaUser(quotaUser);
}
@Override
public GetSession setUploadType(java.lang.String uploadType) {
return (GetSession) super.setUploadType(uploadType);
}
@Override
public GetSession setUploadProtocol(java.lang.String uploadProtocol) {
return (GetSession) super.setUploadProtocol(uploadProtocol);
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("fromDate.day")
private java.lang.Integer fromDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getFromDateDay() {
return fromDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public GetSession setFromDateDay(java.lang.Integer fromDateDay) {
this.fromDateDay = fromDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("fromDate.month")
private java.lang.Integer fromDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getFromDateMonth() {
return fromDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public GetSession setFromDateMonth(java.lang.Integer fromDateMonth) {
this.fromDateMonth = fromDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("fromDate.year")
private java.lang.Integer fromDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getFromDateYear() {
return fromDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public GetSession setFromDateYear(java.lang.Integer fromDateYear) {
this.fromDateYear = fromDateYear;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("toDate.day")
private java.lang.Integer toDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getToDateDay() {
return toDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public GetSession setToDateDay(java.lang.Integer toDateDay) {
this.toDateDay = toDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("toDate.month")
private java.lang.Integer toDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getToDateMonth() {
return toDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public GetSession setToDateMonth(java.lang.Integer toDateMonth) {
this.toDateMonth = toDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("toDate.year")
private java.lang.Integer toDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getToDateYear() {
return toDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public GetSession setToDateYear(java.lang.Integer toDateYear) {
this.toDateYear = toDateYear;
return this;
}
@Override
public GetSession set(String parameterName, Object value) {
return (GetSession) super.set(parameterName, value);
}
}
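    /*
     * Usage sketch (illustrative only): fetching customer session statistics over a date range.
     * The "cloudsearch" client and the dates are assumptions.
     *
     *   com.google.api.services.cloudsearch.v1.model.GetCustomerSessionStatsResponse stats =
     *       cloudsearch.stats().getSession()
     *           .setFromDateYear(2023).setFromDateMonth(1).setFromDateDay(1)
     *           .setToDateYear(2023).setToDateMonth(1).setToDateDay(31)
     *           .execute();
     */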
/**
     * Gets the user statistics for the customer. **Note:** This API requires a standard end user account to
* execute.
*
* Create a request for the method "stats.getUser".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link GetUser#execute()} method to invoke the remote operation.
*
* @return the request
*/
public GetUser getUser() throws java.io.IOException {
GetUser result = new GetUser();
initialize(result);
return result;
}
public class GetUser extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.GetCustomerUserStatsResponse> {
private static final String REST_PATH = "v1/stats/user";
/**
       * Gets the user statistics for the customer. **Note:** This API requires a standard end user account
* to execute.
*
* Create a request for the method "stats.getUser".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link GetUser#execute()} method to invoke the remote operation.
* <p> {@link
* GetUser#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected GetUser() {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.GetCustomerUserStatsResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetUser set$Xgafv(java.lang.String $Xgafv) {
return (GetUser) super.set$Xgafv($Xgafv);
}
@Override
public GetUser setAccessToken(java.lang.String accessToken) {
return (GetUser) super.setAccessToken(accessToken);
}
@Override
public GetUser setAlt(java.lang.String alt) {
return (GetUser) super.setAlt(alt);
}
@Override
public GetUser setCallback(java.lang.String callback) {
return (GetUser) super.setCallback(callback);
}
@Override
public GetUser setFields(java.lang.String fields) {
return (GetUser) super.setFields(fields);
}
@Override
public GetUser setKey(java.lang.String key) {
return (GetUser) super.setKey(key);
}
@Override
public GetUser setOauthToken(java.lang.String oauthToken) {
return (GetUser) super.setOauthToken(oauthToken);
}
@Override
public GetUser setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetUser) super.setPrettyPrint(prettyPrint);
}
@Override
public GetUser setQuotaUser(java.lang.String quotaUser) {
return (GetUser) super.setQuotaUser(quotaUser);
}
@Override
public GetUser setUploadType(java.lang.String uploadType) {
return (GetUser) super.setUploadType(uploadType);
}
@Override
public GetUser setUploadProtocol(java.lang.String uploadProtocol) {
return (GetUser) super.setUploadProtocol(uploadProtocol);
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("fromDate.day")
private java.lang.Integer fromDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getFromDateDay() {
return fromDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public GetUser setFromDateDay(java.lang.Integer fromDateDay) {
this.fromDateDay = fromDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("fromDate.month")
private java.lang.Integer fromDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getFromDateMonth() {
return fromDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public GetUser setFromDateMonth(java.lang.Integer fromDateMonth) {
this.fromDateMonth = fromDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("fromDate.year")
private java.lang.Integer fromDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getFromDateYear() {
return fromDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public GetUser setFromDateYear(java.lang.Integer fromDateYear) {
this.fromDateYear = fromDateYear;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("toDate.day")
private java.lang.Integer toDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getToDateDay() {
return toDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public GetUser setToDateDay(java.lang.Integer toDateDay) {
this.toDateDay = toDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("toDate.month")
private java.lang.Integer toDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getToDateMonth() {
return toDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public GetUser setToDateMonth(java.lang.Integer toDateMonth) {
this.toDateMonth = toDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("toDate.year")
private java.lang.Integer toDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getToDateYear() {
return toDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public GetUser setToDateYear(java.lang.Integer toDateYear) {
this.toDateYear = toDateYear;
return this;
}
@Override
public GetUser set(String parameterName, Object value) {
return (GetUser) super.set(parameterName, value);
}
}
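    /*
     * Usage sketch (illustrative only): fetching customer user statistics over a date range.
     * The "cloudsearch" client and the dates are assumptions.
     *
     *   com.google.api.services.cloudsearch.v1.model.GetCustomerUserStatsResponse stats =
     *       cloudsearch.stats().getUser()
     *           .setFromDateYear(2023).setFromDateMonth(1).setFromDateDay(1)
     *           .setToDateYear(2023).setToDateMonth(1).setToDateDay(31)
     *           .execute();
     */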
/**
* An accessor for creating requests from the Index collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Index.List request = cloudsearch.index().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Index index() {
return new Index();
}
/**
* The "index" collection of methods.
*/
public class Index {
/**
* An accessor for creating requests from the Datasources collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Datasources.List request = cloudsearch.datasources().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Datasources datasources() {
return new Datasources();
}
/**
* The "datasources" collection of methods.
*/
public class Datasources {
/**
* Gets indexed item statistics for a single data source. **Note:** This API requires a standard end
* user account to execute.
*
* Create a request for the method "datasources.get".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name The resource id of the data source to retrieve statistics for, in the following format:
* "datasources/{source_id}"
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.GetDataSourceIndexStatsResponse> {
private static final String REST_PATH = "v1/stats/index/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^datasources/[^/]+$");
/**
* Gets indexed item statistics for a single data source. **Note:** This API requires a standard
* end user account to execute.
*
* Create a request for the method "datasources.get".
*
           * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The resource id of the data source to retrieve statistics for, in the following format:
* "datasources/{source_id}"
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.GetDataSourceIndexStatsResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/**
* The resource id of the data source to retrieve statistics for, in the following format:
* "datasources/{source_id}"
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The resource id of the data source to retrieve statistics for, in the following format:
"datasources/{source_id}"
*/
public java.lang.String getName() {
return name;
}
/**
* The resource id of the data source to retrieve statistics for, in the following format:
* "datasources/{source_id}"
*/
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^datasources/[^/]+$");
}
this.name = name;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("fromDate.day")
private java.lang.Integer fromDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getFromDateDay() {
return fromDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public Get setFromDateDay(java.lang.Integer fromDateDay) {
this.fromDateDay = fromDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("fromDate.month")
private java.lang.Integer fromDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getFromDateMonth() {
return fromDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public Get setFromDateMonth(java.lang.Integer fromDateMonth) {
this.fromDateMonth = fromDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("fromDate.year")
private java.lang.Integer fromDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getFromDateYear() {
return fromDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public Get setFromDateYear(java.lang.Integer fromDateYear) {
this.fromDateYear = fromDateYear;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("toDate.day")
private java.lang.Integer toDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getToDateDay() {
return toDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public Get setToDateDay(java.lang.Integer toDateDay) {
this.toDateDay = toDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("toDate.month")
private java.lang.Integer toDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getToDateMonth() {
return toDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public Get setToDateMonth(java.lang.Integer toDateMonth) {
this.toDateMonth = toDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("toDate.year")
private java.lang.Integer toDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getToDateYear() {
return toDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public Get setToDateYear(java.lang.Integer toDateYear) {
this.toDateYear = toDateYear;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
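        /*
         * Usage sketch (illustrative only): per-data-source index statistics. The "cloudsearch"
         * client and the data source id "example-source" are assumptions; the name must match
         * the "datasources/{source_id}" format enforced by the pattern check above.
         *
         *   com.google.api.services.cloudsearch.v1.model.GetDataSourceIndexStatsResponse stats =
         *       cloudsearch.stats().index().datasources()
         *           .get("datasources/example-source")
         *           .execute();
         */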
}
}
/**
* An accessor for creating requests from the Query collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Query.List request = cloudsearch.query().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Query query() {
return new Query();
}
/**
* The "query" collection of methods.
*/
public class Query {
/**
* An accessor for creating requests from the Searchapplications collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Searchapplications.List request = cloudsearch.searchapplications().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Searchapplications searchapplications() {
return new Searchapplications();
}
/**
* The "searchapplications" collection of methods.
*/
public class Searchapplications {
/**
         * Gets the query statistics for a search application. **Note:** This API requires a standard end user
* account to execute.
*
* Create a request for the method "searchapplications.get".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name The resource id of the search application query stats, in the following format:
* searchapplications/{application_id}
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.GetSearchApplicationQueryStatsResponse> {
private static final String REST_PATH = "v1/stats/query/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^searchapplications/[^/]+$");
/**
           * Gets the query statistics for a search application. **Note:** This API requires a standard end
* user account to execute.
*
* Create a request for the method "searchapplications.get".
*
           * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The resource id of the search application query stats, in the following format:
* searchapplications/{application_id}
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.GetSearchApplicationQueryStatsResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/**
* The resource id of the search application query stats, in the following format:
* searchapplications/{application_id}
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The resource id of the search application query stats, in the following format:
searchapplications/{application_id}
*/
public java.lang.String getName() {
return name;
}
/**
* The resource id of the search application query stats, in the following format:
* searchapplications/{application_id}
*/
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
this.name = name;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("fromDate.day")
private java.lang.Integer fromDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getFromDateDay() {
return fromDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public Get setFromDateDay(java.lang.Integer fromDateDay) {
this.fromDateDay = fromDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("fromDate.month")
private java.lang.Integer fromDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getFromDateMonth() {
return fromDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public Get setFromDateMonth(java.lang.Integer fromDateMonth) {
this.fromDateMonth = fromDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("fromDate.year")
private java.lang.Integer fromDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getFromDateYear() {
return fromDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public Get setFromDateYear(java.lang.Integer fromDateYear) {
this.fromDateYear = fromDateYear;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("toDate.day")
private java.lang.Integer toDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getToDateDay() {
return toDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public Get setToDateDay(java.lang.Integer toDateDay) {
this.toDateDay = toDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("toDate.month")
private java.lang.Integer toDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getToDateMonth() {
return toDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public Get setToDateMonth(java.lang.Integer toDateMonth) {
this.toDateMonth = toDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("toDate.year")
private java.lang.Integer toDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getToDateYear() {
return toDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public Get setToDateYear(java.lang.Integer toDateYear) {
this.toDateYear = toDateYear;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
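        /*
         * Usage sketch (illustrative only): per-search-application query statistics. The
         * "cloudsearch" client and the application id "example-app" are assumptions; the name
         * must match the "searchapplications/{application_id}" format enforced by the pattern
         * check above.
         *
         *   com.google.api.services.cloudsearch.v1.model.GetSearchApplicationQueryStatsResponse stats =
         *       cloudsearch.stats().query().searchapplications()
         *           .get("searchapplications/example-app")
         *           .execute();
         */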
}
}
/**
* An accessor for creating requests from the Session collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Session.List request = cloudsearch.session().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Session session() {
return new Session();
}
/**
* The "session" collection of methods.
*/
public class Session {
/**
* An accessor for creating requests from the Searchapplications collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Searchapplications.List request = cloudsearch.searchapplications().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Searchapplications searchapplications() {
return new Searchapplications();
}
/**
* The "searchapplications" collection of methods.
*/
public class Searchapplications {
/**
         * Gets the number of search sessions and the percentage of successful sessions with a click
         * (query statistics) for a search application. **Note:** This API requires a standard end user account to execute.
*
* Create a request for the method "searchapplications.get".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name The resource id of the search application session stats, in the following format:
* searchapplications/{application_id}
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.GetSearchApplicationSessionStatsResponse> {
private static final String REST_PATH = "v1/stats/session/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^searchapplications/[^/]+$");
/**
           * Gets the number of search sessions and the percentage of successful sessions with a click
           * (query statistics) for a search application. **Note:** This API requires a standard end user account to execute.
*
* Create a request for the method "searchapplications.get".
*
           * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The resource id of the search application session stats, in the following format:
* searchapplications/{application_id}
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.GetSearchApplicationSessionStatsResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/**
* The resource id of the search application session stats, in the following format:
* searchapplications/{application_id}
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The resource id of the search application session stats, in the following format:
searchapplications/{application_id}
*/
public java.lang.String getName() {
return name;
}
/**
* The resource id of the search application session stats, in the following format:
* searchapplications/{application_id}
*/
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
this.name = name;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("fromDate.day")
private java.lang.Integer fromDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getFromDateDay() {
return fromDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public Get setFromDateDay(java.lang.Integer fromDateDay) {
this.fromDateDay = fromDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("fromDate.month")
private java.lang.Integer fromDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getFromDateMonth() {
return fromDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public Get setFromDateMonth(java.lang.Integer fromDateMonth) {
this.fromDateMonth = fromDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("fromDate.year")
private java.lang.Integer fromDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getFromDateYear() {
return fromDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public Get setFromDateYear(java.lang.Integer fromDateYear) {
this.fromDateYear = fromDateYear;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("toDate.day")
private java.lang.Integer toDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getToDateDay() {
return toDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public Get setToDateDay(java.lang.Integer toDateDay) {
this.toDateDay = toDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("toDate.month")
private java.lang.Integer toDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getToDateMonth() {
return toDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public Get setToDateMonth(java.lang.Integer toDateMonth) {
this.toDateMonth = toDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("toDate.year")
private java.lang.Integer toDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getToDateYear() {
return toDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public Get setToDateYear(java.lang.Integer toDateYear) {
this.toDateYear = toDateYear;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
}
}
/**
* An accessor for creating requests from the User collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.User.List request = cloudsearch.user().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public User user() {
return new User();
}
/**
* The "user" collection of methods.
*/
public class User {
/**
* An accessor for creating requests from the Searchapplications collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudSearch cloudsearch = new CloudSearch(...);}
* {@code CloudSearch.Searchapplications.List request = cloudsearch.searchapplications().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Searchapplications searchapplications() {
return new Searchapplications();
}
/**
* The "searchapplications" collection of methods.
*/
public class Searchapplications {
/**
   * Get the user statistics for the search application. **Note:** This API requires a standard end user
* account to execute.
*
* Create a request for the method "searchapplications.get".
*
* This request holds the parameters needed by the cloudsearch server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name The resource id of the search application session stats, in the following format:
* searchapplications/{application_id}
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends CloudSearchRequest<com.google.api.services.cloudsearch.v1.model.GetSearchApplicationUserStatsResponse> {
private static final String REST_PATH = "v1/stats/user/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^searchapplications/[^/]+$");
/**
       * Get the user statistics for the search application. **Note:** This API requires a standard end
* user account to execute.
*
* Create a request for the method "searchapplications.get".
*
       * This request holds the parameters needed by the cloudsearch server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The resource id of the search application session stats, in the following format:
* searchapplications/{application_id}
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudSearch.this, "GET", REST_PATH, null, com.google.api.services.cloudsearch.v1.model.GetSearchApplicationUserStatsResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/**
* The resource id of the search application session stats, in the following format:
* searchapplications/{application_id}
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The resource id of the search application session stats, in the following format:
searchapplications/{application_id}
*/
public java.lang.String getName() {
return name;
}
/**
* The resource id of the search application session stats, in the following format:
* searchapplications/{application_id}
*/
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^searchapplications/[^/]+$");
}
this.name = name;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("fromDate.day")
private java.lang.Integer fromDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getFromDateDay() {
return fromDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public Get setFromDateDay(java.lang.Integer fromDateDay) {
this.fromDateDay = fromDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("fromDate.month")
private java.lang.Integer fromDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getFromDateMonth() {
return fromDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public Get setFromDateMonth(java.lang.Integer fromDateMonth) {
this.fromDateMonth = fromDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("fromDate.year")
private java.lang.Integer fromDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getFromDateYear() {
return fromDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public Get setFromDateYear(java.lang.Integer fromDateYear) {
this.fromDateYear = fromDateYear;
return this;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
@com.google.api.client.util.Key("toDate.day")
private java.lang.Integer toDateDay;
/** Day of month. Must be from 1 to 31 and valid for the year and month.
*/
public java.lang.Integer getToDateDay() {
return toDateDay;
}
/** Day of month. Must be from 1 to 31 and valid for the year and month. */
public Get setToDateDay(java.lang.Integer toDateDay) {
this.toDateDay = toDateDay;
return this;
}
/** Month of date. Must be from 1 to 12. */
@com.google.api.client.util.Key("toDate.month")
private java.lang.Integer toDateMonth;
/** Month of date. Must be from 1 to 12.
*/
public java.lang.Integer getToDateMonth() {
return toDateMonth;
}
/** Month of date. Must be from 1 to 12. */
public Get setToDateMonth(java.lang.Integer toDateMonth) {
this.toDateMonth = toDateMonth;
return this;
}
/** Year of date. Must be from 1 to 9999. */
@com.google.api.client.util.Key("toDate.year")
private java.lang.Integer toDateYear;
/** Year of date. Must be from 1 to 9999.
*/
public java.lang.Integer getToDateYear() {
return toDateYear;
}
/** Year of date. Must be from 1 to 9999. */
public Get setToDateYear(java.lang.Integer toDateYear) {
this.toDateYear = toDateYear;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
}
}
}
/**
* Builder for {@link CloudSearch}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
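    // Illustrative sketch only (not part of the generated client): one way this Builder might be
    // wired up, assuming GoogleNetHttpTransport and GsonFactory (both mentioned in the Javadoc
    // above) are on the classpath; the application name below is a placeholder.
    //
    //   CloudSearch cloudsearch = new CloudSearch.Builder(
    //           com.google.api.client.googleapis.javanet.GoogleNetHttpTransport.newTrustedTransport(),
    //           new com.google.api.client.json.gson.GsonFactory(),
    //           /* httpRequestInitializer= */ null)
    //       .setApplicationName("example-application")
    //       .build();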
/** Builds a new instance of {@link CloudSearch}. */
@Override
public CloudSearch build() {
return new CloudSearch(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link CloudSearchRequestInitializer}.
*
* @since 1.12
*/
public Builder setCloudSearchRequestInitializer(
CloudSearchRequestInitializer cloudsearchRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(cloudsearchRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
}
| [
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
]
| []
| [
"GOOGLE_API_USE_MTLS_ENDPOINT"
]
| [] | ["GOOGLE_API_USE_MTLS_ENDPOINT"] | java | 1 | 0 | |
tests/unit/gapic/compute_v1/test_zone_operations.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests.sessions import Session
from google import auth
from google.api_core import client_options
from google.api_core import exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.zone_operations import ZoneOperationsClient
from google.cloud.compute_v1.services.zone_operations import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ZoneOperationsClient._get_default_mtls_endpoint(None) is None
assert (
ZoneOperationsClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
ZoneOperationsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
ZoneOperationsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
ZoneOperationsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
ZoneOperationsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
def test_zone_operations_client_from_service_account_info():
creds = credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = ZoneOperationsClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == "compute.googleapis.com:443"
@pytest.mark.parametrize("client_class", [ZoneOperationsClient,])
def test_zone_operations_client_from_service_account_file(client_class):
creds = credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == "compute.googleapis.com:443"
def test_zone_operations_client_get_transport_class():
transport = ZoneOperationsClient.get_transport_class()
available_transports = [
transports.ZoneOperationsRestTransport,
]
assert transport in available_transports
transport = ZoneOperationsClient.get_transport_class("rest")
assert transport == transports.ZoneOperationsRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest"),],
)
@mock.patch.object(
ZoneOperationsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ZoneOperationsClient),
)
def test_zone_operations_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(ZoneOperationsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(ZoneOperationsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest", "true"),
(ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest", "false"),
],
)
@mock.patch.object(
ZoneOperationsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ZoneOperationsClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_zone_operations_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest"),],
)
def test_zone_operations_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest"),],
)
def test_zone_operations_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_delete_rest(
transport: str = "rest", request_type=compute.DeleteZoneOperationRequest
):
client = ZoneOperationsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.DeleteZoneOperationResponse()
# Wrap the value into a proper Response obj
json_return_value = compute.DeleteZoneOperationResponse.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.DeleteZoneOperationResponse)
def test_delete_rest_from_dict():
test_delete_rest(request_type=dict)
def test_delete_rest_flattened():
client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.DeleteZoneOperationResponse()
# Wrap the value into a proper Response obj
json_return_value = compute.DeleteZoneOperationResponse.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete(
project="project_value", zone="zone_value", operation="operation_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("json")
assert "project_value" in http_call[1] + str(body)
assert "zone_value" in http_call[1] + str(body)
assert "operation_value" in http_call[1] + str(body)
def test_delete_rest_flattened_error():
client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete(
compute.DeleteZoneOperationRequest(),
project="project_value",
zone="zone_value",
operation="operation_value",
)
def test_get_rest(
transport: str = "rest", request_type=compute.GetZoneOperationRequest
):
client = ZoneOperationsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
error=compute.Error(errors=[compute.Errors(code="code_value")]),
http_error_message="http_error_message_value",
http_error_status_code=2374,
id="id_value",
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id="target_id_value",
target_link="target_link_value",
user="user_value",
warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == "id_value"
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == "target_id_value"
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.warnings == [
compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
]
assert response.zone == "zone_value"
def test_get_rest_from_dict():
test_get_rest(request_type=dict)
def test_get_rest_flattened():
client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get(
project="project_value", zone="zone_value", operation="operation_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("json")
assert "project_value" in http_call[1] + str(body)
assert "zone_value" in http_call[1] + str(body)
assert "operation_value" in http_call[1] + str(body)
def test_get_rest_flattened_error():
client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetZoneOperationRequest(),
project="project_value",
zone="zone_value",
operation="operation_value",
)
def test_list_rest(
transport: str = "rest", request_type=compute.ListZoneOperationsRequest
):
client = ZoneOperationsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.OperationList(
id="id_value",
items=[compute.Operation(client_operation_id="client_operation_id_value")],
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
warning=compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED),
)
# Wrap the value into a proper Response obj
json_return_value = compute.OperationList.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
assert response.raw_page is response
# Establish that the response is the type that we expect.
assert isinstance(response, compute.OperationList)
assert response.id == "id_value"
assert response.items == [
compute.Operation(client_operation_id="client_operation_id_value")
]
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
assert response.warning == compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED)
def test_list_rest_from_dict():
test_list_rest(request_type=dict)
def test_list_rest_flattened():
client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.OperationList()
# Wrap the value into a proper Response obj
json_return_value = compute.OperationList.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list(
project="project_value", zone="zone_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("json")
assert "project_value" in http_call[1] + str(body)
assert "zone_value" in http_call[1] + str(body)
def test_list_rest_flattened_error():
client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListZoneOperationsRequest(),
project="project_value",
zone="zone_value",
)
def test_wait_rest(
transport: str = "rest", request_type=compute.WaitZoneOperationRequest
):
client = ZoneOperationsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
error=compute.Error(errors=[compute.Errors(code="code_value")]),
http_error_message="http_error_message_value",
http_error_status_code=2374,
id="id_value",
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id="target_id_value",
target_link="target_link_value",
user="user_value",
warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.wait(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == "id_value"
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == "target_id_value"
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.warnings == [
compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
]
assert response.zone == "zone_value"
def test_wait_rest_from_dict():
test_wait_rest(request_type=dict)
def test_wait_rest_flattened():
client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.wait(
project="project_value", zone="zone_value", operation="operation_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("json")
assert "project_value" in http_call[1] + str(body)
assert "zone_value" in http_call[1] + str(body)
assert "operation_value" in http_call[1] + str(body)
def test_wait_rest_flattened_error():
client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.wait(
compute.WaitZoneOperationRequest(),
project="project_value",
zone="zone_value",
operation="operation_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ZoneOperationsRestTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ZoneOperationsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ZoneOperationsRestTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ZoneOperationsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.ZoneOperationsRestTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ZoneOperationsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ZoneOperationsRestTransport(
credentials=credentials.AnonymousCredentials(),
)
client = ZoneOperationsClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize("transport_class", [transports.ZoneOperationsRestTransport,])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_zone_operations_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(exceptions.DuplicateCredentialArgs):
transport = transports.ZoneOperationsTransport(
credentials=credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_zone_operations_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.zone_operations.transports.ZoneOperationsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.ZoneOperationsTransport(
credentials=credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"delete",
"get",
"list",
"wait",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_zone_operations_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
auth, "load_credentials_from_file"
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.zone_operations.transports.ZoneOperationsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.ZoneOperationsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_zone_operations_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(auth, "default") as adc, mock.patch(
"google.cloud.compute_v1.services.zone_operations.transports.ZoneOperationsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.ZoneOperationsTransport()
adc.assert_called_once()
def test_zone_operations_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
ZoneOperationsClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_zone_operations_http_transport_client_cert_source_for_mtls():
cred = credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.ZoneOperationsRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
def test_zone_operations_host_no_port():
client = ZoneOperationsClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
)
assert client.transport._host == "compute.googleapis.com:443"
def test_zone_operations_host_with_port():
client = ZoneOperationsClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
)
assert client.transport._host == "compute.googleapis.com:8000"
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = ZoneOperationsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = ZoneOperationsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ZoneOperationsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = ZoneOperationsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = ZoneOperationsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ZoneOperationsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = ZoneOperationsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = ZoneOperationsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ZoneOperationsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = ZoneOperationsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = ZoneOperationsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ZoneOperationsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = ZoneOperationsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = ZoneOperationsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ZoneOperationsClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.ZoneOperationsTransport, "_prep_wrapped_messages"
) as prep:
client = ZoneOperationsClient(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.ZoneOperationsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = ZoneOperationsClient.get_transport_class()
transport = transport_class(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
providers/yahoo/yahoo_test.go | package yahoo_test
import (
"github.com/viddsee/goth"
"github.com/viddsee/goth/providers/yahoo"
"github.com/stretchr/testify/assert"
"os"
"testing"
)
func Test_New(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
a.Equal(p.ClientKey, os.Getenv("YAHOO_KEY"))
a.Equal(p.Secret, os.Getenv("YAHOO_SECRET"))
a.Equal(p.CallbackURL, "/foo")
}
func Test_Implements_Provider(t *testing.T) {
t.Parallel()
a := assert.New(t)
a.Implements((*goth.Provider)(nil), provider())
}
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.BeginAuth("test_state")
s := session.(*yahoo.Session)
a.NoError(err)
a.Contains(s.AuthURL, "api.login.yahoo.com/oauth2/request_auth")
}
func Test_SessionFromJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.UnmarshalSession(`{"AuthURL":"https://api.login.yahoo.com/oauth2/request_auth","AccessToken":"1234567890"}`)
a.NoError(err)
s := session.(*yahoo.Session)
a.Equal(s.AuthURL, "https://api.login.yahoo.com/oauth2/request_auth")
a.Equal(s.AccessToken, "1234567890")
}
func provider() *yahoo.Provider {
return yahoo.New(os.Getenv("YAHOO_KEY"), os.Getenv("YAHOO_SECRET"), "/foo")
}
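// Illustrative sketch only (not part of the test suite): how the provider built above might be
// used in an application. The callback URL and state value are placeholders, and YAHOO_KEY /
// YAHOO_SECRET are assumed to be set in the environment.
//
//	p := yahoo.New(os.Getenv("YAHOO_KEY"), os.Getenv("YAHOO_SECRET"), "https://example.com/auth/yahoo/callback")
//	session, err := p.BeginAuth("state-token")
//	if err != nil {
//		// handle the error
//	}
//	s := session.(*yahoo.Session)
//	// redirect the user to s.AuthURL, then complete the exchange in the callback handler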
| [
"\"YAHOO_KEY\"",
"\"YAHOO_SECRET\"",
"\"YAHOO_KEY\"",
"\"YAHOO_SECRET\""
]
| []
| [
"YAHOO_KEY",
"YAHOO_SECRET"
]
| [] | ["YAHOO_KEY", "YAHOO_SECRET"] | go | 2 | 0 | |
service/step_defs_test.go | package service
/*
Copyright (c) 2019 Dell Inc, or its subsidiaries.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"errors"
"fmt"
"github.com/dell/csi-isilon/common/constants"
"github.com/dell/csi-isilon/common/k8sutils"
"log"
"net"
"net/http/httptest"
"os"
"runtime"
"strings"
"sync"
csi "github.com/container-storage-interface/spec/lib/go/csi"
"github.com/cucumber/godog"
"github.com/dell/csi-isilon/common/utils"
"github.com/dell/gocsi"
"github.com/dell/gofsutil"
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"os/exec"
)
type feature struct {
nGoRoutines int
server *httptest.Server
service *service
	err                                error // return from the preceding call
getPluginInfoResponse *csi.GetPluginInfoResponse
getPluginCapabilitiesResponse *csi.GetPluginCapabilitiesResponse
probeResponse *csi.ProbeResponse
createVolumeResponse *csi.CreateVolumeResponse
publishVolumeResponse *csi.ControllerPublishVolumeResponse
unpublishVolumeResponse *csi.ControllerUnpublishVolumeResponse
nodeGetInfoResponse *csi.NodeGetInfoResponse
nodeGetCapabilitiesResponse *csi.NodeGetCapabilitiesResponse
deleteVolumeResponse *csi.DeleteVolumeResponse
getCapacityResponse *csi.GetCapacityResponse
controllerGetCapabilitiesResponse *csi.ControllerGetCapabilitiesResponse
validateVolumeCapabilitiesResponse *csi.ValidateVolumeCapabilitiesResponse
createSnapshotResponse *csi.CreateSnapshotResponse
createVolumeRequest *csi.CreateVolumeRequest
publishVolumeRequest *csi.ControllerPublishVolumeRequest
unpublishVolumeRequest *csi.ControllerUnpublishVolumeRequest
deleteVolumeRequest *csi.DeleteVolumeRequest
controllerExpandVolumeRequest *csi.ControllerExpandVolumeRequest
controllerExpandVolumeResponse *csi.ControllerExpandVolumeResponse
listVolumesRequest *csi.ListVolumesRequest
listVolumesResponse *csi.ListVolumesResponse
listSnapshotsRequest *csi.ListSnapshotsRequest
listSnapshotsResponse *csi.ListSnapshotsResponse
listedVolumeIDs map[string]bool
listVolumesNextTokenCache string
wrongCapacity, wrongStoragePool bool
accessZone string
capability *csi.VolumeCapability
capabilities []*csi.VolumeCapability
nodeStageVolumeRequest *csi.NodeStageVolumeRequest
nodeStageVolumeResponse *csi.NodeStageVolumeResponse
nodeUnstageVolumeRequest *csi.NodeUnstageVolumeRequest
nodeUnstageVolumeResponse *csi.NodeUnstageVolumeResponse
nodePublishVolumeRequest *csi.NodePublishVolumeRequest
nodeUnpublishVolumeRequest *csi.NodeUnpublishVolumeRequest
nodeUnpublishVolumeResponse *csi.NodeUnpublishVolumeResponse
deleteSnapshotRequest *csi.DeleteSnapshotRequest
deleteSnapshotResponse *csi.DeleteSnapshotResponse
createSnapshotRequest *csi.CreateSnapshotRequest
volumeIDList []string
snapshotIDList []string
snapshotIndex int
}
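// inducedErrors holds the request-level fault-injection flags toggled by the
// "I induce error" steps.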
var inducedErrors struct {
badVolumeIdentifier bool
invalidVolumeID bool
noVolumeID bool
differentVolumeID bool
noNodeName bool
noNodeID bool
omitVolumeCapability bool
omitAccessMode bool
useAccessTypeMount bool
noIsiService bool
autoProbeNotEnabled bool
}
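// Test fixtures: a fixed volume identifier, scratch paths used as publish targets,
// and the name of the default test cluster.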
const (
Volume1 = "d0f055a700000000"
datafile = "test/tmp/datafile"
datadir = "test/tmp/datadir"
datafile2 = "test/tmp/datafile2"
datadir2 = "test/tmp/datadir2"
clusterName1 = "cluster1"
)
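// aIsilonService resets the gofsutil mocks and induced-error flags, then wires the
// cached service to the httptest mock OneFS endpoint (or to a real endpoint when
// CSI_ISILON_ENDPOINT is set) and refreshes the isiSvc on the default cluster config.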
func (f *feature) aIsilonService() error {
f.checkGoRoutines("start aIsilonService")
f.err = nil
f.getPluginInfoResponse = nil
f.volumeIDList = f.volumeIDList[:0]
f.snapshotIDList = f.snapshotIDList[:0]
// configure gofsutil; we use a mock interface
gofsutil.UseMockFS()
gofsutil.GOFSMock.InduceBindMountError = false
gofsutil.GOFSMock.InduceMountError = false
gofsutil.GOFSMock.InduceGetMountsError = false
gofsutil.GOFSMock.InduceDevMountsError = false
gofsutil.GOFSMock.InduceUnmountError = false
gofsutil.GOFSMock.InduceFormatError = false
gofsutil.GOFSMock.InduceGetDiskFormatError = false
gofsutil.GOFSMock.InduceGetDiskFormatType = ""
gofsutil.GOFSMockMounts = gofsutil.GOFSMockMounts[:0]
// set induced errors
inducedErrors.badVolumeIdentifier = false
inducedErrors.invalidVolumeID = false
inducedErrors.noVolumeID = false
inducedErrors.differentVolumeID = false
inducedErrors.noNodeName = false
inducedErrors.noNodeID = false
inducedErrors.omitVolumeCapability = false
inducedErrors.omitAccessMode = false
// initialize volume and export existence status
stepHandlersErrors.ExportNotFoundError = true
stepHandlersErrors.VolumeNotExistError = true
// Get the httptest mock handler. Only set
// a new server if there isn't one already.
handler := getHandler()
// Get or reuse the cached service
f.getService()
clusterConfig := f.service.getIsilonClusterConfig(clusterName1)
if handler != nil && os.Getenv("CSI_ISILON_ENDPOINT") == "" {
if f.server == nil {
f.server = httptest.NewServer(handler)
}
log.Printf("server url: %s\n", f.server.URL)
clusterConfig.EndpointURL = f.server.URL
//f.service.opts.EndpointURL = f.server.URL
} else {
f.server = nil
}
isiSvc, _ := f.service.GetIsiService(context.Background(), clusterConfig)
updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1)
updatedClusterConfig.(*IsilonClusterConfig).isiSvc = isiSvc
f.service.isiClusters.Store(clusterName1, updatedClusterConfig)
f.checkGoRoutines("end aIsilonService")
f.service.logServiceStats()
return nil
}
func (f *feature) renderOneFSAPIUnreachable() error {
testControllerHasNoConnection = true
testNodeHasNoConnection = true
return nil
}
func (f *feature) enableQuota() error {
f.service.opts.QuotaEnabled = true
return nil
}
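// getService builds a controller-mode service with a single default cluster config;
// endpoint, credentials, isiPath and access zone can be overridden through the
// CSI_ISILON_* environment variables.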
func (f *feature) getService() *service {
testControllerHasNoConnection = false
testNodeHasNoConnection = false
svc := new(service)
var opts Opts
opts.AccessZone = "System"
opts.Path = "/ifs/data/csi-isilon"
opts.Insecure = true
opts.DebugEnabled = true
opts.Verbose = 1
newConfig := IsilonClusterConfig{}
newConfig.ClusterName = clusterName1
newConfig.IsiIP = "127.0.0.1"
newConfig.IsiPort = "8080"
newConfig.EndpointURL = "http://127.0.0.1"
newConfig.User = "blah"
newConfig.Password = "blah"
newConfig.IsiInsecure = &opts.Insecure
newConfig.IsiPath = "/ifs/data/csi-isilon"
newConfig.IsDefaultCluster = true
if os.Getenv("CSI_ISILON_ENDPOINT") != "" {
newConfig.EndpointURL = os.Getenv("CSI_ISILON_ENDPOINT")
}
if os.Getenv("CSI_ISILON_USERID") != "" {
newConfig.User = os.Getenv("CSI_ISILON_USERID")
}
if os.Getenv("CSI_ISILON_PASSWORD") != "" {
newConfig.Password = os.Getenv("CSI_ISILON_PASSWORD")
}
if os.Getenv("CSI_ISILON_PATH") != "" {
newConfig.IsiPath = os.Getenv("CSI_ISILON_PATH")
}
if os.Getenv("CSI_ISILON_ZONE") != "" {
opts.AccessZone = os.Getenv("CSI_ISILON_ZONE")
}
svc.opts = opts
svc.mode = "controller"
f.service = svc
f.service.nodeID = fmt.Sprintf("k8s-rhel76-qual=#=#=1.2.3.4=#=#=%s", clusterName1)
f.service.nodeIP = "1.2.3.4"
f.service.defaultIsiClusterName = clusterName1
f.service.isiClusters = new(sync.Map)
f.service.isiClusters.Store(newConfig.ClusterName, &newConfig)
utils.ConfigureLogger(opts.DebugEnabled)
return svc
}
func (f *feature) iSetEmptyPassword() error {
cluster, _ := f.service.isiClusters.Load(clusterName1)
cluster.(*IsilonClusterConfig).Password = ""
f.service.isiClusters.Store(clusterName1, cluster)
return nil
}
func (f *feature) checkGoRoutines(tag string) {
goroutines := runtime.NumGoroutine()
fmt.Printf("goroutines %s new %d old groutines %d\n", tag, goroutines, f.nGoRoutines)
f.nGoRoutines = goroutines
}
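// FeatureContext registers every Gherkin step used by the feature files with its
// Go implementation.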
func FeatureContext(s *godog.Suite) {
f := &feature{}
s.Step(`^a Isilon service$`, f.aIsilonService)
s.Step(`^a Isilon service with params "([^"]*)" "([^"]*)"$`, f.aIsilonServiceWithParams)
s.Step(`^a Isilon service with custom topology "([^"]*)" "([^"]*)"$`, f.aIsilonServiceWithParamsForCustomTopology)
s.Step(`^a Isilon service with custom topology and no label "([^"]*)" "([^"]*)"$`, f.aIsilonServiceWithParamsForCustomTopologyNoLabel)
s.Step(`^I render Isilon service unreachable$`, f.renderOneFSAPIUnreachable)
s.Step(`^I enable quota$`, f.enableQuota)
s.Step(`^I call GetPluginInfo$`, f.iCallGetPluginInfo)
s.Step(`^a valid GetPlugInfoResponse is returned$`, f.aValidGetPlugInfoResponseIsReturned)
s.Step(`^I call GetPluginCapabilities$`, f.iCallGetPluginCapabilities)
s.Step(`^a valid GetPluginCapabilitiesResponse is returned$`, f.aValidGetPluginCapabilitiesResponseIsReturned)
s.Step(`^I call Probe$`, f.iCallProbe)
s.Step(`^I call autoProbe$`, f.iCallAutoProbe)
s.Step(`^a valid ProbeResponse is returned$`, f.aValidProbeResponseIsReturned)
s.Step(`^an invalid ProbeResponse is returned$`, f.anInvalidProbeResponseIsReturned)
s.Step(`^I set empty password for Isilon service$`, f.iSetEmptyPassword)
s.Step(`^I call CreateVolume "([^"]*)"$`, f.iCallCreateVolume)
s.Step(`^I call CreateVolume with persistent metadata "([^"]*)"$`, f.iCallCreateVolumeWithPersistentMetadata)
s.Step(`^I call CreateVolume with params "([^"]*)" (-?\d+) "([^"]*)" "([^"]*)" "([^"]*)" "([^"]*)"$`, f.iCallCreateVolumeWithParams)
s.Step(`^I call DeleteVolume "([^"]*)"$`, f.iCallDeleteVolume)
s.Step(`^a valid CreateVolumeResponse is returned$`, f.aValidCreateVolumeResponseIsReturned)
s.Step(`^a valid DeleteVolumeResponse is returned$`, f.aValidDeleteVolumeResponseIsReturned)
s.Step(`^I induce error "([^"]*)"$`, f.iInduceError)
s.Step(`^the error contains "([^"]*)"$`, f.theErrorContains)
s.Step(`^I call ControllerGetCapabilities$`, f.iCallControllerGetCapabilities)
s.Step(`^a valid ControllerGetCapabilitiesResponse is returned$`, f.aValidControllerGetCapabilitiesResponseIsReturned)
s.Step(`^I call ValidateVolumeCapabilities with voltype "([^"]*)" access "([^"]*)"$`, f.iCallValidateVolumeCapabilitiesWithVoltypeAccess)
s.Step(`^I call GetCapacity$`, f.iCallGetCapacity)
s.Step(`^I call GetCapacity with params "([^"]*)"$`, f.iCallGetCapacityWithParams)
s.Step(`^a valid GetCapacityResponse is returned$`, f.aValidGetCapacityResponseIsReturned)
s.Step(`^I call GetCapacity with Invalid access mode$`, f.iCallGetCapacityWithInvalidAccessMode)
s.Step(`^I call NodeGetInfo$`, f.iCallNodeGetInfo)
s.Step(`^a valid NodeGetInfoResponse is returned$`, f.aValidNodeGetInfoResponseIsReturned)
s.Step(`^I call NodeGetCapabilities$`, f.iCallNodeGetCapabilities)
s.Step(`^a valid NodeGetCapabilitiesResponse is returned$`, f.aValidNodeGetCapabilitiesResponseIsReturned)
s.Step(`^I have a Node "([^"]*)" with AccessZone$`, f.iHaveANodeWithAccessZone)
s.Step(`^I call ControllerPublishVolume with "([^"]*)" to "([^"]*)"$`, f.iCallControllerPublishVolumeWithTo)
s.Step(`^a valid ControllerPublishVolumeResponse is returned$`, f.aValidControllerPublishVolumeResponseIsReturned)
s.Step(`^a controller published volume$`, f.aControllerPublishedVolume)
s.Step(`^a capability with voltype "([^"]*)" access "([^"]*)"$`, f.aCapabilityWithVoltypeAccess)
s.Step(`^I call NodePublishVolume$`, f.iCallNodePublishVolume)
s.Step(`^I call EphemeralNodePublishVolume$`, f.iCallEphemeralNodePublishVolume)
s.Step(`^get Node Publish Volume Request$`, f.getNodePublishVolumeRequest)
s.Step(`^I change the target path$`, f.iChangeTheTargetPath)
s.Step(`^I mark request read only$`, f.iMarkRequestReadOnly)
s.Step(`^I call NodeStageVolume with name "([^"]*)" and access type "([^"]*)"$`, f.iCallNodeStageVolume)
s.Step(`^I call ControllerPublishVolume with name "([^"]*)" and access type "([^"]*)" to "([^"]*)"$`, f.iCallControllerPublishVolume)
s.Step(`^a valid NodeStageVolumeResponse is returned$`, f.aValidNodeStageVolumeResponseIsReturned)
s.Step(`^I call NodeUnstageVolume with name "([^"]*)"$`, f.iCallNodeUnstageVolume)
s.Step(`^I call ControllerUnpublishVolume with name "([^"]*)" and access type "([^"]*)" to "([^"]*)"$`, f.iCallControllerUnPublishVolume)
s.Step(`^a valid NodeUnstageVolumeResponse is returned$`, f.aValidNodeUnstageVolumeResponseIsReturned)
s.Step(`^a valid ControllerUnpublishVolumeResponse is returned$`, f.aValidControllerUnpublishVolumeResponseIsReturned)
s.Step(`^I call ListVolumes with max entries (-?\d+) starting token "([^"]*)"$`, f.iCallListVolumesWithMaxEntriesStartingToken)
s.Step(`^a valid ListVolumesResponse is returned$`, f.aValidListVolumesResponseIsReturned)
s.Step(`^I call NodeUnpublishVolume$`, f.iCallNodeUnpublishVolume)
s.Step(`^I call EphemeralNodeUnpublishVolume$`, f.iCallEphemeralNodeUnpublishVolume)
s.Step(`^a valid NodeUnpublishVolumeResponse is returned$`, f.aValidNodeUnpublishVolumeResponseIsReturned)
s.Step(`^I call CreateSnapshot "([^"]*)" "([^"]*)" "([^"]*)"$`, f.iCallCreateSnapshot)
s.Step(`^a valid CreateSnapshotResponse is returned$`, f.aValidCreateSnapshotResponseIsReturned)
s.Step(`^I call DeleteSnapshot "([^"]*)"$`, f.iCallDeleteSnapshot)
s.Step(`^I call CreateVolumeFromSnapshot "([^"]*)" "([^"]*)"$`, f.iCallCreateVolumeFromSnapshot)
s.Step(`^I call CreateVolumeFromVolume "([^"]*)" "([^"]*)"$`, f.iCallCreateVolumeFromVolume)
s.Step(`^I call initialize real isilon service$`, f.iCallInitializeRealIsilonService)
s.Step(`^I call logStatistics (\d+) times$`, f.iCallLogStatisticsTimes)
s.Step(`^I call BeforeServe$`, f.iCallBeforeServe)
s.Step(`^I call CreateQuota in isiService with negative sizeInBytes$`, f.ICallCreateQuotaInIsiServiceWithNegativeSizeInBytes)
s.Step(`^I call get export related functions in isiService$`, f.iCallGetExportRelatedFunctionsInIsiService)
s.Step(`^I call unimplemented functions$`, f.iCallUnimplementedFunctions)
s.Step(`^I call init Service object$`, f.iCallInitServiceObject)
s.Step(`^I call ControllerExpandVolume "([^"]*)" "(\d+)"$`, f.iCallControllerExpandVolume)
s.Step(`^a valid ControllerExpandVolumeResponse is returned$`, f.aValidControllerExpandVolumeResponseIsReturned)
}
// GetPluginInfo
func (f *feature) iCallGetPluginInfo() error {
req := new(csi.GetPluginInfoRequest)
f.getPluginInfoResponse, f.err = f.service.GetPluginInfo(context.Background(), req)
if f.err != nil {
return f.err
}
return nil
}
func (f *feature) aValidGetPlugInfoResponseIsReturned() error {
rep := f.getPluginInfoResponse
url := rep.GetManifest()["url"]
if rep.GetName() == "" || rep.GetVendorVersion() == "" || url == "" {
return errors.New("Expected GetPluginInfo to return name and version")
}
log.Printf("Name %s Version %s URL %s", rep.GetName(), rep.GetVendorVersion(), url)
return nil
}
func (f *feature) iCallGetPluginCapabilities() error {
req := new(csi.GetPluginCapabilitiesRequest)
f.getPluginCapabilitiesResponse, f.err = f.service.GetPluginCapabilities(context.Background(), req)
if f.err != nil {
return f.err
}
return nil
}
func (f *feature) aValidGetPluginCapabilitiesResponseIsReturned() error {
rep := f.getPluginCapabilitiesResponse
capabilities := rep.GetCapabilities()
var foundController bool
for _, capability := range capabilities {
if capability.GetService().GetType() == csi.PluginCapability_Service_CONTROLLER_SERVICE {
foundController = true
}
}
if !foundController {
return errors.New("Expected PluginCapabilitiesResponse to contain CONTROLLER_SERVICE")
}
return nil
}
func (f *feature) iCallProbe() error {
req := new(csi.ProbeRequest)
f.checkGoRoutines("before probe")
f.probeResponse, f.err = f.service.Probe(context.Background(), req)
f.checkGoRoutines("after probe")
return nil
}
func (f *feature) iCallAutoProbe() error {
f.checkGoRoutines("before auto probe")
f.err = f.service.autoProbe(context.Background(), f.service.getIsilonClusterConfig(clusterName1))
f.checkGoRoutines("after auto probe")
return nil
}
func (f *feature) aValidProbeResponseIsReturned() error {
if f.probeResponse.GetReady().GetValue() != true {
return errors.New("Probe returned 'Ready': false")
}
return nil
}
func (f *feature) anInvalidProbeResponseIsReturned() error {
if f.probeResponse.GetReady().GetValue() != false {
return errors.New("Probe returned 'Ready': true")
}
return nil
}
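// getTypicalCreateVolumeRequest builds an 8 GiB single-node-writer mount request
// against the default access zone and isiPath.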
func getTypicalCreateVolumeRequest() *csi.CreateVolumeRequest {
req := new(csi.CreateVolumeRequest)
req.Name = "volume1"
capacityRange := new(csi.CapacityRange)
capacityRange.RequiredBytes = 8 * 1024 * 1024 * 1024
req.CapacityRange = capacityRange
mount := new(csi.VolumeCapability_MountVolume)
capability := new(csi.VolumeCapability)
accessType := new(csi.VolumeCapability_Mount)
accessType.Mount = mount
capability.AccessType = accessType
accessMode := new(csi.VolumeCapability_AccessMode)
accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
capability.AccessMode = accessMode
capabilities := make([]*csi.VolumeCapability, 0)
capabilities = append(capabilities, capability)
parameters := make(map[string]string)
parameters[AccessZoneParam] = "System"
parameters[IsiPathParam] = "/ifs/data/csi-isilon"
req.Parameters = parameters
req.VolumeCapabilities = capabilities
return req
}
func getCreateVolumeRequestWithMetaData() *csi.CreateVolumeRequest {
req := new(csi.CreateVolumeRequest)
req.Name = "volume1"
capacityRange := new(csi.CapacityRange)
capacityRange.RequiredBytes = 8 * 1024 * 1024 * 1024
req.CapacityRange = capacityRange
mount := new(csi.VolumeCapability_MountVolume)
capability := new(csi.VolumeCapability)
accessType := new(csi.VolumeCapability_Mount)
accessType.Mount = mount
capability.AccessType = accessType
accessMode := new(csi.VolumeCapability_AccessMode)
accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
capability.AccessMode = accessMode
capabilities := make([]*csi.VolumeCapability, 0)
capabilities = append(capabilities, capability)
parameters := make(map[string]string)
parameters[AccessZoneParam] = "System"
parameters[IsiPathParam] = "/ifs/data/csi-isilon"
parameters[csiPersistentVolumeName] = "pv-name"
parameters[csiPersistentVolumeClaimName] = "pv-claimname"
parameters[csiPersistentVolumeClaimNamespace] = "pv-namespace"
req.Parameters = parameters
req.VolumeCapabilities = capabilities
return req
}
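// getCreateVolumeRequestWithParams is like getTypicalCreateVolumeRequest but lets the
// scenario pick size, access zone, isiPath, AzServiceIP and cluster name; passing
// "none" omits that parameter.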
func getCreateVolumeRequestWithParams(rangeInGiB int64, accessZone, isiPath, AzServiceIP, clusterName string) *csi.CreateVolumeRequest {
req := new(csi.CreateVolumeRequest)
req.Name = "volume1"
capacityRange := new(csi.CapacityRange)
capacityRange.RequiredBytes = rangeInGiB * 1024 * 1024 * 1024
req.CapacityRange = capacityRange
mount := new(csi.VolumeCapability_MountVolume)
capability := new(csi.VolumeCapability)
accessType := new(csi.VolumeCapability_Mount)
accessType.Mount = mount
capability.AccessType = accessType
accessMode := new(csi.VolumeCapability_AccessMode)
accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
capability.AccessMode = accessMode
capabilities := make([]*csi.VolumeCapability, 0)
capabilities = append(capabilities, capability)
parameters := make(map[string]string)
if accessZone != "none" {
parameters[AccessZoneParam] = accessZone
}
if isiPath != "none" {
parameters[IsiPathParam] = isiPath
}
if AzServiceIP != "none" {
parameters[AzServiceIPParam] = AzServiceIP
}
if clusterName != "none" {
parameters[ClusterNameParam] = clusterName
}
parameters[csiPersistentVolumeName] = "pv-name"
parameters[csiPersistentVolumeClaimName] = "pv-claimname"
parameters[csiPersistentVolumeClaimNamespace] = "pv-namespace"
req.Parameters = parameters
req.VolumeCapabilities = capabilities
return req
}
func getTypicalDeleteVolumeRequest() *csi.DeleteVolumeRequest {
req := new(csi.DeleteVolumeRequest)
req.VolumeId = "volume1"
return req
}
func getTypicalNodeStageVolumeRequest(accessType string) *csi.NodeStageVolumeRequest {
req := new(csi.NodeStageVolumeRequest)
volCtx := make(map[string]string)
req.VolumeContext = volCtx
req.VolumeId = "volume2"
capability := new(csi.VolumeCapability)
if !inducedErrors.omitAccessMode {
capability.AccessMode = getAccessMode(accessType)
}
req.VolumeCapability = capability
return req
}
func getTypicalNodeUnstageVolumeRequest(volID string) *csi.NodeUnstageVolumeRequest {
req := new(csi.NodeUnstageVolumeRequest)
req.VolumeId = volID
return req
}
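// getAccessMode maps the access strings used in the feature files to CSI access
// modes; anything unrecognized leaves the zero value (UNKNOWN).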
func getAccessMode(accessType string) *csi.VolumeCapability_AccessMode {
accessMode := new(csi.VolumeCapability_AccessMode)
switch accessType {
case "single-writer":
accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
case "multiple-reader":
accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case "multiple-writer":
accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
case "single-reader":
accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY
case "unknown":
accessMode.Mode = csi.VolumeCapability_AccessMode_UNKNOWN
}
return accessMode
}
func (f *feature) iCallCreateVolume(name string) error {
req := getTypicalCreateVolumeRequest()
f.createVolumeRequest = req
req.Name = name
f.createVolumeResponse, f.err = f.service.CreateVolume(context.Background(), req)
if f.err != nil {
log.Printf("CreateVolume call failed: %s\n", f.err.Error())
}
if f.createVolumeResponse != nil {
log.Printf("vol id %s\n", f.createVolumeResponse.GetVolume().VolumeId)
stepHandlersErrors.ExportNotFoundError = false
stepHandlersErrors.VolumeNotExistError = false
}
return nil
}
func (f *feature) iCallCreateVolumeWithPersistentMetadata(name string) error {
req := getCreateVolumeRequestWithMetaData()
f.createVolumeRequest = req
req.Name = name
f.createVolumeResponse, f.err = f.service.CreateVolume(context.Background(), req)
if f.err != nil {
log.Printf("CreateVolume call failed: %s\n", f.err.Error())
}
if f.createVolumeResponse != nil {
log.Printf("vol id %s\n", f.createVolumeResponse.GetVolume().VolumeId)
stepHandlersErrors.ExportNotFoundError = false
stepHandlersErrors.VolumeNotExistError = false
}
return nil
}
func (f *feature) iCallCreateVolumeWithParams(name string, rangeInGiB int, accessZone, isiPath, AzServiceIP, clusterName string) error {
req := getCreateVolumeRequestWithParams(int64(rangeInGiB), accessZone, isiPath, AzServiceIP, clusterName)
f.createVolumeRequest = req
req.Name = name
stepHandlersErrors.ExportNotFoundError = true
stepHandlersErrors.VolumeNotExistError = true
f.createVolumeResponse, f.err = f.service.CreateVolume(context.Background(), req)
if f.err != nil {
log.Printf("CreateVolume call failed: %s\n", f.err.Error())
}
if f.createVolumeResponse != nil {
log.Printf("vol id %s\n", f.createVolumeResponse.GetVolume().VolumeId)
stepHandlersErrors.ExportNotFoundError = false
stepHandlersErrors.VolumeNotExistError = false
}
return nil
}
func (f *feature) iCallDeleteVolume(name string) error {
if f.deleteVolumeRequest == nil {
req := getTypicalDeleteVolumeRequest()
f.deleteVolumeRequest = req
}
req := f.deleteVolumeRequest
req.VolumeId = name
ctx, log, _ := GetRunIDLog(context.Background())
f.deleteVolumeResponse, f.err = f.service.DeleteVolume(ctx, req)
if f.err != nil {
log.Printf("DeleteVolume call failed: '%v'\n", f.err)
}
return nil
}
func (f *feature) aValidCreateVolumeResponseIsReturned() error {
if f.err != nil {
return f.err
}
f.volumeIDList = append(f.volumeIDList, f.createVolumeResponse.Volume.VolumeId)
fmt.Printf("volume '%s'\n",
f.createVolumeResponse.Volume.VolumeContext["Name"])
return nil
}
func (f *feature) aValidDeleteVolumeResponseIsReturned() error {
if f.err != nil {
return f.err
}
return nil
}
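// iInduceError flips the named fault-injection flag; "none" is a no-op and an
// unknown name fails the step.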
func (f *feature) iInduceError(errtype string) error {
log.Printf("set induce error %s\n", errtype)
switch errtype {
case "InstancesError":
stepHandlersErrors.InstancesError = true
case "VolInstanceError":
stepHandlersErrors.VolInstanceError = true
case "StatsError":
stepHandlersErrors.StatsError = true
case "NoNodeID":
inducedErrors.noNodeID = true
case "OmitVolumeCapability":
inducedErrors.omitVolumeCapability = true
case "noIsiService":
inducedErrors.noIsiService = true
case "autoProbeNotEnabled":
inducedErrors.autoProbeNotEnabled = true
case "autoProbeFailed":
updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1)
updatedClusterConfig.(*IsilonClusterConfig).isiSvc = nil
f.service.isiClusters.Store(clusterName1, updatedClusterConfig)
f.service.opts.AutoProbe = false
case "GOFSMockDevMountsError":
gofsutil.GOFSMock.InduceDevMountsError = true
case "GOFSMockMountError":
gofsutil.GOFSMock.InduceMountError = true
case "GOFSMockGetMountsError":
gofsutil.GOFSMock.InduceGetMountsError = true
case "GOFSMockUnmountError":
gofsutil.GOFSMock.InduceUnmountError = true
case "GOFSMockGetDiskFormatError":
gofsutil.GOFSMock.InduceGetDiskFormatError = true
case "GOFSMockGetDiskFormatType":
gofsutil.GOFSMock.InduceGetDiskFormatType = "unknown-fs"
case "GOFSMockFormatError":
gofsutil.GOFSMock.InduceFormatError = true
case "GOFSWWNToDevicePathError":
gofsutil.GOFSMock.InduceWWNToDevicePathError = true
case "GOFSRmoveBlockDeviceError":
gofsutil.GOFSMock.InduceRemoveBlockDeviceError = true
case "NodePublishNoTargetPath":
f.nodePublishVolumeRequest.TargetPath = ""
case "NodeUnpublishNoTargetPath":
f.nodeUnpublishVolumeRequest.TargetPath = ""
case "NodePublishNoVolumeCapability":
f.nodePublishVolumeRequest.VolumeCapability = nil
case "NodePublishNoAccessMode":
f.nodePublishVolumeRequest.VolumeCapability.AccessMode = nil
case "NodePublishNoAccessType":
f.nodePublishVolumeRequest.VolumeCapability.AccessType = nil
case "NodePublishFileTargetNotDir":
f.nodePublishVolumeRequest.TargetPath = datafile
case "BadVolumeIdentifier":
inducedErrors.badVolumeIdentifier = true
case "TargetNotCreatedForNodePublish":
err := os.Remove(datafile)
if err != nil {
return nil
}
//cmd := exec.Command("rm", "-rf", datadir)
//_, err = cmd.CombinedOutput()
err = os.RemoveAll(datadir)
if err != nil {
return err
}
case "OmitAccessMode":
inducedErrors.omitAccessMode = true
case "TargetNotCreatedForNodeUnpublish":
err := os.RemoveAll(datadir)
if err != nil {
return nil
}
case "GetSnapshotError":
stepHandlersErrors.GetSnapshotError = true
case "DeleteSnapshotError":
stepHandlersErrors.DeleteSnapshotError = true
case "CreateQuotaError":
stepHandlersErrors.CreateQuotaError = true
case "CreateExportError":
stepHandlersErrors.CreateExportError = true
case "UpdateQuotaError":
stepHandlersErrors.UpdateQuotaError = true
case "GetExportInternalError":
stepHandlersErrors.GetExportInternalError = true
case "VolumeNotExistError":
stepHandlersErrors.VolumeNotExistError = true
case "ExportNotFoundError":
stepHandlersErrors.ExportNotFoundError = true
case "VolumeExists":
stepHandlersErrors.VolumeNotExistError = false
case "ExportExists":
stepHandlersErrors.ExportNotFoundError = false
case "ControllerHasNoConnectionError":
testControllerHasNoConnection = true
case "NodeHasNoConnectionError":
testNodeHasNoConnection = true
case "GetExportByIDNotFoundError":
stepHandlersErrors.GetExportByIDNotFoundError = true
case "UnexportError":
stepHandlersErrors.UnexportError = true
case "CreateSnapshotError":
stepHandlersErrors.CreateSnapshotError = true
case "DeleteQuotaError":
stepHandlersErrors.DeleteQuotaError = true
case "QuotaNotFoundError":
stepHandlersErrors.QuotaNotFoundError = true
case "DeleteVolumeError":
stepHandlersErrors.DeleteVolumeError = true
case "none":
default:
return fmt.Errorf("Don't know how to induce error %q", errtype)
}
return nil
}
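// theErrorContains asserts on f.err: "none" demands success, otherwise the error
// text must contain at least one of the expected substrings.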
func (f *feature) theErrorContains(arg1 string) error {
// If arg1 is none, we expect no error, any error received is unexpected
clearErrors()
if arg1 == "none" {
if f.err == nil {
return nil
}
return fmt.Errorf("Unexpected error: %s", f.err)
}
// We expected an error...
if f.err == nil {
return fmt.Errorf("Expected error to contain %s but no error", arg1)
}
// Allow for multiple possible matches, separated by @@. This was necessary
// because Windows and Linux sometimes return different error strings for
// gofsutil operations. Note @@ was used instead of || because the Gherkin
// parser is not smart enough to ignore vertical bars within a quoted string,
// so if || is used it thinks the row's cell count is wrong.
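// For example, a step such as:
//   Then the error contains "no such file or directory@@The system cannot find the path"
// passes if either phrase appears in the error (the messages here only illustrate
// the separator; they are not taken from the feature files).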
possibleMatches := strings.Split(arg1, "@@")
for _, possibleMatch := range possibleMatches {
if strings.Contains(f.err.Error(), possibleMatch) {
return nil
}
}
return fmt.Errorf("Expected error to contain %s but it was %s", arg1, f.err.Error())
}
func (f *feature) iCallControllerGetCapabilities() error {
req := new(csi.ControllerGetCapabilitiesRequest)
f.controllerGetCapabilitiesResponse, f.err = f.service.ControllerGetCapabilities(context.Background(), req)
if f.err != nil {
log.Printf("ControllerGetCapabilities call failed: %s\n", f.err.Error())
return f.err
}
return nil
}
func (f *feature) aValidControllerGetCapabilitiesResponseIsReturned() error {
rep := f.controllerGetCapabilitiesResponse
if rep != nil {
if rep.Capabilities == nil {
return errors.New("no capabilities returned in ControllerGetCapabilitiesResponse")
}
count := 0
for _, cap := range rep.Capabilities {
rpcType := cap.GetRpc().Type
switch rpcType {
case csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME:
count = count + 1
case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME:
count = count + 1
case csi.ControllerServiceCapability_RPC_LIST_VOLUMES:
count = count + 1
case csi.ControllerServiceCapability_RPC_GET_CAPACITY:
count = count + 1
case csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT:
count = count + 1
case csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS:
count = count + 1
case csi.ControllerServiceCapability_RPC_CLONE_VOLUME:
count = count + 1
case csi.ControllerServiceCapability_RPC_EXPAND_VOLUME:
count = count + 1
default:
return fmt.Errorf("received unexpected capability: %v", rpcType)
}
}
if count != 7 /*7*/ {
return errors.New("Did not retrieve all the expected capabilities")
}
return nil
}
return errors.New("expected ControllerGetCapabilitiesResponse but didn't get one")
}
func (f *feature) iCallValidateVolumeCapabilitiesWithVoltypeAccess(voltype, access string) error {
req := new(csi.ValidateVolumeCapabilitiesRequest)
if inducedErrors.invalidVolumeID || f.createVolumeResponse == nil {
req.VolumeId = "000-000"
} else {
req.VolumeId = f.createVolumeResponse.GetVolume().VolumeId
}
// Construct the volume capabilities
capability := new(csi.VolumeCapability)
switch voltype {
case "block":
block := new(csi.VolumeCapability_BlockVolume)
accessType := new(csi.VolumeCapability_Block)
accessType.Block = block
capability.AccessType = accessType
case "mount":
mount := new(csi.VolumeCapability_MountVolume)
accessType := new(csi.VolumeCapability_Mount)
accessType.Mount = mount
capability.AccessType = accessType
}
accessMode := new(csi.VolumeCapability_AccessMode)
switch access {
case "single-writer":
accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
case "single-reader":
accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY
case "multi-writer":
accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
case "multi-reader":
accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case "multi-node-single-writer":
accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER
}
capability.AccessMode = accessMode
capabilities := make([]*csi.VolumeCapability, 0)
capabilities = append(capabilities, capability)
req.VolumeCapabilities = capabilities
log.Printf("Calling ValidateVolumeCapabilities")
ctx, _, _ := GetRunIDLog(context.Background())
f.validateVolumeCapabilitiesResponse, f.err = f.service.ValidateVolumeCapabilities(ctx, req)
if f.err != nil {
return nil
}
if f.validateVolumeCapabilitiesResponse.Message != "" {
f.err = errors.New(f.validateVolumeCapabilitiesResponse.Message)
} else {
// Validate we get a Confirmed structure with VolumeCapabilities
if f.validateVolumeCapabilitiesResponse.Confirmed == nil {
return errors.New("Expected ValidateVolumeCapabilities to have a Confirmed structure but it did not")
}
confirmed := f.validateVolumeCapabilitiesResponse.Confirmed
if len(confirmed.VolumeCapabilities) <= 0 {
return errors.New("Expected ValidateVolumeCapabilities to return the confirmed VolumeCapabilities but it did not")
}
}
return nil
}
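// clearErrors resets every step-handler and induced-error flag to its default so
// state does not leak between scenarios.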
func clearErrors() {
stepHandlersErrors.ExportNotFoundError = true
stepHandlersErrors.VolumeNotExistError = true
stepHandlersErrors.InstancesError = false
stepHandlersErrors.VolInstanceError = false
stepHandlersErrors.FindVolumeIDError = false
stepHandlersErrors.GetVolByIDError = false
stepHandlersErrors.GetStoragePoolsError = false
stepHandlersErrors.GetStatisticsError = false
stepHandlersErrors.CreateSnapshotError = false
stepHandlersErrors.RemoveVolumeError = false
stepHandlersErrors.StatsError = false
stepHandlersErrors.StartingTokenInvalidError = false
stepHandlersErrors.GetSnapshotError = false
stepHandlersErrors.DeleteSnapshotError = false
stepHandlersErrors.ExportNotFoundError = false
stepHandlersErrors.VolumeNotExistError = false
stepHandlersErrors.CreateQuotaError = false
stepHandlersErrors.UpdateQuotaError = false
stepHandlersErrors.CreateExportError = false
stepHandlersErrors.GetExportInternalError = false
stepHandlersErrors.GetExportByIDNotFoundError = false
stepHandlersErrors.UnexportError = false
stepHandlersErrors.DeleteQuotaError = false
stepHandlersErrors.QuotaNotFoundError = false
stepHandlersErrors.DeleteVolumeError = false
inducedErrors.noIsiService = false
inducedErrors.autoProbeNotEnabled = false
}
func getTypicalCapacityRequest(valid bool) *csi.GetCapacityRequest {
req := new(csi.GetCapacityRequest)
// Construct the volume capabilities
capability := new(csi.VolumeCapability)
// Set FS type to mount volume
mount := new(csi.VolumeCapability_MountVolume)
accessType := new(csi.VolumeCapability_Mount)
accessType.Mount = mount
capability.AccessType = accessType
// A single mode writer
accessMode := new(csi.VolumeCapability_AccessMode)
if valid {
accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
} else {
accessMode.Mode = csi.VolumeCapability_AccessMode_UNKNOWN
}
capability.AccessMode = accessMode
capabilities := make([]*csi.VolumeCapability, 0)
capabilities = append(capabilities, capability)
req.VolumeCapabilities = capabilities
return req
}
func (f *feature) iCallGetCapacity() error {
header := metadata.New(map[string]string{"csi.requestid": "1"})
ctx, _, _ := GetRunIDLog(context.Background())
ctx = metadata.NewIncomingContext(ctx, header)
req := getTypicalCapacityRequest(true)
f.getCapacityResponse, f.err = f.service.GetCapacity(ctx, req)
if f.err != nil {
log.Printf("GetCapacity call failed: %s\n", f.err.Error())
return nil
}
return nil
}
func (f *feature) iCallGetCapacityWithParams(clusterName string) error {
header := metadata.New(map[string]string{"csi.requestid": "1"})
ctx := metadata.NewIncomingContext(context.Background(), header)
req := getTypicalCapacityRequest(true)
params := make(map[string]string)
params[ClusterNameParam] = clusterName
req.Parameters = params
f.getCapacityResponse, f.err = f.service.GetCapacity(ctx, req)
if f.err != nil {
log.Printf("GetCapacity call failed: %s\n", f.err.Error())
return nil
}
return nil
}
func (f *feature) iCallGetCapacityWithInvalidAccessMode() error {
header := metadata.New(map[string]string{"csi.requestid": "1"})
ctx := metadata.NewIncomingContext(context.Background(), header)
req := getTypicalCapacityRequest(false)
f.getCapacityResponse, f.err = f.service.GetCapacity(ctx, req)
if f.err != nil {
log.Printf("GetCapacity call failed: %s\n", f.err.Error())
return nil
}
return nil
}
func (f *feature) aValidGetCapacityResponseIsReturned() error {
if f.err != nil {
return f.err
}
if f.getCapacityResponse == nil {
return errors.New("Received null response to GetCapacity")
}
if f.getCapacityResponse.AvailableCapacity <= 0 {
return errors.New("Expected AvailableCapacity to be positive")
}
fmt.Printf("Available capacity: %d\n", f.getCapacityResponse.AvailableCapacity)
return nil
}
func (f *feature) iCallNodeGetInfo() error {
req := new(csi.NodeGetInfoRequest)
f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(context.Background(), req)
if f.err != nil {
log.Printf("NodeGetInfo call failed: %s\n", f.err.Error())
return f.err
}
return nil
}
func (f *feature) iCallNodeGetCapabilities() error {
req := new(csi.NodeGetCapabilitiesRequest)
f.nodeGetCapabilitiesResponse, f.err = f.service.NodeGetCapabilities(context.Background(), req)
if f.err != nil {
log.Printf("NodeGetCapabilities call failed: %s\n", f.err.Error())
return f.err
}
return nil
}
func (f *feature) aValidNodeGetInfoResponseIsReturned() error {
if f.err != nil {
return f.err
}
fmt.Printf("The node ID is %s\n", f.nodeGetInfoResponse.NodeId)
return nil
}
func (f *feature) aValidNodeGetCapabilitiesResponseIsReturned() error {
rep := f.nodeGetCapabilitiesResponse
if rep != nil {
if rep.Capabilities == nil {
return errors.New("No capabilities returned in NodeGetCapabilitiesResponse")
}
count := 0
for _, cap := range rep.Capabilities {
rpcType := cap.GetRpc().Type
switch rpcType {
case csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME:
count = count + 1
case csi.NodeServiceCapability_RPC_GET_VOLUME_STATS:
count = count + 1
case csi.NodeServiceCapability_RPC_EXPAND_VOLUME:
count = count + 1
default:
return fmt.Errorf("Received unexpected capability: %v", rpcType)
}
}
if count != 1 /*3*/ {
return errors.New("Did not retrieve all the expected capabilities")
}
return nil
}
return errors.New("Expected NodeGetCapabilitiesResponse but didn't get one")
}
func (f *feature) iHaveANodeWithAccessZone(nodeID string) error {
f.accessZone = "CSI-" + nodeID
return nil
}
func (f *feature) iCallControllerPublishVolumeWithTo(accessMode, nodeID string) error {
header := metadata.New(map[string]string{"csi.requestid": "1"})
ctx := metadata.NewIncomingContext(context.Background(), header)
req := f.publishVolumeRequest
if f.publishVolumeRequest == nil {
req = f.getControllerPublishVolumeRequest(accessMode, nodeID)
f.publishVolumeRequest = req
}
log.Printf("Calling controllerPublishVolume")
f.publishVolumeResponse, f.err = f.service.ControllerPublishVolume(ctx, req)
if f.err != nil {
log.Printf("PublishVolume call failed: %s\n", f.err.Error())
}
f.publishVolumeRequest = nil
return nil
}
func (f *feature) aValidControllerPublishVolumeResponseIsReturned() error {
if f.err != nil {
return errors.New("PublishVolume returned error: " + f.err.Error())
}
if f.publishVolumeResponse == nil {
return errors.New("No PublishVolumeResponse returned")
}
for key, value := range f.publishVolumeResponse.PublishContext {
fmt.Printf("PublishContext %s: %s", key, value)
}
return nil
}
func (f *feature) aValidControllerUnpublishVolumeResponseIsReturned() error {
if f.err != nil {
return errors.New("UnpublishVolume returned error: " + f.err.Error())
}
if f.unpublishVolumeResponse == nil {
return errors.New("No UnpublishVolumeResponse returned")
}
return nil
}
func (f *feature) aValidNodeStageVolumeResponseIsReturned() error {
if f.err != nil {
return errors.New("NodeStageVolume returned error: " + f.err.Error())
}
if f.nodeStageVolumeResponse == nil {
return errors.New("no NodeStageVolumeResponse is returned")
}
return nil
}
func (f *feature) aValidNodeUnstageVolumeResponseIsReturned() error {
if f.err != nil {
return errors.New("NodeUnstageVolume returned error: " + f.err.Error())
}
if f.nodeUnstageVolumeResponse == nil {
return errors.New("no NodeUnstageVolumeResponse is returned")
}
return nil
}
func (f *feature) iCallNodeUnpublishVolume() error {
req := f.nodeUnpublishVolumeRequest
if req == nil {
_ = f.getNodeUnpublishVolumeRequest()
req = f.nodeUnpublishVolumeRequest
}
if inducedErrors.badVolumeIdentifier {
req.VolumeId = "bad volume identifier"
}
fmt.Printf("Calling NodePublishVolume\n")
f.nodeUnpublishVolumeResponse, f.err = f.service.NodeUnpublishVolume(context.Background(), req)
if f.err != nil {
log.Printf("NodePublishVolume call failed: %s\n", f.err.Error())
if strings.Contains(f.err.Error(), "Target Path is required") {
// Rollback for the future calls
f.nodeUnpublishVolumeRequest.TargetPath = datadir
}
}
if f.nodeUnpublishVolumeResponse != nil {
err := os.RemoveAll(req.TargetPath)
if err != nil {
return nil
}
log.Printf("vol id %s\n", f.nodeUnpublishVolumeRequest.VolumeId)
}
return nil
}
func (f *feature) iCallEphemeralNodeUnpublishVolume() error {
req := f.nodeUnpublishVolumeRequest
if req == nil {
_ = f.getNodeUnpublishVolumeRequest()
req = f.nodeUnpublishVolumeRequest
}
if inducedErrors.badVolumeIdentifier {
req.VolumeId = "bad volume identifier"
}
fmt.Printf("Calling NodePublishVolume\n")
f.nodeUnpublishVolumeResponse, f.err = f.service.NodeUnpublishVolume(context.Background(), req)
if f.err != nil {
log.Printf("NodePublishVolume call failed: %s\n", f.err.Error())
if strings.Contains(f.err.Error(), "Target Path is required") {
// Rollback for the future calls
f.nodeUnpublishVolumeRequest.TargetPath = datadir
}
}
if f.nodeUnpublishVolumeResponse != nil {
err := os.RemoveAll(req.TargetPath)
if err != nil {
return nil
}
log.Printf("vol id %s\n", f.nodeUnpublishVolumeRequest.VolumeId)
}
return nil
}
func (f *feature) aValidNodeUnpublishVolumeResponseIsReturned() error {
if f.err != nil {
return f.err
}
return nil
}
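// getControllerPublishVolumeRequest builds a mount-type publish request, honoring the
// induced-error flags that omit the volume ID, node ID, capability or access mode.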
func (f *feature) getControllerPublishVolumeRequest(accessType, nodeID string) *csi.ControllerPublishVolumeRequest {
capability := new(csi.VolumeCapability)
mountVolume := new(csi.VolumeCapability_MountVolume)
mountVolume.MountFlags = make([]string, 0)
mount := new(csi.VolumeCapability_Mount)
mount.Mount = mountVolume
capability.AccessType = mount
if !inducedErrors.omitAccessMode {
capability.AccessMode = getAccessMode(accessType)
}
fmt.Printf("capability.AccessType %v\n", capability.AccessType)
fmt.Printf("capability.AccessMode %v\n", capability.AccessMode)
req := new(csi.ControllerPublishVolumeRequest)
if !inducedErrors.noVolumeID {
if inducedErrors.invalidVolumeID || f.createVolumeResponse == nil {
req.VolumeId = "000-000"
} else {
req.VolumeId = "volume1=_=_=19=_=_=System"
}
}
if !inducedErrors.noNodeID {
req.NodeId = nodeID
}
req.Readonly = false
if !inducedErrors.omitVolumeCapability {
req.VolumeCapability = capability
}
// add in the context
attributes := map[string]string{}
attributes[AccessZoneParam] = f.accessZone
req.VolumeContext = attributes
return req
}
func (f *feature) getControllerUnPublishVolumeRequest(accessType, nodeID string) *csi.ControllerUnpublishVolumeRequest {
capability := new(csi.VolumeCapability)
mountVolume := new(csi.VolumeCapability_MountVolume)
mountVolume.MountFlags = make([]string, 0)
mount := new(csi.VolumeCapability_Mount)
mount.Mount = mountVolume
capability.AccessType = mount
if !inducedErrors.omitAccessMode {
capability.AccessMode = getAccessMode(accessType)
}
fmt.Printf("capability.AccessType %v\n", capability.AccessType)
fmt.Printf("capability.AccessMode %v\n", capability.AccessMode)
req := new(csi.ControllerUnpublishVolumeRequest)
if !inducedErrors.noVolumeID {
if inducedErrors.invalidVolumeID || f.createVolumeResponse == nil {
req.VolumeId = "000-000"
} else {
req.VolumeId = "volume1=_=_=19=_=_=System"
}
}
if !inducedErrors.noNodeID {
req.NodeId = nodeID
}
// add in the context
attributes := map[string]string{}
attributes[AccessZoneParam] = f.accessZone
return req
}
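// aControllerPublishedVolume creates the scratch target directory and file used by
// the node publish steps and clears the mock mount table.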
func (f *feature) aControllerPublishedVolume() error {
var err error
// Make the target directory if required
_, err = os.Stat(datadir)
if err != nil {
err = os.MkdirAll(datadir, 0777)
if err != nil {
fmt.Printf("Couldn't make datadir: %s\n", datadir)
}
}
// Make the target file if required
_, err = os.Stat(datafile)
if err != nil {
file, err := os.Create(datafile)
if err != nil {
fmt.Printf("Couldn't make datafile: %s\n", datafile)
} else {
file.Close()
}
}
// Empty the mock mounts list in gofsutil
gofsutil.GOFSMockMounts = gofsutil.GOFSMockMounts[:0]
return nil
}
func (f *feature) aCapabilityWithVoltypeAccess(voltype, access string) error {
// Construct the volume capabilities
capability := new(csi.VolumeCapability)
switch voltype {
case "block":
blockVolume := new(csi.VolumeCapability_BlockVolume)
block := new(csi.VolumeCapability_Block)
block.Block = blockVolume
capability.AccessType = block
case "mount":
mountVolume := new(csi.VolumeCapability_MountVolume)
mountVolume.MountFlags = make([]string, 0)
mount := new(csi.VolumeCapability_Mount)
mount.Mount = mountVolume
capability.AccessType = mount
}
accessMode := new(csi.VolumeCapability_AccessMode)
accessMode.Mode = csi.VolumeCapability_AccessMode_UNKNOWN
fmt.Printf("Access mode '%s'", access)
switch access {
case "single-reader":
accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY
case "single-writer":
accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
case "multiple-writer":
accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
case "multiple-reader":
accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case "multiple-node-single-writer":
accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER
}
capability.AccessMode = accessMode
f.capabilities = make([]*csi.VolumeCapability, 0)
f.capabilities = append(f.capabilities, capability)
f.capability = capability
f.nodePublishVolumeRequest = nil
return nil
}
func (f *feature) iCallNodePublishVolume() error {
header := metadata.New(map[string]string{"csi.requestid": "1"})
ctx := metadata.NewIncomingContext(context.Background(), header)
req := f.nodePublishVolumeRequest
if req == nil {
_ = f.getNodePublishVolumeRequest()
req = f.nodePublishVolumeRequest
}
if inducedErrors.badVolumeIdentifier {
req.VolumeId = "bad volume identifier"
}
fmt.Printf("Calling NodePublishVolume\n")
_, err := f.service.NodePublishVolume(ctx, req)
if err != nil {
fmt.Printf("NodePublishVolume failed: %s\n", err.Error())
if f.err == nil {
f.err = err
}
} else {
fmt.Printf("NodePublishVolume completed successfully\n")
}
return nil
}
func (f *feature) iCallEphemeralNodePublishVolume() error {
header := metadata.New(map[string]string{"csi.requestid": "1"})
ctx := metadata.NewIncomingContext(context.Background(), header)
req := f.nodePublishVolumeRequest
if req == nil {
_ = f.getNodePublishVolumeRequest()
req = f.nodePublishVolumeRequest
}
f.nodePublishVolumeRequest.VolumeContext["csi.storage.k8s.io/ephemeral"] = "true"
if inducedErrors.badVolumeIdentifier {
req.VolumeId = "bad volume identifier"
}
fmt.Printf("Calling NodePublishVolume\n")
_, err := f.service.NodePublishVolume(ctx, req)
if err != nil {
fmt.Printf("NodePublishVolume failed: %s\n", err.Error())
if f.err == nil {
f.err = err
}
} else {
fmt.Printf("NodePublishVolume completed successfully\n")
}
return nil
}
func (f *feature) getNodePublishVolumeRequest() error {
req := new(csi.NodePublishVolumeRequest)
req.VolumeId = Volume1
req.Readonly = false
req.VolumeCapability = f.capability
mount := f.capability.GetMount()
if mount != nil {
req.TargetPath = datadir
}
attributes := map[string]string{
"Name": req.VolumeId,
"AccessZone": "",
"Path": f.service.opts.Path + "/" + req.VolumeId,
}
req.VolumeContext = attributes
f.nodePublishVolumeRequest = req
return nil
}
func (f *feature) getNodeUnpublishVolumeRequest() error {
req := new(csi.NodeUnpublishVolumeRequest)
req.VolumeId = Volume1
req.TargetPath = datadir
f.nodeUnpublishVolumeRequest = req
return nil
}
func (f *feature) iChangeTheTargetPath() error {
// Make the target directory if required
_, err := os.Stat(datadir2)
if err != nil {
err = os.MkdirAll(datadir2, 0777)
if err != nil {
fmt.Printf("Couldn't make datadir: %s\n", datadir2)
}
}
// Make the target file if required
_, err = os.Stat(datafile2)
if err != nil {
file, err := os.Create(datafile2)
if err != nil {
fmt.Printf("Couldn't make datafile: %s\n", datafile2)
} else {
file.Close()
}
}
req := f.nodePublishVolumeRequest
block := f.capability.GetBlock()
if block != nil {
req.TargetPath = datafile2
}
mount := f.capability.GetMount()
if mount != nil {
req.TargetPath = datadir2
}
return nil
}
func (f *feature) iMarkRequestReadOnly() error {
f.nodePublishVolumeRequest.Readonly = true
return nil
}
func (f *feature) iCallControllerPublishVolume(volID string, accessMode string, nodeID string) error {
header := metadata.New(map[string]string{"csi.requestid": "1"})
ctx := metadata.NewIncomingContext(context.Background(), header)
req := f.publishVolumeRequest
if f.publishVolumeRequest == nil {
req = f.getControllerPublishVolumeRequest(accessMode, nodeID)
f.publishVolumeRequest = req
}
// a customized volume ID can be specified to overwrite the default one
if volID != "" {
req.VolumeId = volID
}
log.Printf("Calling controllerPublishVolume")
f.publishVolumeResponse, f.err = f.service.ControllerPublishVolume(ctx, req)
if f.err != nil {
log.Printf("PublishVolume call failed: %s\n", f.err.Error())
}
f.publishVolumeRequest = nil
return nil
}
func (f *feature) iCallControllerUnPublishVolume(volID string, accessMode string, nodeID string) error {
req := f.getControllerUnPublishVolumeRequest(accessMode, nodeID)
f.unpublishVolumeRequest = req
// a customized volume ID can be specified to overwrite the default one
req.VolumeId = volID
f.unpublishVolumeResponse, f.err = f.service.ControllerUnpublishVolume(context.Background(), req)
if f.err != nil {
log.Printf("ControllerUnPublishVolume call failed: %s\n", f.err.Error())
}
if f.unpublishVolumeResponse != nil {
log.Printf("a unpublishVolumeResponse has been returned\n")
}
return nil
}
func (f *feature) iCallNodeStageVolume(volID string, accessType string) error {
req := getTypicalNodeStageVolumeRequest(accessType)
f.nodeStageVolumeRequest = req
// a customized volume ID can be specified to overwrite the default one
if volID != "" {
req.VolumeId = volID
}
f.nodeStageVolumeResponse, f.err = f.service.NodeStageVolume(context.Background(), req)
if f.err != nil {
log.Printf("NodeStageVolume call failed: %s\n", f.err.Error())
}
if f.nodeStageVolumeResponse != nil {
log.Printf("a NodeStageVolumeResponse has been returned\n")
}
return nil
}
func (f *feature) iCallNodeUnstageVolume(volID string) error {
req := getTypicalNodeUnstageVolumeRequest(volID)
f.nodeUnstageVolumeRequest = req
f.nodeUnstageVolumeResponse, f.err = f.service.NodeUnstageVolume(context.Background(), req)
if f.err != nil {
log.Printf("NodeUnstageVolume call failed: %s\n", f.err.Error())
}
if f.nodeUnstageVolumeResponse != nil {
log.Printf("a NodeUnstageVolumeResponse has been returned\n")
}
return nil
}
func (f *feature) iCallListVolumesWithMaxEntriesStartingToken(arg1 int, arg2 string) error {
req := new(csi.ListVolumesRequest)
// The starting token is not valid
if arg2 == "invalid" {
stepHandlersErrors.StartingTokenInvalidError = true
}
req.MaxEntries = int32(arg1)
req.StartingToken = arg2
f.listVolumesResponse, f.err = f.service.ListVolumes(context.Background(), req)
if f.err != nil {
log.Printf("ListVolumes call failed: %s\n", f.err.Error())
return nil
}
return nil
}
func (f *feature) aValidListVolumesResponseIsReturned() error {
if f.err != nil {
return f.err
}
fmt.Printf("The volumes are %v\n", f.listVolumesResponse.Entries)
fmt.Printf("The next token is '%s'\n", f.listVolumesResponse.NextToken)
return nil
}
func (f *feature) iCallDeleteSnapshot(snapshotID string) error {
req := new(csi.DeleteSnapshotRequest)
req.SnapshotId = snapshotID
f.deleteSnapshotRequest = req
_, err := f.service.DeleteSnapshot(context.Background(), f.deleteSnapshotRequest)
if err != nil {
log.Printf("DeleteSnapshot call failed: %s\n", err.Error())
f.err = err
return nil
}
fmt.Printf("Delete snapshot successfully\n")
return nil
}
func getCreateSnapshotRequest(srcVolumeID, name, isiPath string) *csi.CreateSnapshotRequest {
req := new(csi.CreateSnapshotRequest)
req.SourceVolumeId = srcVolumeID
req.Name = name
parameters := make(map[string]string)
if isiPath != "none" {
parameters[IsiPathParam] = isiPath
}
req.Parameters = parameters
return req
}
func (f *feature) iCallCreateSnapshot(srcVolumeID, name, isiPath string) error {
f.createSnapshotRequest = getCreateSnapshotRequest(srcVolumeID, name, isiPath)
req := f.createSnapshotRequest
f.createSnapshotResponse, f.err = f.service.CreateSnapshot(context.Background(), req)
if f.err != nil {
log.Printf("CreateSnapshot call failed: %s\n", f.err.Error())
}
if f.createSnapshotResponse != nil {
log.Printf("snapshot id %s\n", f.createSnapshotResponse.GetSnapshot().SnapshotId)
}
return nil
}
func (f *feature) aValidCreateSnapshotResponseIsReturned() error {
if f.err != nil {
return f.err
}
f.snapshotIDList = append(f.snapshotIDList, f.createSnapshotResponse.Snapshot.SnapshotId)
fmt.Printf("created snapshot id %s: source volume id %s, sizeInBytes %d, creation time %s\n",
f.createSnapshotResponse.Snapshot.SnapshotId,
f.createSnapshotResponse.Snapshot.SourceVolumeId,
f.createSnapshotResponse.Snapshot.SizeBytes,
f.createSnapshotResponse.Snapshot.CreationTime)
return nil
}
func getControllerExpandVolumeRequest(volumeID string, requiredBytes int64) *csi.ControllerExpandVolumeRequest {
return &csi.ControllerExpandVolumeRequest{
VolumeId: volumeID,
CapacityRange: &csi.CapacityRange{
RequiredBytes: requiredBytes,
LimitBytes: requiredBytes,
},
}
}
func (f *feature) iCallControllerExpandVolume(volumeID string, requiredBytes int64) error {
log.Printf("###")
f.controllerExpandVolumeRequest = getControllerExpandVolumeRequest(volumeID, requiredBytes)
req := f.controllerExpandVolumeRequest
ctx, log, _ := GetRunIDLog(context.Background())
f.controllerExpandVolumeResponse, f.err = f.service.ControllerExpandVolume(ctx, req)
if f.err != nil {
log.Printf("ControllerExpandVolume call failed: %s\n", f.err.Error())
}
if f.controllerExpandVolumeResponse != nil {
log.Printf("Volume capacity %d\n", f.controllerExpandVolumeResponse.CapacityBytes)
}
return nil
}
func (f *feature) aValidControllerExpandVolumeResponseIsReturned() error {
if f.err != nil {
return f.err
}
if f.controllerExpandVolumeRequest.GetCapacityRange().GetRequiredBytes() <= f.controllerExpandVolumeResponse.CapacityBytes {
fmt.Printf("Volume expansion succeeded\n")
return nil
}
return fmt.Errorf("Volume expansion failed")
}
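// setVolumeContent attaches a snapshot or volume content source to the pending
// CreateVolumeRequest so the volume-from-snapshot and volume-from-volume paths can
// be exercised.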
func (f *feature) setVolumeContent(isSnapshotType bool, identity string) *csi.CreateVolumeRequest {
req := f.createVolumeRequest
if isSnapshotType {
req.VolumeContentSource = &csi.VolumeContentSource{
Type: &csi.VolumeContentSource_Snapshot{
Snapshot: &csi.VolumeContentSource_SnapshotSource{
SnapshotId: identity,
},
},
}
} else {
req.VolumeContentSource = &csi.VolumeContentSource{
Type: &csi.VolumeContentSource_Volume{
Volume: &csi.VolumeContentSource_VolumeSource{
VolumeId: identity,
},
},
}
}
return req
}
func (f *feature) iCallCreateVolumeFromSnapshot(srcSnapshotID, name string) error {
req := getTypicalCreateVolumeRequest()
f.createVolumeRequest = req
req.Name = name
req = f.setVolumeContent(true, srcSnapshotID)
f.createVolumeResponse, f.err = f.service.CreateVolume(context.Background(), req)
if f.err != nil {
log.Printf("CreateVolume call failed: '%s'\n", f.err.Error())
}
if f.createVolumeResponse != nil {
log.Printf("volume name '%s' created\n", name)
}
return nil
}
func (f *feature) iCallCreateVolumeFromVolume(srcVolumeName, name string) error {
req := getTypicalCreateVolumeRequest()
f.createVolumeRequest = req
req.Name = name
req = f.setVolumeContent(false, srcVolumeName)
f.createVolumeResponse, f.err = f.service.CreateVolume(context.Background(), req)
if f.err != nil {
log.Printf("CreateVolume call failed: '%s'\n", f.err.Error())
}
if f.createVolumeResponse != nil {
log.Printf("volume name '%s' created\n", name)
}
return nil
}
func (f *feature) iCallInitializeRealIsilonService() error {
f.service.initializeServiceOpts(context.Background())
return nil
}
func (f *feature) aIsilonServiceWithParams(user, mode string) error {
f.checkGoRoutines("start aIsilonService")
f.err = nil
f.getPluginInfoResponse = nil
f.volumeIDList = f.volumeIDList[:0]
f.snapshotIDList = f.snapshotIDList[:0]
// configure gofsutil; we use a mock interface
gofsutil.UseMockFS()
gofsutil.GOFSMock.InduceBindMountError = false
gofsutil.GOFSMock.InduceMountError = false
gofsutil.GOFSMock.InduceGetMountsError = false
gofsutil.GOFSMock.InduceDevMountsError = false
gofsutil.GOFSMock.InduceUnmountError = false
gofsutil.GOFSMock.InduceFormatError = false
gofsutil.GOFSMock.InduceGetDiskFormatError = false
gofsutil.GOFSMock.InduceGetDiskFormatType = ""
gofsutil.GOFSMockMounts = gofsutil.GOFSMockMounts[:0]
// set induced errors
inducedErrors.badVolumeIdentifier = false
inducedErrors.invalidVolumeID = false
inducedErrors.noVolumeID = false
inducedErrors.differentVolumeID = false
inducedErrors.noNodeName = false
inducedErrors.noNodeID = false
inducedErrors.omitVolumeCapability = false
inducedErrors.omitAccessMode = false
// initialize volume and export existence status
stepHandlersErrors.ExportNotFoundError = true
stepHandlersErrors.VolumeNotExistError = true
// Get the httptest mock handler. Only set
// a new server if there isn't one already.
handler := getHandler()
// Get or reuse the cached service
f.getServiceWithParams(user, mode)
clusterConfig := f.service.getIsilonClusterConfig(clusterName1)
if handler != nil && os.Getenv("CSI_ISILON_ENDPOINT") == "" {
if f.server == nil {
f.server = httptest.NewServer(handler)
}
log.Printf("server url: %s\n", f.server.URL)
clusterConfig.EndpointURL = f.server.URL
} else {
f.server = nil
}
isiSvc, _ := f.service.GetIsiService(context.Background(), clusterConfig)
updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1)
updatedClusterConfig.(*IsilonClusterConfig).isiSvc = isiSvc
f.service.isiClusters.Store(clusterName1, updatedClusterConfig)
f.checkGoRoutines("end aIsilonService")
f.service.logServiceStats()
if inducedErrors.noIsiService || inducedErrors.autoProbeNotEnabled {
updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1)
updatedClusterConfig.(*IsilonClusterConfig).isiSvc = nil
f.service.isiClusters.Store(clusterName1, updatedClusterConfig)
}
return nil
}
func (f *feature) aIsilonServiceWithParamsForCustomTopology(user, mode string) error {
f.checkGoRoutines("start aIsilonService")
f.err = nil
f.getPluginInfoResponse = nil
f.volumeIDList = f.volumeIDList[:0]
f.snapshotIDList = f.snapshotIDList[:0]
// configure gofsutil; we use a mock interface
gofsutil.UseMockFS()
gofsutil.GOFSMock.InduceBindMountError = false
gofsutil.GOFSMock.InduceMountError = false
gofsutil.GOFSMock.InduceGetMountsError = false
gofsutil.GOFSMock.InduceDevMountsError = false
gofsutil.GOFSMock.InduceUnmountError = false
gofsutil.GOFSMock.InduceFormatError = false
gofsutil.GOFSMock.InduceGetDiskFormatError = false
gofsutil.GOFSMock.InduceGetDiskFormatType = ""
gofsutil.GOFSMockMounts = gofsutil.GOFSMockMounts[:0]
// set induced errors
inducedErrors.badVolumeIdentifier = false
inducedErrors.invalidVolumeID = false
inducedErrors.noVolumeID = false
inducedErrors.differentVolumeID = false
inducedErrors.noNodeName = false
inducedErrors.noNodeID = false
inducedErrors.omitVolumeCapability = false
inducedErrors.omitAccessMode = false
// initialize volume and export existence status
stepHandlersErrors.ExportNotFoundError = true
stepHandlersErrors.VolumeNotExistError = true
// Get the httptest mock handler. Only set
// a new server if there isn't one already.
handler := getHandler()
// Get or reuse the cached service
f.getServiceWithParamsForCustomTopology(user, mode, true)
clusterConfig := f.service.getIsilonClusterConfig(clusterName1)
if handler != nil && os.Getenv("CSI_ISILON_ENDPOINT") == "" {
if f.server == nil {
f.server = httptest.NewServer(handler)
}
log.Printf("server url: %s\n", f.server.URL)
clusterConfig.EndpointURL = f.server.URL
urlList := strings.Split(f.server.URL, ":")
log.Printf("urlList: %v", urlList)
clusterConfig.IsiPort = urlList[2]
} else {
f.server = nil
}
isiSvc, err := f.service.GetIsiService(context.Background(), clusterConfig)
f.err = err
updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1)
updatedClusterConfig.(*IsilonClusterConfig).isiSvc = isiSvc
f.service.isiClusters.Store(clusterName1, updatedClusterConfig)
f.checkGoRoutines("end aIsilonService")
f.service.logServiceStats()
if inducedErrors.noIsiService || inducedErrors.autoProbeNotEnabled {
updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1)
updatedClusterConfig.(*IsilonClusterConfig).isiSvc = nil
f.service.isiClusters.Store(clusterName1, updatedClusterConfig)
}
return nil
}
func (f *feature) aIsilonServiceWithParamsForCustomTopologyNoLabel(user, mode string) error {
f.checkGoRoutines("start aIsilonService")
f.err = nil
f.getPluginInfoResponse = nil
f.volumeIDList = f.volumeIDList[:0]
f.snapshotIDList = f.snapshotIDList[:0]
// configure gofsutil; we use a mock interface
gofsutil.UseMockFS()
gofsutil.GOFSMock.InduceBindMountError = false
gofsutil.GOFSMock.InduceMountError = false
gofsutil.GOFSMock.InduceGetMountsError = false
gofsutil.GOFSMock.InduceDevMountsError = false
gofsutil.GOFSMock.InduceUnmountError = false
gofsutil.GOFSMock.InduceFormatError = false
gofsutil.GOFSMock.InduceGetDiskFormatError = false
gofsutil.GOFSMock.InduceGetDiskFormatType = ""
gofsutil.GOFSMockMounts = gofsutil.GOFSMockMounts[:0]
// set induced errors
inducedErrors.badVolumeIdentifier = false
inducedErrors.invalidVolumeID = false
inducedErrors.noVolumeID = false
inducedErrors.differentVolumeID = false
inducedErrors.noNodeName = false
inducedErrors.noNodeID = false
inducedErrors.omitVolumeCapability = false
inducedErrors.omitAccessMode = false
// initialize volume and export existence status
stepHandlersErrors.ExportNotFoundError = true
stepHandlersErrors.VolumeNotExistError = true
// Get the httptest mock handler. Only set
// a new server if there isn't one already.
handler := getHandler()
// Create the service with the given user and mode
f.getServiceWithParamsForCustomTopology(user, mode, false)
clusterConfig := f.service.getIsilonClusterConfig(clusterName1)
if handler != nil && os.Getenv("CSI_ISILON_ENDPOINT") == "" {
if f.server == nil {
f.server = httptest.NewServer(handler)
}
log.Printf("server url: %s\n", f.server.URL)
clusterConfig.EndpointURL = f.server.URL
urlList := strings.Split(f.server.URL, ":")
log.Printf("urlList: %v", urlList)
clusterConfig.IsiPort = urlList[2]
} else {
f.server = nil
}
isiSvc, _ := f.service.GetIsiService(context.Background(), clusterConfig)
updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1)
updatedClusterConfig.(*IsilonClusterConfig).isiSvc = isiSvc
f.service.isiClusters.Store(clusterName1, updatedClusterConfig)
f.checkGoRoutines("end aIsilonService")
f.service.logServiceStats()
if inducedErrors.noIsiService || inducedErrors.autoProbeNotEnabled {
updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1)
updatedClusterConfig.(*IsilonClusterConfig).isiSvc = nil
f.service.isiClusters.Store(clusterName1, updatedClusterConfig)
}
return nil
}
func removeNodeLabels(host string) (result bool) {
k8sclientset, err := k8sutils.CreateKubeClientSet("/etc/kubernetes/admin.conf")
if err != nil {
log.Printf("init client failed for custom topology: '%s'", err.Error())
return false
}
// access the API to fetch node object
node, _ := k8sclientset.CoreV1().Nodes().Get(context.TODO(), host, v1.GetOptions{})
log.Printf("Node %s details\n", node)
// Iterate node labels and check if required label is available and if found remove it
for lkey, lval := range node.Labels {
log.Printf("Label is: %s:%s\n", lkey, lval)
if strings.HasPrefix(lkey, constants.PluginName+"/") && lval == constants.PluginName {
log.Printf("Topology label %s:%s available on node", lkey, lval)
cmd := exec.Command("/bin/bash", "-c", "kubectl label nodes "+host+" "+lkey+"-")
err := cmd.Run()
if err != nil {
log.Printf("Error encountered while removing label from node %s: %s", host, err)
return false
}
}
}
return true
}
func applyNodeLabel(host string) (result bool) {
cmd := exec.Command("kubectl", "label", "nodes", host, "csi-isilon.dellemc.com/127.0.0.1=csi-isilon.dellemc.com")
err := cmd.Run()
if err != nil {
log.Printf("Applying label on node %s failed", host)
return false
}
return true
}
func (f *feature) getServiceWithParamsForCustomTopology(user, mode string, applyLabel bool) *service {
testControllerHasNoConnection = false
testNodeHasNoConnection = false
svc := new(service)
var opts Opts
opts.AccessZone = "System"
opts.Path = "/ifs/data/csi-isilon"
opts.Insecure = true
opts.DebugEnabled = true
opts.Verbose = 1
opts.CustomTopologyEnabled = true
opts.KubeConfigPath = "/etc/kubernetes/admin.conf"
newConfig := IsilonClusterConfig{}
newConfig.ClusterName = clusterName1
newConfig.IsiIP = "127.0.0.1"
newConfig.IsiPort = "8080"
newConfig.EndpointURL = "http://127.0.0.1"
newConfig.User = user
newConfig.Password = "blah"
newConfig.IsiInsecure = &opts.Insecure
newConfig.IsiPath = "/ifs/data/csi-isilon"
newConfig.IsDefaultCluster = true
host, _ := os.Hostname()
result := removeNodeLabels(host)
if !result {
log.Fatal("Setting custom topology failed")
}
if applyLabel {
result = applyNodeLabel(host)
if !result {
log.Fatal("Applying csi-isilon.dellemc.com/127.0.0.1=csi-isilon.dellemc.com label on node failed")
}
}
if inducedErrors.autoProbeNotEnabled {
opts.AutoProbe = false
} else {
opts.AutoProbe = true
}
svc.opts = opts
svc.mode = mode
f.service = svc
f.service.nodeID = host
// TODO - IP has to be updated before release
f.service.nodeIP = "1.2.3.4"
f.service.defaultIsiClusterName = clusterName1
f.service.isiClusters = new(sync.Map)
f.service.isiClusters.Store(newConfig.ClusterName, &newConfig)
utils.ConfigureLogger(opts.DebugEnabled)
return svc
}
func (f *feature) getServiceWithParams(user, mode string) *service {
testControllerHasNoConnection = false
testNodeHasNoConnection = false
svc := new(service)
var opts Opts
opts.AccessZone = "System"
opts.Path = "/ifs/data/csi-isilon"
opts.Insecure = true
opts.DebugEnabled = true
opts.Verbose = 1
newConfig := IsilonClusterConfig{}
newConfig.ClusterName = clusterName1
newConfig.IsiIP = "127.0.0.1"
newConfig.IsiPort = "8080"
newConfig.EndpointURL = "http://127.0.0.1"
newConfig.User = user
newConfig.Password = "blah"
newConfig.IsiInsecure = &opts.Insecure
newConfig.IsiPath = "/ifs/data/csi-isilon"
newConfig.IsDefaultCluster = true
if inducedErrors.autoProbeNotEnabled {
opts.AutoProbe = false
} else {
opts.AutoProbe = true
}
svc.opts = opts
svc.mode = mode
f.service = svc
f.service.nodeID = fmt.Sprintf("k8s-rhel76-qual=#=#=1.2.3.4=#=#=%s", clusterName1)
f.service.nodeIP = "1.2.3.4"
f.service.defaultIsiClusterName = clusterName1
f.service.isiClusters = new(sync.Map)
f.service.isiClusters.Store(newConfig.ClusterName, &newConfig)
utils.ConfigureLogger(opts.DebugEnabled)
return svc
}
func (f *feature) iCallLogStatisticsTimes(times int) error {
for i := 0; i < times; i++ {
f.service.logStatistics()
}
return nil
}
func (f *feature) iCallBeforeServe() error {
sp := new(gocsi.StoragePlugin)
var lis net.Listener
f.err = f.service.BeforeServe(context.Background(), sp, lis)
return nil
}
func (f *feature) ICallCreateQuotaInIsiServiceWithNegativeSizeInBytes() error {
clusterConfig := f.service.getIsilonClusterConfig(clusterName1)
ctx, _, _ := GetRunIDLog(context.Background())
_, f.err = clusterConfig.isiSvc.CreateQuota(ctx, f.service.opts.Path, "volume1", -1, true)
return nil
}
func (f *feature) iCallGetExportRelatedFunctionsInIsiService() error {
clusterConfig := f.service.getIsilonClusterConfig(clusterName1)
ctx, _, _ := GetRunIDLog(context.Background())
_, f.err = clusterConfig.isiSvc.GetExports(ctx)
_, f.err = clusterConfig.isiSvc.GetExportByIDWithZone(ctx, 557, "System")
f.err = clusterConfig.isiSvc.DeleteQuotaByExportIDWithZone(ctx, "volume1", 557, "System")
_, _, f.err = clusterConfig.isiSvc.GetExportsWithLimit(ctx, "2")
return nil
}
func (f *feature) iCallUnimplementedFunctions() error {
_, f.err = f.service.ListSnapshots(context.Background(), new(csi.ListSnapshotsRequest))
_, f.err = f.service.NodeUnpublishVolume(context.Background(), new(csi.NodeUnpublishVolumeRequest))
_, f.err = f.service.ControllerExpandVolume(context.Background(), new(csi.ControllerExpandVolumeRequest))
_, f.err = f.service.NodeExpandVolume(context.Background(), new(csi.NodeExpandVolumeRequest))
_, f.err = f.service.NodeGetVolumeStats(context.Background(), new(csi.NodeGetVolumeStatsRequest))
return nil
}
func (f *feature) iCallInitServiceObject() error {
service := New()
if service == nil {
f.err = errors.New("failed to initialize Service object")
} else {
f.err = nil
}
return nil
}
| [
"\"CSI_ISILON_ENDPOINT\"",
"\"CSI_ISILON_ENDPOINT\"",
"\"CSI_ISILON_ENDPOINT\"",
"\"CSI_ISILON_USERID\"",
"\"CSI_ISILON_USERID\"",
"\"CSI_ISILON_PASSWORD\"",
"\"CSI_ISILON_PASSWORD\"",
"\"CSI_ISILON_PATH\"",
"\"CSI_ISILON_PATH\"",
"\"CSI_ISILON_ZONE\"",
"\"CSI_ISILON_ZONE\"",
"\"CSI_ISILON_ENDPOINT\"",
"\"CSI_ISILON_ENDPOINT\"",
"\"CSI_ISILON_ENDPOINT\""
]
| []
| [
"CSI_ISILON_PASSWORD",
"CSI_ISILON_ZONE",
"CSI_ISILON_ENDPOINT",
"CSI_ISILON_PATH",
"CSI_ISILON_USERID"
]
| [] | ["CSI_ISILON_PASSWORD", "CSI_ISILON_ZONE", "CSI_ISILON_ENDPOINT", "CSI_ISILON_PATH", "CSI_ISILON_USERID"] | go | 5 | 0 | |
tests/conftest.py | import logging
import os
import random
import time
import tempfile
import threading
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
from math import floor
from shutil import copyfile
from functools import partial
from botocore.exceptions import ClientError
import pytest
from collections import namedtuple
from ocs_ci.deployment import factory as dep_factory
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
deployment,
ignore_leftovers,
tier_marks,
ignore_leftover_label,
)
from ocs_ci.ocs import constants, defaults, fio_artefacts, node, ocp, platform_nodes
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.exceptions import (
CommandFailed,
TimeoutExpiredError,
CephHealthException,
ResourceWrongStatusException,
UnsupportedPlatformError,
PoolDidNotReachReadyState,
StorageclassNotCreated,
PoolNotDeletedFromUI,
StorageClassNotDeletedFromUI,
)
from ocs_ci.ocs.mcg_workload import mcg_job_factory as mcg_job_factory_implementation
from ocs_ci.ocs.node import get_node_objs, schedule_nodes
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pvc
from ocs_ci.ocs.utils import setup_ceph_toolbox, collect_ocs_logs
from ocs_ci.ocs.resources.backingstore import (
backingstore_factory as backingstore_factory_implementation,
)
from ocs_ci.ocs.resources.namespacestore import (
namespace_store_factory as namespacestore_factory_implementation,
)
from ocs_ci.ocs.resources.bucketclass import (
bucket_class_factory as bucketclass_factory_implementation,
)
from ocs_ci.ocs.resources.cloud_manager import CloudManager
from ocs_ci.ocs.resources.cloud_uls import (
cloud_uls_factory as cloud_uls_factory_implementation,
)
from ocs_ci.ocs.node import check_nodes_specs
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.ocs.resources.objectbucket import BUCKET_MAP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.pod import (
get_rgw_pods,
delete_deploymentconfig_pods,
get_pods_having_label,
get_deployments_having_label,
Pod,
)
from ocs_ci.ocs.resources.pvc import PVC, create_restore_pvc
from ocs_ci.ocs.version import get_ocs_version, report_ocs_version
from ocs_ci.ocs.cluster_load import ClusterLoad, wrap_msg
from ocs_ci.utility import (
aws,
deployment_openshift_logging as ocp_logging_obj,
ibmcloud,
kms as KMS,
pagerduty,
reporting,
templating,
users,
version,
)
from ocs_ci.utility.environment_check import (
get_status_before_execution,
get_status_after_execution,
)
from ocs_ci.utility.flexy import load_cluster_info
from ocs_ci.utility.kms import is_kms_enabled
from ocs_ci.utility.prometheus import PrometheusAPI
from ocs_ci.utility.uninstall_openshift_logging import uninstall_cluster_logging
from ocs_ci.utility.utils import (
ceph_health_check,
ceph_health_check_base,
get_ocs_build_number,
get_running_ocp_version,
get_openshift_client,
get_system_architecture,
get_testrun_name,
load_auth_config,
ocsci_log_path,
skipif_ocp_version,
skipif_ocs_version,
TimeoutSampler,
skipif_upgraded_from,
update_container_with_mirrored_image,
skipif_ui_not_support,
)
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import (
create_unique_resource_name,
create_ocs_object_from_kind_and_name,
setup_pod_directories,
get_current_test_name,
)
from ocs_ci.ocs.bucket_utils import get_rgw_restart_counts
from ocs_ci.ocs.pgsql import Postgresql
from ocs_ci.ocs.resources.rgw import RGW
from ocs_ci.ocs.jenkins import Jenkins
from ocs_ci.ocs.amq import AMQ
from ocs_ci.ocs.elasticsearch import ElasticSearch
from ocs_ci.ocs.ui.base_ui import login_ui, close_browser
from ocs_ci.ocs.ui.block_pool import BlockPoolUI
from ocs_ci.ocs.ui.storageclass import StorageClassUI
from ocs_ci.ocs.couchbase_new import CouchBase
log = logging.getLogger(__name__)
class OCSLogFormatter(logging.Formatter):
def __init__(self):
fmt = (
"%(asctime)s - %(threadName)s - %(levelname)s - %(name)s.%(funcName)s.%(lineno)d "
"- %(message)s"
)
super(OCSLogFormatter, self).__init__(fmt)
def pytest_logger_config(logger_config):
logger_config.add_loggers([""], stdout_level="info")
logger_config.set_log_option_default("")
logger_config.split_by_outcome()
logger_config.set_formatter_class(OCSLogFormatter)
def pytest_collection_modifyitems(session, items):
"""
A pytest hook to filter out tests that would otherwise be skipped due to
skipif_ocs_version, skipif_upgraded_from or skipif_no_kms
Args:
session: pytest session
items: list of collected tests
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
# Add squad markers to each test item based on filepath
for item in items:
# check if the test already has a squad marker manually assigned
if any(map(lambda x: "_squad" in x.name, item.iter_markers())):
continue
for squad, paths in constants.SQUADS.items():
for _path in paths:
# Limit the test_path to the tests directory
test_path = os.path.relpath(item.fspath.strpath, constants.TOP_DIR)
if _path in test_path:
item.add_marker(f"{squad.lower()}_squad")
item.user_properties.append(("squad", squad))
break
if not (teardown or deploy or (deploy and skip_ocs_deployment)):
for item in items[:]:
skipif_ocp_version_marker = item.get_closest_marker("skipif_ocp_version")
skipif_ocs_version_marker = item.get_closest_marker("skipif_ocs_version")
skipif_upgraded_from_marker = item.get_closest_marker(
"skipif_upgraded_from"
)
skipif_no_kms_marker = item.get_closest_marker("skipif_no_kms")
skipif_ui_not_support_marker = item.get_closest_marker(
"skipif_ui_not_support"
)
if skipif_ocp_version_marker:
skip_condition = skipif_ocp_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocp_version(skip_condition[0]):
log.info(
f"Test: {item} will be skipped due to OCP {skip_condition}"
)
items.remove(item)
continue
if skipif_ocs_version_marker:
skip_condition = skipif_ocs_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocs_version(skip_condition[0]):
log.info(f"Test: {item} will be skipped due to {skip_condition}")
items.remove(item)
continue
if skipif_upgraded_from_marker:
skip_args = skipif_upgraded_from_marker.args
if skipif_upgraded_from(skip_args[0]):
log.info(
f"Test: {item} will be skipped because the OCS cluster is"
f" upgraded from one of these versions: {skip_args[0]}"
)
items.remove(item)
if skipif_no_kms_marker:
try:
if not is_kms_enabled():
log.info(
f"Test: {item} it will be skipped because the OCS cluster"
f" has not configured cluster-wide encryption with KMS"
)
items.remove(item)
except KeyError:
log.warning(
"Cluster is not yet installed. Skipping skipif_no_kms check."
)
if skipif_ui_not_support_marker:
skip_condition = skipif_ui_not_support_marker
if skipif_ui_not_support(skip_condition.args[0]):
log.info(
f"Test: {item} will be skipped due to UI test {skip_condition.args} is not available"
)
items.remove(item)
continue
# skip UI test on openshift dedicated ODF-MS platform
if (
config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
or config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM
):
for item in items.copy():
if "/ui/" in str(item.fspath):
log.info(
f"Test {item} is removed from the collected items"
f" UI is not supported on {config.ENV_DATA['platform'].lower()}"
)
items.remove(item)
@pytest.fixture()
def supported_configuration():
"""
Check that cluster nodes have enough CPU and Memory as described in:
https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.2/html-single/planning_your_deployment/index#infrastructure-requirements_rhocs
This fixture is intended as a prerequisite for tests or fixtures that
run flaky on configurations that don't meet minimal requirements.
Minimum requirements for each starting node (OSD+MON):
16 CPUs
64 GB memory
Last documentation check: 2020-02-21
"""
min_cpu = constants.MIN_NODE_CPU
min_memory = constants.MIN_NODE_MEMORY
log.info("Checking if system meets minimal requirements")
if not check_nodes_specs(min_memory=min_memory, min_cpu=min_cpu):
err_msg = (
f"At least one of the worker nodes doesn't meet the "
f"required minimum specs of {min_cpu} vCPUs and {min_memory} RAM"
)
pytest.xfail(err_msg)
@pytest.fixture(scope="session", autouse=True)
def auto_load_auth_config():
try:
auth_config = {"AUTH": load_auth_config()}
config.update(auth_config)
except FileNotFoundError:
pass # If auth file doesn't exist we just ignore.
@pytest.fixture(scope="class")
def secret_factory_class(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="session")
def secret_factory_session(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="function")
def secret_factory(request):
return secret_factory_fixture(request)
def secret_factory_fixture(request):
"""
Secret factory. Calling this fixture creates a new secret.
RBD based is default.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(interface=constants.CEPHBLOCKPOOL):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether an RBD based or CephFS resource is created.
RBD is default.
"""
secret_obj = helpers.create_secret(interface_type=interface)
assert secret_obj, "Failed to create a secret"
instances.append(secret_obj)
return secret_obj
def finalizer():
"""
Delete the RBD secrets
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="session", autouse=True)
def log_ocs_version(cluster):
"""
Fixture handling version reporting for OCS.
This fixture handles alignment of the version reporting, so that we:
* report version for each test run (no matter if just deployment, just
test or both deployment and tests are executed)
* prevent conflict of version reporting with deployment/teardown (eg. we
should not run the version logging before actual deployment, or after
a teardown)
Version is reported in:
* log entries of INFO log level during test setup phase
* ocs_version file in cluster path directory (for copy pasting into bug
reports)
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
dev_mode = config.RUN["cli_params"].get("dev_mode")
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
if teardown and not deploy:
log.info("Skipping version reporting for teardown.")
return
elif dev_mode:
log.info("Skipping version reporting for development mode.")
return
elif skip_ocs_deployment:
log.info("Skipping version reporting since OCS deployment is skipped.")
return
cluster_version, image_dict = get_ocs_version()
file_name = os.path.join(
config.ENV_DATA["cluster_path"], "ocs_version." + datetime.now().isoformat()
)
with open(file_name, "w") as file_obj:
report_ocs_version(cluster_version, image_dict, file_obj)
log.info("human readable ocs version info written into %s", file_name)
@pytest.fixture(scope="session")
def pagerduty_service(request):
"""
Create a Service in PagerDuty. The service represents a cluster instance.
The service is deleted at the end of the test run.
Returns:
str: PagerDuty service json
"""
if config.ENV_DATA["platform"].lower() in constants.MANAGED_SERVICE_PLATFORMS:
pagerduty_api = pagerduty.PagerDutyAPI()
payload = pagerduty_api.get_service_dict()
service_response = pagerduty_api.create("services", payload=payload)
msg = f"Request {service_response.request.url} failed"
assert service_response.ok, msg
service = service_response.json().get("service")
def teardown():
"""
Delete the service at the end of test run
"""
service_id = service["id"]
log.info(f"Deleting service with id {service_id}")
delete_response = pagerduty_api.delete(f"services/{service_id}")
msg = f"Deletion of service {service_id} failed"
assert delete_response.ok, msg
request.addfinalizer(teardown)
return service
else:
log.info(
"PagerDuty service is not created because "
f"platform from {constants.MANAGED_SERVICE_PLATFORMS} "
"is not used"
)
return None
@pytest.fixture(scope="session", autouse=True)
def pagerduty_integration(request, pagerduty_service):
"""
Create a new Pagerduty integration for service from pagerduty_service
fixture if it doesn't exist. Update the ocs-converged-pagerduty secret with the
correct integration key. This is currently applicable only for ODF
Managed Service.
"""
if config.ENV_DATA["platform"].lower() in constants.MANAGED_SERVICE_PLATFORMS:
service_id = pagerduty_service["id"]
pagerduty_api = pagerduty.PagerDutyAPI()
log.info(
"Looking if Prometheus integration for pagerduty service with id "
f"{service_id} exists"
)
integration_key = None
for integration in pagerduty_service.get("integrations"):
if integration["summary"] == "Prometheus":
log.info(
"Prometheus integration already exists. "
"Skipping creation of new one."
)
integration_key = integration["integration_key"]
break
if not integration_key:
payload = pagerduty_api.get_integration_dict("Prometheus")
integration_response = pagerduty_api.create(
f"services/{service_id}/integrations", payload=payload
)
msg = f"Request {integration_response.request.url} failed"
assert integration_response.ok, msg
integration = integration_response.json().get("integration")
integration_key = integration["integration_key"]
pagerduty.set_pagerduty_integration_secret(integration_key)
def update_pagerduty_integration_secret():
"""
Make sure that pagerduty secret is updated with correct integration
token. Check value of config.RUN['thread_pagerduty_secret_update']:
* required - secret is periodically updated to correct value
* not required - secret is not updated
* finished - thread is terminated
"""
while config.RUN["thread_pagerduty_secret_update"] != "finished":
if config.RUN["thread_pagerduty_secret_update"] == "required":
pagerduty.set_pagerduty_integration_secret(integration_key)
time.sleep(60)
config.RUN["thread_pagerduty_secret_update"] = "not required"
thread = threading.Thread(
target=update_pagerduty_integration_secret,
name="thread_pagerduty_secret_update",
)
def finalizer():
"""
Stop the thread that executed update_pagerduty_integration_secret()
"""
config.RUN["thread_pagerduty_secret_update"] = "finished"
if thread:
thread.join()
request.addfinalizer(finalizer)
thread.start()
@pytest.fixture(scope="class")
def ceph_pool_factory_class(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="session")
def ceph_pool_factory_session(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="function")
def ceph_pool_factory(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
def ceph_pool_factory_fixture(request, replica=3, compression=None):
"""
Create a Ceph pool factory.
Calling this fixture creates a new Ceph pool instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL, replica=replica, compression=compression
):
if interface == constants.CEPHBLOCKPOOL:
ceph_pool_obj = helpers.create_ceph_block_pool(
replica=replica, compression=compression
)
elif interface == constants.CEPHFILESYSTEM:
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
).get(defaults.CEPHFILESYSTEM_NAME)
ceph_pool_obj = OCS(**cfs)
assert ceph_pool_obj, f"Failed to create {interface} pool"
if interface != constants.CEPHFILESYSTEM:
instances.append(ceph_pool_obj)
return ceph_pool_obj
def finalizer():
"""
Delete the Ceph block pool
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def storageclass_factory_class(request, ceph_pool_factory_class, secret_factory_class):
return storageclass_factory_fixture(
request, ceph_pool_factory_class, secret_factory_class
)
@pytest.fixture(scope="session")
def storageclass_factory_session(
request, ceph_pool_factory_session, secret_factory_session
):
return storageclass_factory_fixture(
request, ceph_pool_factory_session, secret_factory_session
)
@pytest.fixture(scope="function")
def storageclass_factory(request, ceph_pool_factory, secret_factory):
return storageclass_factory_fixture(request, ceph_pool_factory, secret_factory)
def storageclass_factory_fixture(
request,
ceph_pool_factory,
secret_factory,
):
"""
Create a storage class factory. Default is RBD based.
Calling this fixture creates a new storage class instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
secret=None,
custom_data=None,
sc_name=None,
reclaim_policy=constants.RECLAIM_POLICY_DELETE,
replica=3,
compression=None,
new_rbd_pool=False,
pool_name=None,
rbd_thick_provision=False,
encrypted=False,
encryption_kms_id=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether an RBD based or CephFS resource is created.
RBD is default.
secret (object): An OCS instance for the secret.
custom_data (dict): If provided then storageclass object is created
by using these data. Parameters `block_pool` and `secret`
are not used but references are set if provided.
sc_name (str): Name of the storage class
replica (int): Replica size for a pool
compression (str): Compression type option for a pool
new_rbd_pool (bool): True if user wants to create new rbd pool for SC
pool_name (str): Existing pool name to create the storageclass other
than the default rbd pool.
rbd_thick_provision (bool): True to enable RBD thick provisioning.
Applicable if interface is CephBlockPool
encrypted (bool): True to enable RBD PV encryption
encryption_kms_id (str): Key value of vault config to be used from
csi-kms-connection-details configmap
Returns:
object: helpers.create_storage_class instance with links to
block_pool and secret.
"""
if custom_data:
sc_obj = helpers.create_resource(**custom_data)
else:
secret = secret or secret_factory(interface=interface)
if interface == constants.CEPHBLOCKPOOL:
if config.ENV_DATA.get("new_rbd_pool") or new_rbd_pool:
pool_obj = ceph_pool_factory(
interface=interface,
replica=config.ENV_DATA.get("replica") or replica,
compression=config.ENV_DATA.get("compression") or compression,
)
interface_name = pool_obj.name
else:
if pool_name is None:
interface_name = helpers.default_ceph_block_pool()
else:
interface_name = pool_name
elif interface == constants.CEPHFILESYSTEM:
interface_name = helpers.get_cephfs_data_pool_name()
sc_obj = helpers.create_storage_class(
interface_type=interface,
interface_name=interface_name,
secret_name=secret.name,
sc_name=sc_name,
reclaim_policy=reclaim_policy,
rbd_thick_provision=rbd_thick_provision,
encrypted=encrypted,
encryption_kms_id=encryption_kms_id,
)
assert sc_obj, f"Failed to create {interface} storage class"
sc_obj.secret = secret
instances.append(sc_obj)
return sc_obj
def finalizer():
"""
Delete the storageclass
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
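# A minimal usage sketch (hypothetical test, not part of this module): a test that
# requests the `storageclass_factory` fixture can create an RBD storage class backed
# by a new compressed pool; the compression value used here is an assumption.
#
#     def test_storageclass_example(storageclass_factory):
#         sc_obj = storageclass_factory(
#             interface=constants.CEPHBLOCKPOOL,
#             new_rbd_pool=True,
#             compression="aggressive",
#             reclaim_policy=constants.RECLAIM_POLICY_RETAIN,
#         )
#         assert sc_obj.name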
@pytest.fixture(scope="class")
def project_factory_class(request):
return project_factory_fixture(request)
@pytest.fixture(scope="session")
def project_factory_session(request):
return project_factory_fixture(request)
@pytest.fixture()
def project_factory(request):
return project_factory_fixture(request)
@pytest.fixture()
def project(project_factory):
"""
This fixture creates a single project instance.
"""
project_obj = project_factory()
return project_obj
def project_factory_fixture(request):
"""
Create a new project factory.
Calling this fixture creates new project.
"""
instances = []
def factory(project_name=None):
"""
Args:
project_name (str): The name for the new project
Returns:
object: ocs_ci.ocs.resources.ocs instance of 'Project' kind.
"""
proj_obj = helpers.create_project(project_name=project_name)
instances.append(proj_obj)
return proj_obj
def finalizer():
delete_projects(instances)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def teardown_project_factory(request):
return teardown_project_factory_fixture(request)
def teardown_project_factory_fixture(request):
"""
Tearing down a project that was created during the test
To use this factory, you'll need to pass 'teardown_project_factory' to your test
function and call it in your test when a new project was created and you
want it to be removed in teardown phase:
def test_example(self, teardown_project_factory):
project_obj = create_project(project_name="xyz")
teardown_project_factory(project_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCP object or list of OCP objects) : Object to teardown after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
delete_projects(instances)
request.addfinalizer(finalizer)
return factory
def delete_projects(instances):
"""
Delete the given projects
Args:
instances (list): list of OCP objects (kind is Project)
"""
for instance in instances:
try:
ocp_event = ocp.OCP(kind="Event", namespace=instance.namespace)
events = ocp_event.get()
event_count = len(events["items"])
warn_event_count = 0
for event in events["items"]:
if event["type"] == "Warning":
warn_event_count += 1
log.info(
(
"There were %d events in %s namespace before it's"
" removal (out of which %d were of type Warning)."
" For a full dump of this event list, see DEBUG logs."
),
event_count,
instance.namespace,
warn_event_count,
)
except Exception:
# we don't want any problem to disrupt the teardown itself
log.exception("Failed to get events for project %s", instance.namespace)
ocp.switch_to_default_rook_cluster_project()
instance.delete(resource_name=instance.namespace)
instance.wait_for_delete(instance.namespace, timeout=300)
@pytest.fixture(scope="class")
def pvc_factory_class(request, project_factory_class):
return pvc_factory_fixture(request, project_factory_class)
@pytest.fixture(scope="session")
def pvc_factory_session(request, project_factory_session):
return pvc_factory_fixture(request, project_factory_session)
@pytest.fixture(scope="function")
def pvc_factory(request, project_factory):
return pvc_factory_fixture(
request,
project_factory,
)
def pvc_factory_fixture(request, project_factory):
"""
Create a Persistent Volume Claim factory. Calling this fixture creates a new
PVC. For a custom PVC provide the 'storageclass' parameter.
"""
instances = []
active_project = None
active_rbd_storageclass = None
active_cephfs_storageclass = None
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_mode=constants.ACCESS_MODE_RWO,
custom_data=None,
status=constants.STATUS_BOUND,
volume_mode=None,
size_unit="Gi",
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether an RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
This decides the access mode to be used for the PVC.
ReadWriteOnce is default.
custom_data (dict): If provided then PVC object is created
by using these data. Parameters `project` and `storageclass`
are not used but reference is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
volume_mode (str): Volume mode for PVC.
eg: volume_mode='Block' to create rbd `block` type volume
size_unit (str): PVC size unit, eg: "Mi"
Returns:
object: helpers.create_pvc instance.
"""
if custom_data:
pvc_obj = PVC(**custom_data)
pvc_obj.create(do_reload=False)
else:
nonlocal active_project
nonlocal active_rbd_storageclass
nonlocal active_cephfs_storageclass
project = project or active_project or project_factory()
active_project = project
if interface == constants.CEPHBLOCKPOOL:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_rbd_storageclass = storageclass
elif interface == constants.CEPHFILESYSTEM:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_cephfs_storageclass = storageclass
pvc_size = f"{size}{size_unit}" if size else None
pvc_obj = helpers.create_pvc(
sc_name=storageclass.name,
namespace=project.namespace,
size=pvc_size,
do_reload=False,
access_mode=access_mode,
volume_mode=volume_mode,
)
assert pvc_obj, "Failed to create PVC"
if status:
helpers.wait_for_resource_state(pvc_obj, status)
pvc_obj.storageclass = storageclass
pvc_obj.project = project
pvc_obj.access_mode = access_mode
instances.append(pvc_obj)
return pvc_obj
def finalizer():
"""
Delete the PVC
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
# If they have ReclaimPolicy set to Retain then delete them manually
for pv_obj in pv_objs:
if (
pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
== constants.RECLAIM_POLICY_RETAIN
):
helpers.wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
pv_obj.delete()
pv_obj.ocp.wait_for_delete(pv_obj.name)
else:
pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=180)
request.addfinalizer(finalizer)
return factory
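# A minimal usage sketch (hypothetical): request a 5 GiB raw block RBD PVC in a
# fresh project and wait for it to reach the Bound state.
#
#     def test_pvc_example(pvc_factory):
#         pvc_obj = pvc_factory(
#             interface=constants.CEPHBLOCKPOOL,
#             size=5,
#             access_mode=constants.ACCESS_MODE_RWO,
#             volume_mode="Block",
#             status=constants.STATUS_BOUND,
#         )
#         assert pvc_obj.name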
@pytest.fixture(scope="class")
def pod_factory_class(request, pvc_factory_class):
return pod_factory_fixture(request, pvc_factory_class)
@pytest.fixture(scope="session")
def pod_factory_session(request, pvc_factory_session):
return pod_factory_fixture(request, pvc_factory_session)
@pytest.fixture(scope="function")
def pod_factory(request, pvc_factory):
return pod_factory_fixture(request, pvc_factory)
def pod_factory_fixture(request, pvc_factory):
"""
Create a Pod factory. Calling this fixture creates a new Pod.
For custom Pods provide the 'pvc' parameter.
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
custom_data=None,
status=constants.STATUS_RUNNING,
node_name=None,
pod_dict_path=None,
raw_block_pv=False,
deployment_config=False,
service_account=None,
replica_count=1,
command=None,
command_args=None,
subpath=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether an RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod.
raw_block_pv (bool): True for creating raw block pv based pod,
False otherwise.
deployment_config (bool): True for DeploymentConfig creation,
False otherwise
service_account (OCS): Service account object, in case DeploymentConfig
is to be created
replica_count (int): The replica count for deployment config
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
subpath (str): Value of subPath parameter in pod yaml
Returns:
object: helpers.create_pod instance
"""
sa_name = service_account.name if service_account else None
if custom_data:
pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface)
pod_obj = helpers.create_pod(
pvc_name=pvc.name,
namespace=pvc.namespace,
interface_type=interface,
node_name=node_name,
pod_dict_path=pod_dict_path,
raw_block_pv=raw_block_pv,
dc_deployment=deployment_config,
sa_name=sa_name,
replica_count=replica_count,
command=command,
command_args=command_args,
subpath=subpath,
)
assert pod_obj, "Failed to create pod"
if deployment_config:
dc_name = pod_obj.get_labels().get("name")
dc_ocp_dict = ocp.OCP(
kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace
).get(resource_name=dc_name)
dc_obj = OCS(**dc_ocp_dict)
instances.append(dc_obj)
else:
instances.append(pod_obj)
if status:
helpers.wait_for_resource_state(pod_obj, status, timeout=300)
pod_obj.reload()
pod_obj.pvc = pvc
if deployment_config:
return dc_obj
return pod_obj
def finalizer():
"""
Delete the Pod or the DeploymentConfig
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
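# A minimal usage sketch (hypothetical): attach a CephFS PVC to a pod and run IO once
# the pod is Running; the `run_io` call on the returned pod object is an assumption.
#
#     def test_pod_example(pvc_factory, pod_factory):
#         pvc_obj = pvc_factory(interface=constants.CEPHFILESYSTEM)
#         pod_obj = pod_factory(interface=constants.CEPHFILESYSTEM, pvc=pvc_obj)
#         pod_obj.run_io(storage_type="fs", size="1G")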
@pytest.fixture(scope="class")
def teardown_factory_class(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="session")
def teardown_factory_session(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="function")
def teardown_factory(request):
return teardown_factory_fixture(request)
def teardown_factory_fixture(request):
"""
Tearing down a resource that was created during the test
To use this factory, you'll need to pass 'teardown_factory' to your test
function and call it in your test when a new resource was created and you
want it to be removed in teardown phase:
def test_example(self, teardown_factory):
pvc_obj = create_pvc()
teardown_factory(pvc_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCS object or list of OCS objects) : Object to teardown after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
"""
Delete the resources created in the test
"""
for instance in instances[::-1]:
if not instance.is_deleted:
try:
reclaim_policy = (
instance.reclaim_policy
if instance.kind == constants.PVC
else None
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
if reclaim_policy == constants.RECLAIM_POLICY_DELETE:
helpers.validate_pv_delete(instance.backed_pv)
except CommandFailed as ex:
log.warning(
f"Resource is already in deleted state, skipping this step"
f"Error: {ex}"
)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def service_account_factory_class(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="session")
def service_account_factory_session(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="function")
def service_account_factory(request):
return service_account_factory_fixture(request)
def service_account_factory_fixture(request):
"""
Create a service account
"""
instances = []
active_service_account_obj = None
def factory(project=None, service_account=None):
"""
Args:
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
service_account (str): service_account_name
Returns:
object: serviceaccount instance.
"""
nonlocal active_service_account_obj
if active_service_account_obj and not service_account:
return active_service_account_obj
elif service_account:
sa_obj = helpers.get_serviceaccount_obj(
sa_name=service_account, namespace=project.namespace
)
if not helpers.validate_scc_policy(
sa_name=service_account, namespace=project.namespace
):
helpers.add_scc_policy(
sa_name=service_account, namespace=project.namespace
)
sa_obj.project = project
active_service_account_obj = sa_obj
instances.append(sa_obj)
return sa_obj
else:
sa_obj = helpers.create_serviceaccount(
namespace=project.namespace,
)
sa_obj.project = project
active_service_account_obj = sa_obj
helpers.add_scc_policy(sa_name=sa_obj.name, namespace=project.namespace)
assert sa_obj, "Failed to create serviceaccount"
instances.append(sa_obj)
return sa_obj
def finalizer():
"""
Delete the service account
"""
for instance in instances:
helpers.remove_scc_policy(
sa_name=instance.name, namespace=instance.namespace
)
instance.delete()
instance.ocp.wait_for_delete(resource_name=instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def dc_pod_factory(request, pvc_factory, service_account_factory):
"""
Create deploymentconfig pods
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
service_account=None,
size=None,
custom_data=None,
node_name=None,
node_selector=None,
replica_count=1,
raw_block_pv=False,
sa_obj=None,
wait=True,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether an RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
service_account (str): service account name for dc_pods
size (int): The requested size for the PVC
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
node_name (str): The name of specific node to schedule the pod
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
replica_count (int): Replica count for deployment config
raw_block_pv (bool): True if pod with raw block pvc
sa_obj (object): If specific service account is needed
wait (bool): True to wait for the pod to reach Running state, False otherwise
"""
if custom_data:
dc_pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface, size=size)
sa_obj = sa_obj or service_account_factory(
project=pvc.project, service_account=service_account
)
dc_pod_obj = helpers.create_pod(
interface_type=interface,
pvc_name=pvc.name,
do_reload=False,
namespace=pvc.namespace,
sa_name=sa_obj.name,
dc_deployment=True,
replica_count=replica_count,
node_name=node_name,
node_selector=node_selector,
raw_block_pv=raw_block_pv,
pod_dict_path=constants.FEDORA_DC_YAML,
)
instances.append(dc_pod_obj)
log.info(dc_pod_obj.name)
if wait:
helpers.wait_for_resource_state(
dc_pod_obj, constants.STATUS_RUNNING, timeout=180
)
dc_pod_obj.pvc = pvc
return dc_pod_obj
def finalizer():
"""
Delete dc pods
"""
for instance in instances:
delete_deploymentconfig_pods(instance)
request.addfinalizer(finalizer)
return factory
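# A minimal usage sketch (hypothetical): create a DeploymentConfig backed pod with a
# 10 GiB RBD PVC, scheduled via a nodeSelector; the selector value is an assumption.
#
#     def test_dc_pod_example(dc_pod_factory):
#         dc_pod = dc_pod_factory(
#             interface=constants.CEPHBLOCKPOOL,
#             size=10,
#             node_selector={"nodetype": "app-pod"},
#         )
#         assert dc_pod.name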
@pytest.fixture(scope="session", autouse=True)
def polarion_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures polarion testsuite properties for junit xml
"""
polarion_project_id = config.REPORTING["polarion"]["project_id"]
record_testsuite_property("polarion-project-id", polarion_project_id)
jenkins_build_url = config.RUN.get("jenkins_build_url")
if jenkins_build_url:
record_testsuite_property("polarion-custom-description", jenkins_build_url)
polarion_testrun_name = get_testrun_name()
record_testsuite_property("polarion-testrun-id", polarion_testrun_name)
record_testsuite_property("polarion-testrun-status-id", "inprogress")
record_testsuite_property("polarion-custom-isautomated", "True")
@pytest.fixture(scope="session", autouse=True)
def additional_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures additional custom testsuite properties for junit xml
"""
# add logs url
logs_url = config.RUN.get("logs_url")
if logs_url:
record_testsuite_property("logs-url", logs_url)
# add run_id
record_testsuite_property("run_id", config.RUN["run_id"])
# Report Portal
launch_name = reporting.get_rp_launch_name()
record_testsuite_property("rp_launch_name", launch_name)
launch_description = reporting.get_rp_launch_description()
record_testsuite_property("rp_launch_description", launch_description)
attributes = reporting.get_rp_launch_attributes()
for key, value in attributes.items():
# Prefix with `rp_` so the rp_preproc upload script knows to use the property
record_testsuite_property(f"rp_{key}", value)
launch_url = config.REPORTING.get("rp_launch_url")
if launch_url:
record_testsuite_property("rp_launch_url", launch_url)
@pytest.fixture(scope="session")
def tier_marks_name():
"""
Gets the tier mark names
Returns:
list: list of tier mark names
"""
tier_marks_name = []
for each_tier in tier_marks:
try:
tier_marks_name.append(each_tier.name)
except AttributeError:
tier_marks_name.append(each_tier().args[0].name)
return tier_marks_name
@pytest.fixture(scope="function", autouse=True)
def health_checker(request, tier_marks_name):
skipped = False
dev_mode = config.RUN["cli_params"].get("dev_mode")
if dev_mode:
log.info("Skipping health checks for development mode")
return
def finalizer():
if not skipped:
try:
teardown = config.RUN["cli_params"]["teardown"]
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
mcg_only_deployment = config.ENV_DATA["mcg_only_deployment"]
if not (teardown or skip_ocs_deployment or mcg_only_deployment):
ceph_health_check_base()
log.info("Ceph health check passed at teardown")
except CephHealthException:
log.info("Ceph health check failed at teardown")
# Retrying to increase the chance the cluster health will be OK
# for next test
ceph_health_check()
raise
node = request.node
request.addfinalizer(finalizer)
for mark in node.iter_markers():
if mark.name in tier_marks_name:
log.info("Checking for Ceph Health OK ")
try:
status = ceph_health_check_base()
if status:
log.info("Ceph health check passed at setup")
return
except CephHealthException:
skipped = True
# skip because ceph is not in good health
pytest.skip("Ceph health check failed at setup")
@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level, record_testsuite_property):
"""
This fixture initiates deployment for both OCP and OCS clusters.
Specific platform deployment classes will handle the fine details
of the deployment
"""
log.info(f"All logs located at {ocsci_log_path()}")
teardown = config.RUN["cli_params"]["teardown"]
deploy = config.RUN["cli_params"]["deploy"]
if teardown or deploy:
factory = dep_factory.DeploymentFactory()
deployer = factory.get_deployment()
# Add a finalizer to teardown the cluster after test execution is finished
if teardown:
def cluster_teardown_finalizer():
# If KMS is configured, clean up the backend resources
# we are doing it before OCP cleanup
if config.DEPLOYMENT.get("kms_deployment"):
kms = KMS.get_kms_deployment()
kms.cleanup()
deployer.destroy_cluster(log_cli_level)
request.addfinalizer(cluster_teardown_finalizer)
log.info("Will teardown cluster because --teardown was provided")
# Download client
if config.DEPLOYMENT["skip_download_client"]:
log.info("Skipping client download")
else:
force_download = (
config.RUN["cli_params"].get("deploy")
and config.DEPLOYMENT["force_download_client"]
)
get_openshift_client(force_download=force_download)
# set environment variable for early testing of RHCOS
if config.ENV_DATA.get("early_testing"):
release_img = config.ENV_DATA["RELEASE_IMG"]
log.info(f"Running early testing of RHCOS with release image: {release_img}")
os.environ["RELEASE_IMG"] = release_img
os.environ["OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE"] = release_img
if deploy:
# Deploy cluster
deployer.deploy_cluster(log_cli_level)
else:
if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM:
ibmcloud.login()
record_testsuite_property("rp_ocs_build", get_ocs_build_number())
@pytest.fixture(scope="class")
def environment_checker(request):
node = request.node
# List of marks for which we will ignore the leftover checker
marks_to_ignore = [m.mark for m in [deployment, ignore_leftovers]]
# app labels of resources to be excluded for leftover check
exclude_labels = [constants.must_gather_pod_label]
for mark in node.iter_markers():
if mark in marks_to_ignore:
return
if mark.name == ignore_leftover_label.name:
exclude_labels.extend(list(mark.args))
request.addfinalizer(
partial(get_status_after_execution, exclude_labels=exclude_labels)
)
get_status_before_execution(exclude_labels=exclude_labels)
@pytest.fixture(scope="session")
def log_cli_level(pytestconfig):
"""
Retrieves the log_cli_level set in pytest.ini
Returns:
str: log_cli_level set in pytest.ini or DEBUG if not set
"""
return pytestconfig.getini("log_cli_level") or "DEBUG"
@pytest.fixture(scope="session", autouse=True)
def cluster_load(
request,
project_factory_session,
pvc_factory_session,
service_account_factory_session,
pod_factory_session,
):
"""
Run IO during the test execution
"""
cl_load_obj = None
io_in_bg = config.RUN.get("io_in_bg")
log_utilization = config.RUN.get("log_utilization")
io_load = config.RUN.get("io_load")
cluster_load_error = None
cluster_load_error_msg = (
"Cluster load might not work correctly during this run, because "
"it failed with an exception: %s"
)
# IO load should not happen during deployment
deployment_test = (
True if ("deployment" in request.node.items[0].location[0]) else False
)
if io_in_bg and not deployment_test:
io_load = int(io_load) * 0.01
log.info(wrap_msg("Tests will be running while IO is in the background"))
log.info(
"Start running IO in the background. The amount of IO that "
"will be written is going to be determined by the cluster "
"capabilities according to its limit"
)
try:
cl_load_obj = ClusterLoad(
project_factory=project_factory_session,
sa_factory=service_account_factory_session,
pvc_factory=pvc_factory_session,
pod_factory=pod_factory_session,
target_percentage=io_load,
)
cl_load_obj.reach_cluster_load_percentage()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
if (log_utilization or io_in_bg) and not deployment_test:
if not cl_load_obj:
try:
cl_load_obj = ClusterLoad()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
config.RUN["load_status"] = "running"
def finalizer():
"""
Stop the thread that executed watch_load()
"""
config.RUN["load_status"] = "finished"
if thread:
thread.join()
if cluster_load_error:
raise cluster_load_error
request.addfinalizer(finalizer)
def watch_load():
"""
Watch the cluster load by monitoring the cluster latency.
Print the cluster utilization metrics every 20 seconds.
If IOs are running in the test background, dynamically adjust
the IO load based on the cluster latency.
"""
while config.RUN["load_status"] != "finished":
time.sleep(20)
try:
cl_load_obj.print_metrics(mute_logs=True)
if io_in_bg:
if config.RUN["load_status"] == "running":
cl_load_obj.adjust_load_if_needed()
elif config.RUN["load_status"] == "to_be_paused":
cl_load_obj.reduce_load(pause=True)
config.RUN["load_status"] = "paused"
elif config.RUN["load_status"] == "to_be_reduced":
cl_load_obj.reduce_load(pause=False)
config.RUN["load_status"] = "reduced"
elif config.RUN["load_status"] == "to_be_resumed":
cl_load_obj.resume_load()
config.RUN["load_status"] = "running"
# Any type of exception should be caught and we should continue.
# We don't want any test to fail
except Exception:
continue
thread = threading.Thread(target=watch_load)
thread.start()
def resume_cluster_load_implementation():
"""
Resume cluster load implementation
"""
config.RUN["load_status"] = "to_be_resumed"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status == "running":
break
except TimeoutExpiredError:
log.error("Cluster load was not resumed successfully")
def reduce_cluster_load_implementation(request, pause, resume=True):
"""
Pause/reduce the background cluster load
Args:
request (obj): pytest request object
pause (bool): True for completely pausing the cluster load, False for reducing it by 50%
resume (bool): True for resuming the cluster load upon teardown, False for not resuming
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
if resume:
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
config.RUN["load_status"] = "to_be_paused" if pause else "to_be_reduced"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status in ["paused", "reduced"]:
break
except TimeoutExpiredError:
log.error(
f"Cluster load was not {'paused' if pause else 'reduced'} successfully"
)
@pytest.fixture()
def pause_cluster_load(request):
"""
Pause the background cluster load without resuming it
"""
reduce_cluster_load_implementation(request=request, pause=True, resume=False)
@pytest.fixture()
def resume_cluster_load(request):
"""
Resume the background cluster load
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
@pytest.fixture()
def pause_and_resume_cluster_load(request):
"""
Pause the background cluster load and resume it in teardown to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=True)
@pytest.fixture()
def reduce_and_resume_cluster_load(request):
"""
Reduce the background cluster load to be 50% of what it is and resume the load in teardown
to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=False)
@pytest.fixture(
params=[
pytest.param({"interface": constants.CEPHBLOCKPOOL}),
pytest.param({"interface": constants.CEPHFILESYSTEM}),
],
ids=["RBD", "CephFS"],
)
def interface_iterate(request):
"""
Iterate over interfaces - CephBlockPool and CephFileSystem
"""
return request.param["interface"]
@pytest.fixture(scope="class")
def multi_pvc_factory_class(project_factory_class, pvc_factory_class):
return multi_pvc_factory_fixture(project_factory_class, pvc_factory_class)
@pytest.fixture(scope="session")
def multi_pvc_factory_session(project_factory_session, pvc_factory_session):
return multi_pvc_factory_fixture(project_factory_session, pvc_factory_session)
@pytest.fixture(scope="function")
def multi_pvc_factory(project_factory, pvc_factory):
return multi_pvc_factory_fixture(project_factory, pvc_factory)
def multi_pvc_factory_fixture(project_factory, pvc_factory):
"""
Create a Persistent Volume Claims factory. Calling this fixture creates a
set of new PVCs. Options for PVC creation based on provided access modes:
1. For each PVC, choose random value from the list of access modes
2. Create PVCs based on the specified distribution number of access modes.
Create sets of PVCs based on the order of access modes.
3. Create PVCs based on the specified distribution number of access modes.
The order of PVC creation is independent of access mode.
"""
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_modes=None,
access_modes_selection="distribute_sequential",
access_mode_dist_ratio=None,
status=constants.STATUS_BOUND,
num_of_pvc=1,
wait_each=False,
timeout=60,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether an RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_modes (list): List of access modes. One of the access modes
will be chosen for creating each PVC. If not specified,
ReadWriteOnce will be selected for all PVCs. To specify
volume mode, append volume mode in the access mode name
separated by '-'.
eg: ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany',
'ReadWriteMany-Block']
access_modes_selection (str): Decides how to select accessMode for
each PVC from the options given in 'access_modes' list.
Values are 'select_random', 'distribute_random' and 'distribute_sequential'.
'select_random' : While creating each PVC, one access mode will
be selected from the 'access_modes' list.
'distribute_random' : The access modes in the list
'access_modes' will be distributed based on the values in
'distribute_ratio' and the order in which PVCs are created
will not be based on the access modes. For example, 1st and
6th PVC might have same access mode.
'distribute_sequential' : The access modes in the list
'access_modes' will be distributed based on the values in
'distribute_ratio' and the order in which PVCs are created
will be as sets of PVCs of same assess mode. For example,
first set of 10 will be having same access mode followed by
next set of 13 with a different access mode.
access_mode_dist_ratio (list): Contains the number of PVCs to be
created for each access mode. If not specified, the given list
of access modes will be equally distributed among the PVCs.
eg: [10,12] for num_of_pvc=22 and
access_modes=['ReadWriteOnce', 'ReadWriteMany']
status (str): If provided then factory waits for object to reach
desired state.
num_of_pvc(int): Number of PVCs to be created
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
timeout(int): Time in seconds to wait
Returns:
list: objects of PVC class.
"""
pvc_list = []
if wait_each:
status_tmp = status
else:
status_tmp = ""
project = project or project_factory()
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
access_modes = access_modes or [constants.ACCESS_MODE_RWO]
access_modes_list = []
if access_modes_selection == "select_random":
for _ in range(num_of_pvc):
mode = random.choice(access_modes)
access_modes_list.append(mode)
else:
if not access_mode_dist_ratio:
num_of_modes = len(access_modes)
dist_val = floor(num_of_pvc / num_of_modes)
access_mode_dist_ratio = [dist_val] * num_of_modes
access_mode_dist_ratio[-1] = dist_val + (num_of_pvc % num_of_modes)
zipped_share = list(zip(access_modes, access_mode_dist_ratio))
for mode, share in zipped_share:
access_modes_list.extend([mode] * share)
if access_modes_selection == "distribute_random":
random.shuffle(access_modes_list)
for access_mode in access_modes_list:
if "-" in access_mode:
access_mode, volume_mode = access_mode.split("-")
else:
volume_mode = ""
pvc_obj = pvc_factory(
interface=interface,
project=project,
storageclass=storageclass,
size=size,
access_mode=access_mode,
status=status_tmp,
volume_mode=volume_mode,
)
pvc_list.append(pvc_obj)
pvc_obj.project = project
if status and not wait_each:
for pvc_obj in pvc_list:
helpers.wait_for_resource_state(pvc_obj, status, timeout=timeout)
return pvc_list
return factory
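# Illustrative usage sketch for the multi PVC factory (the test name, size and
# distribution values below are hypothetical, not taken from an existing test):
#
#     def test_example(multi_pvc_factory):
#         pvc_objs = multi_pvc_factory(
#             interface=constants.CEPHFILESYSTEM,
#             size=5,
#             access_modes=["ReadWriteOnce", "ReadWriteMany"],
#             access_modes_selection="distribute_sequential",
#             access_mode_dist_ratio=[2, 3],
#             num_of_pvc=5,
#         )
#         assert len(pvc_objs) == 5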
@pytest.fixture(scope="function")
def memory_leak_function(request):
"""
Function to start a memory leak capture thread which will be executed in parallel with the test run
Memory leak data will be captured on all worker nodes for the ceph-osd process
Data will be appended to the /tmp/(worker)-top-output.txt file for each worker
The created tmp files will be deleted during teardown
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
def finalizer():
"""
Finalizer to stop memory leak data capture thread and cleanup the files
"""
set_flag_status("terminated")
try:
for status in TimeoutSampler(90, 3, get_flag_status):
if status == "terminated":
break
except TimeoutExpiredError:
log.warning(
"Background test execution still in progress before"
"memory leak thread terminated"
)
if thread:
thread.join()
log_path = ocsci_log_path()
for worker in node.get_worker_nodes():
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
copyfile(
f"/tmp/{worker}-top-output.txt",
f"{log_path}/{worker}-top-output.txt",
)
os.remove(f"/tmp/{worker}-top-output.txt")
log.info("Memory leak capture has stopped")
request.addfinalizer(finalizer)
temp_file = tempfile.NamedTemporaryFile(
mode="w+", prefix="test_status", delete=False
)
def get_flag_status():
with open(temp_file.name, "r") as t_file:
return t_file.readline()
def set_flag_status(value):
with open(temp_file.name, "w") as t_file:
t_file.writelines(value)
set_flag_status("running")
def run_memory_leak_in_bg():
"""
Function to run memory leak in background thread
Memory leak data is written in the below format
date time PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
"""
oc = ocp.OCP(namespace=config.ENV_DATA["cluster_namespace"])
while get_flag_status() == "running":
for worker in node.get_worker_nodes():
filename = f"/tmp/{worker}-top-output.txt"
top_cmd = f"debug nodes/{worker} -- chroot /host top -n 2 b"
with open("/tmp/file.txt", "w+") as temp:
temp.write(
str(oc.exec_oc_cmd(command=top_cmd, out_yaml_format=False))
)
temp.seek(0)
for line in temp:
if "ceph-osd" in line:
with open(filename, "a+") as f:
f.write(str(datetime.now()))
f.write(" ")
f.write(line)
log.info("Start memory leak data capture in the test background")
thread = threading.Thread(target=run_memory_leak_in_bg)
thread.start()
@pytest.fixture()
def aws_obj():
"""
Initialize AWS instance
Returns:
AWS: An instance of AWS class
"""
aws_obj = aws.AWS()
return aws_obj
@pytest.fixture()
def ec2_instances(request, aws_obj):
"""
Get cluster instances
Returns:
dict: The ID keys and the name values of the instances
"""
# Get all cluster nodes objects
nodes = node.get_node_objs()
# Get the cluster nodes ec2 instances
ec2_instances = aws.get_instances_ids_and_names(nodes)
assert (
ec2_instances
), f"Failed to get ec2 instances for node {[n.name for n in nodes]}"
def finalizer():
"""
Make sure all instances are running
"""
# Getting the instances that are in status 'stopping' (if there are any), to wait for them to
# get to status 'stopped' so it will be possible to start them
stopping_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPING)
}
# Waiting for the instances that are in status 'stopping'
# (if there are any) to reach 'stopped'
if stopping_instances:
for stopping_instance in stopping_instances:
instance = aws_obj.get_ec2_instance(stopping_instance)
instance.wait_until_stopped()
stopped_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPED)
}
# Start the instances
if stopped_instances:
aws_obj.start_ec2_instances(instances=stopped_instances, wait=True)
request.addfinalizer(finalizer)
return ec2_instances
@pytest.fixture(scope="session")
def cld_mgr(request, rgw_endpoint):
"""
Returns a cloud manager instance that'll be used throughout the session
Returns:
CloudManager: A CloudManager resource
"""
cld_mgr = CloudManager()
def finalizer():
for client in vars(cld_mgr):
try:
getattr(cld_mgr, client).secret.delete()
except AttributeError:
log.info(f"{client} secret not found")
request.addfinalizer(finalizer)
return cld_mgr
@pytest.fixture()
def rgw_obj(request):
return rgw_obj_fixture(request)
@pytest.fixture(scope="session")
def rgw_obj_session(request):
return rgw_obj_fixture(request)
def rgw_obj_fixture(request):
"""
Returns an RGW resource that represents RGW in the cluster
Returns:
RGW: An RGW resource
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
try:
storageclass = OCP(
kind=constants.STORAGECLASS,
namespace=config.ENV_DATA["cluster_namespace"],
resource_name=constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RGW,
).get()
except CommandFailed:
storageclass = None
if rgw_deployments or storageclass:
return RGW()
else:
return None
@pytest.fixture()
def rgw_deployments(request):
"""
Return RGW deployments or skip the test.
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
if rgw_deployments:
# Force-skipping in case of IBM Cloud -
# https://github.com/red-hat-storage/ocs-ci/issues/3863
if config.ENV_DATA["platform"].lower() == constants.IBMCLOUD_PLATFORM:
pytest.skip(
"RGW deployments were found, but test will be skipped because of BZ1926831"
)
return rgw_deployments
else:
pytest.skip("There is no RGW deployment available for this test.")
@pytest.fixture(scope="session")
def rgw_endpoint(request):
"""
Expose RGW service and return external RGW endpoint address if available.
Returns:
string: external RGW endpoint
"""
log.info("Looking for RGW service to expose")
oc = ocp.OCP(kind=constants.SERVICE, namespace=config.ENV_DATA["cluster_namespace"])
rgw_service = oc.get(selector=constants.RGW_APP_LABEL)["items"]
if rgw_service:
if config.DEPLOYMENT["external_mode"]:
rgw_service = constants.RGW_SERVICE_EXTERNAL_MODE
else:
rgw_service = constants.RGW_SERVICE_INTERNAL_MODE
log.info(f"Service {rgw_service} found and will be exposed")
# custom hostname is provided because default hostname from rgw service
# is too long and OCP rejects it
oc = ocp.OCP(
kind=constants.ROUTE, namespace=config.ENV_DATA["cluster_namespace"]
)
route = oc.get(resource_name="noobaa-mgmt")
router_hostname = route["status"]["ingress"][0]["routerCanonicalHostname"]
rgw_hostname = f"rgw.{router_hostname}"
try:
oc.exec_oc_cmd(f"expose service/{rgw_service} --hostname {rgw_hostname}")
except CommandFailed as cmdfailed:
if "AlreadyExists" in str(cmdfailed):
log.warning("RGW route already exists.")
# new route is named after service
rgw_endpoint = oc.get(resource_name=rgw_service)
endpoint_obj = OCS(**rgw_endpoint)
def _finalizer():
endpoint_obj.delete()
request.addfinalizer(_finalizer)
return f"http://{rgw_hostname}"
else:
log.info("RGW service is not available")
@pytest.fixture()
def mcg_obj(request):
return mcg_obj_fixture(request)
@pytest.fixture(scope="session")
def mcg_obj_session(request):
return mcg_obj_fixture(request)
def mcg_obj_fixture(request, *args, **kwargs):
"""
Returns an MCG resource that's connected to the S3 endpoint
Returns:
MCG: An MCG resource
"""
if config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM:
log.warning("As openshift dedicated is used, no MCG resource is returned")
return None
mcg_obj = MCG(*args, **kwargs)
def finalizer():
if config.ENV_DATA["platform"].lower() == "aws":
mcg_obj.cred_req_obj.delete()
if kwargs.get("create_aws_creds"):
request.addfinalizer(finalizer)
return mcg_obj
@pytest.fixture()
def awscli_pod(request):
return awscli_pod_fixture(request, scope_name="function")
@pytest.fixture(scope="session")
def awscli_pod_session(request):
return awscli_pod_fixture(request, scope_name="session")
def awscli_pod_fixture(request, scope_name):
"""
Creates a new AWSCLI pod for relaying commands
Args:
scope_name (str): The name of the fixture's scope,
used for giving a descriptive name to the pod and configmap
Returns:
pod: A pod running the AWS CLI
"""
# Create the service-ca configmap to be mounted upon pod creation
service_ca_data = templating.load_yaml(constants.AWSCLI_SERVICE_CA_YAML)
service_ca_configmap_name = create_unique_resource_name(
constants.AWSCLI_SERVICE_CA_CONFIGMAP_NAME, scope_name
)
service_ca_data["metadata"]["name"] = service_ca_configmap_name
log.info("Trying to create the AWS CLI service CA")
service_ca_configmap = helpers.create_resource(**service_ca_data)
arch = get_system_architecture()
if arch.startswith("x86"):
pod_dict_path = constants.AWSCLI_POD_YAML
else:
pod_dict_path = constants.AWSCLI_MULTIARCH_POD_YAML
awscli_pod_dict = templating.load_yaml(pod_dict_path)
awscli_pod_dict["spec"]["volumes"][0]["configMap"][
"name"
] = service_ca_configmap_name
awscli_pod_name = create_unique_resource_name(
constants.AWSCLI_RELAY_POD_NAME, scope_name
)
awscli_pod_dict["metadata"]["name"] = awscli_pod_name
update_container_with_mirrored_image(awscli_pod_dict)
awscli_pod_obj = Pod(**awscli_pod_dict)
assert awscli_pod_obj.create(
do_reload=True
), f"Failed to create Pod {awscli_pod_name}"
OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE, kind="ConfigMap").wait_for_resource(
resource_name=service_ca_configmap.name, column="DATA", condition="1"
)
helpers.wait_for_resource_state(awscli_pod_obj, constants.STATUS_RUNNING)
def _awscli_pod_cleanup():
awscli_pod_obj.delete()
service_ca_configmap.delete()
request.addfinalizer(_awscli_pod_cleanup)
return awscli_pod_obj
@pytest.fixture()
def test_directory_setup(request, awscli_pod_session):
return test_directory_setup_fixture(request, awscli_pod_session)
def test_directory_setup_fixture(request, awscli_pod_session):
origin_dir, result_dir = setup_pod_directories(
awscli_pod_session, ["origin", "result"]
)
SetupDirs = namedtuple("SetupDirs", "origin_dir, result_dir")
def dir_cleanup():
test_name = get_current_test_name()
awscli_pod_session.exec_cmd_on_pod(command=f"rm -rf {test_name}")
request.addfinalizer(dir_cleanup)
return SetupDirs(origin_dir=origin_dir, result_dir=result_dir)
@pytest.fixture()
def nodes():
"""
Return an instance of the relevant platform nodes class
(e.g. AWSNodes, VMWareNodes) to be later used in the test
for nodes related operations, like nodes restart,
detach/attach volume, etc.
"""
factory = platform_nodes.PlatformNodesFactory()
nodes = factory.get_nodes_platform()
return nodes
@pytest.fixture()
def uploaded_objects(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
return uploaded_objects_fixture(
request, mcg_obj, awscli_pod, verify_rgw_restart_count
)
@pytest.fixture(scope="session")
def uploaded_objects_session(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
):
return uploaded_objects_fixture(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
)
def uploaded_objects_fixture(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
"""
Deletes all objects that were created as part of the test
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
awscli_pod (Pod): A pod running the AWSCLI tools
Returns:
list: An empty list of objects
"""
uploaded_objects_paths = []
def object_cleanup():
for uploaded_filename in uploaded_objects_paths:
log.info(f"Deleting object {uploaded_filename}")
awscli_pod.exec_cmd_on_pod(
command=craft_s3_command("rm " + uploaded_filename, mcg_obj),
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
request.addfinalizer(object_cleanup)
return uploaded_objects_paths
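# Illustrative usage sketch (hypothetical test body; the uploaded object path is
# an assumption): tests append the S3 paths they upload, and the finalizer above
# removes them through the AWS CLI pod.
#
#     def test_example(mcg_obj, awscli_pod, bucket_factory, uploaded_objects):
#         bucket = bucket_factory(amount=1, interface="OC")[0]
#         full_object_path = f"s3://{bucket.name}/example-object"
#         awscli_pod.exec_cmd_on_pod(
#             command=craft_s3_command(f"cp /etc/hostname {full_object_path}", mcg_obj),
#             secrets=[mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_internal_endpoint],
#         )
#         uploaded_objects.append(full_object_path)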
@pytest.fixture()
def verify_rgw_restart_count(request):
return verify_rgw_restart_count_fixture(request)
@pytest.fixture(scope="session")
def verify_rgw_restart_count_session(request):
return verify_rgw_restart_count_fixture(request)
def verify_rgw_restart_count_fixture(request):
"""
Verifies the RGW restart count at start and end of a test
"""
if config.ENV_DATA["platform"].lower() in constants.ON_PREM_PLATFORMS:
log.info("Getting RGW pod restart count before executing the test")
initial_counts = get_rgw_restart_counts()
def finalizer():
rgw_pods = get_rgw_pods()
for rgw_pod in rgw_pods:
rgw_pod.reload()
log.info("Verifying whether RGW pods changed after executing the test")
for rgw_pod in rgw_pods:
assert rgw_pod.restart_count in initial_counts, "RGW pod restarted"
request.addfinalizer(finalizer)
@pytest.fixture()
def rgw_bucket_factory(request, rgw_obj):
if rgw_obj:
return bucket_factory_fixture(request, rgw_obj=rgw_obj)
else:
return None
@pytest.fixture(scope="session")
def rgw_bucket_factory_session(request, rgw_obj_session):
if rgw_obj_session:
return bucket_factory_fixture(request, rgw_obj=rgw_obj_session)
else:
return None
@pytest.fixture()
def bucket_factory(request, bucket_class_factory, mcg_obj):
"""
Returns an MCG bucket factory.
If MCG object not found returns None
"""
if mcg_obj:
return bucket_factory_fixture(request, bucket_class_factory, mcg_obj)
else:
return None
@pytest.fixture(scope="session")
def bucket_factory_session(request, bucket_class_factory_session, mcg_obj_session):
"""
Returns a session-scoped MCG bucket factory.
If session-scoped MCG object not found returns None
"""
if mcg_obj_session:
return bucket_factory_fixture(
request, bucket_class_factory_session, mcg_obj_session
)
else:
return None
def bucket_factory_fixture(
request, bucket_class_factory=None, mcg_obj=None, rgw_obj=None
):
"""
Create a bucket factory. Calling this fixture creates a new bucket(s).
For a custom amount, provide the 'amount' parameter.
***Please note***
Creation of buckets by utilizing the S3 interface *does not* support bucketclasses.
Only OC/CLI buckets can support different bucketclasses.
By default, all S3 buckets utilize the default bucketclass.
Args:
bucket_class_factory: creates a new Bucket Class
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
rgw_obj (RGW): An RGW object
"""
created_buckets = []
def _create_buckets(
amount=1,
interface="S3",
verify_health=True,
bucketclass=None,
replication_policy=None,
*args,
**kwargs,
):
"""
Creates the requested number of buckets. All buckets created through
this factory are deleted by the fixture finalizer during teardown.
Args:
amount (int): The amount of buckets to create
interface (str): The interface to use for creation of buckets.
S3 | OC | CLI | NAMESPACE
verify_health (bool): Whether to verify the created bucket's health
post-creation
bucketclass (dict): A dictionary describing a new
bucketclass to be created.
When None, the default bucketclass is used.
Returns:
list: A list of s3.Bucket objects, containing all the created
buckets
"""
if bucketclass:
interface = bucketclass["interface"]
current_call_created_buckets = []
if interface.lower() not in BUCKET_MAP:
raise RuntimeError(
f"Invalid interface type received: {interface}. "
f'available types: {", ".join(BUCKET_MAP.keys())}'
)
bucketclass = (
bucketclass if bucketclass is None else bucket_class_factory(bucketclass)
)
for _ in range(amount):
bucket_name = helpers.create_unique_resource_name(
resource_description="bucket", resource_type=interface.lower()
)
created_bucket = BUCKET_MAP[interface.lower()](
bucket_name,
mcg=mcg_obj,
rgw=rgw_obj,
bucketclass=bucketclass,
replication_policy=replication_policy,
*args,
**kwargs,
)
current_call_created_buckets.append(created_bucket)
created_buckets.append(created_bucket)
if verify_health:
created_bucket.verify_health(**kwargs)
return current_call_created_buckets
def bucket_cleanup():
for bucket in created_buckets:
log.info(f"Cleaning up bucket {bucket.name}")
try:
bucket.delete()
except ClientError as e:
if e.response["Error"]["Code"] == "NoSuchBucket":
log.warning(f"{bucket.name} could not be found in cleanup")
else:
raise
request.addfinalizer(bucket_cleanup)
return _create_buckets
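# Illustrative usage sketch (the bucketclass dict below is an assumption,
# modelled on the one used later in multiregion_mirror_setup_fixture; the test
# name is hypothetical):
#
#     def test_example(bucket_factory):
#         bucketclass = {
#             "interface": "OC",
#             "backingstore_dict": {"aws": [(1, "us-west-1")]},
#         }
#         buckets = bucket_factory(amount=2, bucketclass=bucketclass)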
@pytest.fixture(scope="class")
def cloud_uls_factory(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="session")
def cloud_uls_factory_session(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="function")
def mcg_job_factory(request, bucket_factory, project_factory, mcg_obj, tmp_path):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request, bucket_factory, project_factory, mcg_obj, tmp_path
)
@pytest.fixture(scope="session")
def mcg_job_factory_session(
request, bucket_factory_session, project_factory_session, mcg_obj_session, tmp_path
):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request,
bucket_factory_session,
project_factory_session,
mcg_obj_session,
tmp_path,
)
@pytest.fixture()
def backingstore_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If MCG object not found
"""
if mcg_obj:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
else:
return None
@pytest.fixture(scope="session")
def backingstore_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
else:
return None
@pytest.fixture()
def bucket_class_factory(
request, mcg_obj, backingstore_factory, namespace_store_factory
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If MCG object not found
"""
if mcg_obj:
return bucketclass_factory_implementation(
request, mcg_obj, backingstore_factory, namespace_store_factory
)
else:
return None
@pytest.fixture(scope="session")
def bucket_class_factory_session(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return bucketclass_factory_implementation(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
)
else:
return None
@pytest.fixture()
def multiregion_mirror_setup(bucket_factory):
return multiregion_mirror_setup_fixture(bucket_factory)
@pytest.fixture(scope="session")
def multiregion_mirror_setup_session(bucket_factory_session):
return multiregion_mirror_setup_fixture(bucket_factory_session)
def multiregion_mirror_setup_fixture(bucket_factory):
# Setup
# Todo:
# add region and amount parametrization - note that `us-east-1`
# will cause an error as it is the default region. If usage of `us-east-1`
# needs to be tested, keep the 'region' field out.
bucketclass = {
"interface": "CLI",
"backingstore_dict": {"aws": [(1, "us-west-1"), (1, "us-east-2")]},
"placement_policy": "Mirror",
}
# Create a NooBucket that'll use the bucket class in order to test
# the mirroring policy
bucket = bucket_factory(1, "OC", bucketclass=bucketclass)[0]
return bucket, bucket.bucketclass.backingstores
@pytest.fixture(scope="session")
def default_storageclasses(request, teardown_factory_session):
"""
Returns a dictionary with storageclasses. Keys represent the reclaim policy
of the storageclass. There are two storageclasses for each key. The first is
RBD based and the second one is CephFS based. Storageclasses with Retain
Reclaim Policy are created from the default storageclasses.
"""
scs = {constants.RECLAIM_POLICY_DELETE: [], constants.RECLAIM_POLICY_RETAIN: []}
# TODO(fbalak): Use proper constants after
# https://github.com/red-hat-storage/ocs-ci/issues/1056
# is resolved
for sc_name in ("ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"):
sc = OCS(kind=constants.STORAGECLASS, metadata={"name": sc_name})
sc.reload()
scs[constants.RECLAIM_POLICY_DELETE].append(sc)
sc.data["reclaimPolicy"] = constants.RECLAIM_POLICY_RETAIN
sc.data["metadata"]["name"] += "-retain"
sc._name = sc.data["metadata"]["name"]
sc.create()
teardown_factory_session(sc)
scs[constants.RECLAIM_POLICY_RETAIN].append(sc)
return scs
@pytest.fixture(scope="class")
def install_logging(request):
"""
Setup and teardown
* The setup will deploy openshift-logging in the cluster
* The teardown will uninstall cluster-logging from the cluster
"""
def finalizer():
uninstall_cluster_logging()
request.addfinalizer(finalizer)
csv = ocp.OCP(
kind=constants.CLUSTER_SERVICE_VERSION,
namespace=constants.OPENSHIFT_LOGGING_NAMESPACE,
)
logging_csv = csv.get().get("items")
if logging_csv:
log.info("Logging is already configured, Skipping Installation")
return
log.info("Configuring Openshift-logging")
# Checks OCP version
ocp_version = get_running_ocp_version()
logging_channel = "stable" if ocp_version >= "4.7" else ocp_version
# Creates namespace openshift-operators-redhat
ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
# Creates an operator-group for elasticsearch
assert ocp_logging_obj.create_elasticsearch_operator_group(
yaml_file=constants.EO_OG_YAML, resource_name="openshift-operators-redhat"
)
# Set RBAC policy on the project
assert ocp_logging_obj.set_rbac(
yaml_file=constants.EO_RBAC_YAML, resource_name="prometheus-k8s"
)
# Creates subscription for elastic-search operator
subscription_yaml = templating.load_yaml(constants.EO_SUB_YAML)
subscription_yaml["spec"]["channel"] = logging_channel
helpers.create_resource(**subscription_yaml)
assert ocp_logging_obj.get_elasticsearch_subscription()
# Creates a namespace openshift-logging
ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
# Creates an operator-group for cluster-logging
assert ocp_logging_obj.create_clusterlogging_operator_group(
yaml_file=constants.CL_OG_YAML
)
# Creates subscription for cluster-logging
cl_subscription = templating.load_yaml(constants.CL_SUB_YAML)
cl_subscription["spec"]["channel"] = logging_channel
helpers.create_resource(**cl_subscription)
assert ocp_logging_obj.get_clusterlogging_subscription()
# Creates instance in namespace openshift-logging
cluster_logging_operator = OCP(
kind=constants.POD, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
)
log.info(f"The cluster-logging-operator {cluster_logging_operator.get()}")
ocp_logging_obj.create_instance()
@pytest.fixture
def fio_pvc_dict():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture(scope="session")
def fio_pvc_dict_session():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture
def fio_configmap_dict():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture(scope="session")
def fio_configmap_dict_session():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture
def fio_job_dict():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="session")
def fio_job_dict_session():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="function")
def pgsql_factory_fixture(request):
"""
Pgsql factory fixture
"""
pgsql = Postgresql()
def factory(
replicas,
clients=None,
threads=None,
transactions=None,
scaling_factor=None,
timeout=None,
sc_name=None,
):
"""
Factory to start pgsql workload
Args:
replicas (int): Number of pgbench pods to be deployed
clients (int): Number of clients
threads (int): Number of threads
transactions (int): Number of transactions
scaling_factor (int): scaling factor
timeout (int): Time in seconds to wait
"""
# Setup postgres
pgsql.setup_postgresql(replicas=replicas, sc_name=sc_name)
# Create pgbench benchmark
pgsql.create_pgbench_benchmark(
replicas=replicas,
clients=clients,
threads=threads,
transactions=transactions,
scaling_factor=scaling_factor,
timeout=timeout,
)
# Wait for pgbench pod to initialize and complete
pgsql.wait_for_pgbench_status(status=constants.STATUS_COMPLETED)
# Get pgbench pods
pgbench_pods = pgsql.get_pgbench_pods()
# Validate pgbench run and parse logs
pgsql.validate_pgbench_run(pgbench_pods)
return pgsql
def finalizer():
"""
Clean up
"""
pgsql.cleanup()
request.addfinalizer(finalizer)
return factory
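# Illustrative usage sketch (all parameter values are hypothetical):
#
#     def test_example(pgsql_factory_fixture):
#         pgsql = pgsql_factory_fixture(
#             replicas=3, clients=3, threads=4, transactions=600, scaling_factor=2
#         )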
@pytest.fixture(scope="function")
def jenkins_factory_fixture(request):
"""
Jenkins factory fixture
"""
jenkins = Jenkins()
def factory(num_projects=1, num_of_builds=1):
"""
Factory to start jenkins workload
Args:
num_projects (int): Number of Jenkins projects
num_of_builds (int): Number of builds per project
"""
# Jenkins template
jenkins.create_ocs_jenkins_template()
# Init number of projects
jenkins.number_projects = num_projects
# Create app jenkins
jenkins.create_app_jenkins()
# Create jenkins pvc
jenkins.create_jenkins_pvc()
# Create jenkins build config
jenkins.create_jenkins_build_config()
# Wait for the jenkins deploy pod to reach Completed state
jenkins.wait_for_jenkins_deploy_status(status=constants.STATUS_COMPLETED)
# Init number of builds per project
jenkins.number_builds_per_project = num_of_builds
# Start Builds
jenkins.start_build()
# Wait for builds to reach 'Complete' state
jenkins.wait_for_build_to_complete()
# Print table of builds
jenkins.print_completed_builds_results()
return jenkins
def finalizer():
"""
Clean up
"""
jenkins.cleanup()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def couchbase_new_factory_fixture(request):
"""
Couchbase factory fixture using Couchbase operator
"""
couchbase = CouchBase()
def factory(
replicas=3,
run_in_bg=False,
skip_analyze=True,
sc_name=None,
num_items=None,
num_threads=None,
):
"""
Factory to start couchbase workload
Args:
replicas (int): Number of couchbase workers to be deployed
run_in_bg (bool): Run IOs in background as option
skip_analyze (bool): Skip logs analysis as option
"""
# Create Couchbase subscription
couchbase.couchbase_subscription()
# Create Couchbase worker secrets
couchbase.create_cb_secrets()
# Create couchbase workers
couchbase.create_cb_cluster(replicas=3, sc_name=sc_name)
couchbase.create_data_buckets()
# Run couchbase workload
couchbase.run_workload(
replicas=replicas,
run_in_bg=run_in_bg,
num_items=num_items,
num_threads=num_threads,
)
# Run sanity check on data logs
couchbase.analyze_run(skip_analyze=skip_analyze)
return couchbase
def finalizer():
"""
Clean up
"""
couchbase.teardown()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def amq_factory_fixture(request):
"""
AMQ factory fixture
"""
amq = AMQ()
def factory(
sc_name,
kafka_namespace=constants.AMQ_NAMESPACE,
size=100,
replicas=3,
topic_name="my-topic",
user_name="my-user",
partitions=1,
topic_replicas=1,
num_of_producer_pods=1,
num_of_consumer_pods=1,
value="10000",
since_time=1800,
):
"""
Factory to start amq workload
Args:
sc_name (str): Name of storage class
kafka_namespace (str): Namespace where kafka cluster to be created
size (int): Size of the storage
replicas (int): Number of kafka and zookeeper pods to be created
topic_name (str): Name of the topic to be created
user_name (str): Name of the user to be created
partitions (int): Number of partitions of topic
topic_replicas (int): Number of replicas of topic
num_of_producer_pods (int): Number of producer pods to be created
num_of_consumer_pods (int): Number of consumer pods to be created
value (str): Number of messages to be sent and received
since_time (int): Number of seconds required to send the msg
"""
# Setup kafka cluster
amq.setup_amq_cluster(
sc_name=sc_name, namespace=kafka_namespace, size=size, replicas=replicas
)
# Run open messages
amq.create_messaging_on_amq(
topic_name=topic_name,
user_name=user_name,
partitions=partitions,
replicas=topic_replicas,
num_of_producer_pods=num_of_producer_pods,
num_of_consumer_pods=num_of_consumer_pods,
value=value,
)
# Wait for some time to generate msg
waiting_time = 60
log.info(f"Waiting for {waiting_time}sec to generate msg")
time.sleep(waiting_time)
# Check messages are sent and received
threads = amq.run_in_bg(
namespace=kafka_namespace, value=value, since_time=since_time
)
return amq, threads
def finalizer():
"""
Clean up
"""
# Clean up
amq.cleanup()
request.addfinalizer(finalizer)
return factory
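# Illustrative usage sketch (the storage class name below is an assumption):
#
#     def test_example(amq_factory_fixture):
#         amq, consumer_producer_threads = amq_factory_fixture(
#             sc_name="ocs-storagecluster-ceph-rbd"
#         )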
@pytest.fixture
def measurement_dir(tmp_path):
"""
Returns the directory path where all measurement related results should
be stored. If 'measurement_dir' is provided by the config then it is used,
otherwise a new directory is generated.
Returns:
str: Path to measurement directory
"""
if config.ENV_DATA.get("measurement_dir"):
measurement_dir = config.ENV_DATA.get("measurement_dir")
log.info(f"Using measurement dir from configuration: {measurement_dir}")
else:
measurement_dir = os.path.join(os.path.dirname(tmp_path), "measurement_results")
if not os.path.exists(measurement_dir):
log.info(f"Measurement dir {measurement_dir} doesn't exist. Creating it.")
os.mkdir(measurement_dir)
return measurement_dir
@pytest.fixture()
def multi_dc_pod(multi_pvc_factory, dc_pod_factory, service_account_factory):
"""
Prepare multiple dc pods for the test
Returns:
list: Pod instances
"""
def factory(
num_of_pvcs=1,
pvc_size=100,
project=None,
access_mode="RWO",
pool_type="rbd",
timeout=60,
):
dict_modes = {
"RWO": "ReadWriteOnce",
"RWX": "ReadWriteMany",
"RWX-BLK": "ReadWriteMany-Block",
}
dict_types = {"rbd": "CephBlockPool", "cephfs": "CephFileSystem"}
if access_mode in "RWX-BLK" and pool_type in "rbd":
modes = dict_modes["RWX-BLK"]
create_rbd_block_rwx_pod = True
else:
modes = dict_modes[access_mode]
create_rbd_block_rwx_pod = False
pvc_objs = multi_pvc_factory(
interface=dict_types[pool_type],
access_modes=[modes],
size=pvc_size,
num_of_pvc=num_of_pvcs,
project=project,
timeout=timeout,
)
dc_pods = []
dc_pods_res = []
sa_obj = service_account_factory(project=project)
with ThreadPoolExecutor() as p:
for pvc_obj in pvc_objs:
if create_rbd_block_rwx_pod:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=constants.CEPHBLOCKPOOL,
pvc=pvc_obj,
raw_block_pv=True,
sa_obj=sa_obj,
)
)
else:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=dict_types[pool_type],
pvc=pvc_obj,
sa_obj=sa_obj,
)
)
for dc in dc_pods_res:
pod_obj = dc.result()
if create_rbd_block_rwx_pod:
log.info(
"#### setting attribute pod_type since "
f"create_rbd_block_rwx_pod = {create_rbd_block_rwx_pod}"
)
setattr(pod_obj, "pod_type", "rbd_block_rwx")
else:
setattr(pod_obj, "pod_type", "")
dc_pods.append(pod_obj)
with ThreadPoolExecutor() as p:
for dc in dc_pods:
p.submit(
helpers.wait_for_resource_state,
resource=dc,
state=constants.STATUS_RUNNING,
timeout=120,
)
return dc_pods
return factory
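# Illustrative usage sketch (values are hypothetical; 'project' comes from the
# project_factory fixture):
#
#     def test_example(project_factory, multi_dc_pod):
#         project = project_factory()
#         rbd_dc_pods = multi_dc_pod(
#             num_of_pvcs=2,
#             pvc_size=10,
#             project=project,
#             access_mode="RWO",
#             pool_type="rbd",
#         )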
@pytest.fixture(scope="session")
def htpasswd_path(tmpdir_factory):
"""
Returns:
string: Path to HTPasswd file with additional usernames
"""
return str(tmpdir_factory.mktemp("idp_data").join("users.htpasswd"))
@pytest.fixture(scope="session")
def htpasswd_identity_provider(request):
"""
Creates HTPasswd Identity provider.
Returns:
object: OCS object representing OCP OAuth object with HTPasswd IdP
"""
users.create_htpasswd_idp()
cluster = OCS(kind=constants.OAUTH, metadata={"name": "cluster"})
cluster.reload()
def finalizer():
"""
Remove HTPasswd IdP
"""
# TODO(fbalak): remove HTPasswd identityProvider
# cluster.ocp.patch(
# resource_name='cluster',
# params=f'[{ "op": "remove", "path": "/spec/identityProviders" }]'
# )
# users.delete_htpasswd_secret()
request.addfinalizer(finalizer)
return cluster
@pytest.fixture(scope="function")
def user_factory(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(scope="session")
def user_factory_session(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(autouse=True)
def log_alerts(request):
"""
Log alerts at the beginning and end of each test case. At the end of test
case print a difference: what new alerts are in place after the test is
complete.
"""
teardown = config.RUN["cli_params"].get("teardown")
dev_mode = config.RUN["cli_params"].get("dev_mode")
if teardown:
return
elif dev_mode:
log.info("Skipping alert check for development mode")
return
alerts_before = []
prometheus = None
try:
prometheus = PrometheusAPI()
except Exception:
log.exception("There was a problem with connecting to Prometheus")
def _collect_alerts():
try:
alerts_response = prometheus.get(
"alerts", payload={"silenced": False, "inhibited": False}
)
if alerts_response.ok:
alerts = alerts_response.json().get("data").get("alerts")
log.debug(f"Found alerts: {alerts}")
return alerts
else:
log.warning(
f"There was a problem with collecting alerts for analysis: {alerts_response.text}"
)
return False
except Exception:
log.exception("There was a problem with collecting alerts for analysis")
return False
def _print_diff():
if alerts_before:
alerts_after = _collect_alerts()
if alerts_after:
alerts_new = [
alert for alert in alerts_after if alert not in alerts_before
]
if alerts_new:
log.warning("During test were raised new alerts")
log.warning(alerts_new)
alerts_before = _collect_alerts()
request.addfinalizer(_print_diff)
@pytest.fixture(scope="session", autouse=True)
def ceph_toolbox(request):
"""
This fixture initiates the ceph toolbox pod for manually created
deployments, if it does not already exist.
"""
deploy = config.RUN["cli_params"]["deploy"]
teardown = config.RUN["cli_params"].get("teardown")
skip_ocs = config.ENV_DATA["skip_ocs_deployment"]
deploy_teardown = deploy or teardown
managed_platform = (
config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
or config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM
)
if not (deploy_teardown or skip_ocs) or (managed_platform and not deploy_teardown):
try:
# Creating toolbox pod
setup_ceph_toolbox()
except CommandFailed:
log.info("Failed to create toolbox")
@pytest.fixture(scope="function")
def node_drain_teardown(request):
"""
Tear down function after Node drain
"""
def finalizer():
"""
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by marking them as schedulable
"""
scheduling_disabled_nodes = [
n.name
for n in get_node_objs()
if n.ocp.get_resource_status(n.name)
== constants.NODE_READY_SCHEDULING_DISABLED
]
if scheduling_disabled_nodes:
schedule_nodes(scheduling_disabled_nodes)
ceph_health_check(tries=60)
request.addfinalizer(finalizer)
@pytest.fixture(scope="function")
def node_restart_teardown(request, nodes):
"""
Make sure all nodes are up and in 'Ready' state and if not,
try to make them 'Ready' by restarting the nodes.
"""
def finalizer():
# Start the powered off nodes
nodes.restart_nodes_by_stop_and_start_teardown()
try:
node.wait_for_nodes_status(status=constants.NODE_READY)
except ResourceWrongStatusException:
# Restart the nodes if in NotReady state
not_ready_nodes = [
n
for n in node.get_node_objs()
if n.ocp.get_resource_status(n.name) == constants.NODE_NOT_READY
]
if not_ready_nodes:
log.info(
f"Nodes in NotReady status found: {[n.name for n in not_ready_nodes]}"
)
nodes.restart_nodes_by_stop_and_start(not_ready_nodes)
node.wait_for_nodes_status(status=constants.NODE_READY)
request.addfinalizer(finalizer)
@pytest.fixture()
def mcg_connection_factory(request, mcg_obj, cld_mgr):
"""
Create a new MCG connection for given platform. If there already exists
a connection for the platform then return this previously created
connection.
"""
created_connections = {}
def _create_connection(platform=constants.AWS_PLATFORM, name=None):
"""
Args:
platform (str): Platform used for connection
name (str): New connection name. If not provided then new name will
be generated. The new name will be used only if there is no
existing connection for the given platform
Returns:
str: connection name
"""
if platform not in created_connections:
connection_name = name or create_unique_resource_name(
constants.MCG_CONNECTION, platform
)
mcg_obj.create_connection(cld_mgr, platform, connection_name)
created_connections[platform] = connection_name
return created_connections[platform]
def _connections_cleanup():
for platform in created_connections:
mcg_obj.delete_ns_connection(created_connections[platform])
request.addfinalizer(_connections_cleanup)
return _create_connection
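# Illustrative usage sketch: repeated calls for the same platform return the
# previously created connection name (test name is hypothetical):
#
#     def test_example(mcg_connection_factory):
#         aws_connection_name = mcg_connection_factory(platform=constants.AWS_PLATFORM)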
@pytest.fixture()
def ns_resource_factory(
request, mcg_obj, cld_mgr, cloud_uls_factory, mcg_connection_factory
):
"""
Create a namespace resource factory. Calling this fixture creates a new namespace resource.
"""
created_ns_resources = []
def _create_ns_resources(platform=constants.AWS_PLATFORM):
# Create random connection_name
rand_connection = mcg_connection_factory(platform)
# Create the actual namespace resource
rand_ns_resource = create_unique_resource_name(
constants.MCG_NS_RESOURCE, platform
)
if platform == constants.RGW_PLATFORM:
region = None
else:
# TODO: fix this when https://github.com/red-hat-storage/ocs-ci/issues/3338
# is resolved
region = "us-east-2"
target_bucket_name = mcg_obj.create_namespace_resource(
rand_ns_resource,
rand_connection,
region,
cld_mgr,
cloud_uls_factory,
platform,
)
log.info(f"Check validity of NS resource {rand_ns_resource}")
if platform == constants.AWS_PLATFORM:
endpoint = constants.MCG_NS_AWS_ENDPOINT
elif platform == constants.AZURE_PLATFORM:
endpoint = constants.MCG_NS_AZURE_ENDPOINT
elif platform == constants.RGW_PLATFORM:
rgw_conn = RGW()
endpoint, _, _ = rgw_conn.get_credentials()
else:
raise UnsupportedPlatformError(f"Unsupported Platform: {platform}")
mcg_obj.check_ns_resource_validity(
rand_ns_resource, target_bucket_name, endpoint
)
created_ns_resources.append(rand_ns_resource)
return target_bucket_name, rand_ns_resource
def ns_resources_cleanup():
for ns_resource in created_ns_resources:
mcg_obj.delete_ns_resource(ns_resource)
request.addfinalizer(ns_resources_cleanup)
return _create_ns_resources
@pytest.fixture()
def namespace_store_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Namespace Store factory.
Calling this fixture creates a new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
@pytest.fixture(scope="session")
def namespace_store_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Namespace Store factory.
Calling this fixture creates a new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
@pytest.fixture()
def snapshot_factory(request):
"""
Snapshot factory. Calling this fixture creates a volume snapshot from the
specified PVC
"""
instances = []
def factory(pvc_obj, wait=True, snapshot_name=None):
"""
Args:
pvc_obj (PVC): PVC object from which snapshot has to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name (str): Name to be provided for snapshot
Returns:
OCS: OCS instance of kind VolumeSnapshot
"""
snap_obj = pvc_obj.create_snapshot(snapshot_name=snapshot_name, wait=wait)
instances.append(snap_obj)
return snap_obj
def finalizer():
"""
Delete the snapshots
"""
snapcontent_objs = []
# Get VolumeSnapshotContent form VolumeSnapshots and delete
# VolumeSnapshots
for instance in instances:
if not instance.is_deleted:
snapcontent_objs.append(
helpers.get_snapshot_content_obj(snap_obj=instance)
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for VolumeSnapshotContents to be deleted
for snapcontent_obj in snapcontent_objs:
snapcontent_obj.ocp.wait_for_delete(
resource_name=snapcontent_obj.name, timeout=240
)
request.addfinalizer(finalizer)
return factory
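# Illustrative usage sketch ('pvc_factory' is assumed to be the PVC factory
# fixture defined earlier in this module; the size is hypothetical):
#
#     def test_example(pvc_factory, snapshot_factory):
#         pvc_obj = pvc_factory(size=3)
#         snap_obj = snapshot_factory(pvc_obj, wait=True)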
@pytest.fixture()
def multi_snapshot_factory(snapshot_factory):
"""
Snapshot factory. Calling this fixture creates volume snapshots of each
PVC in the provided list
"""
def factory(pvc_obj, wait=True, snapshot_name_suffix=None):
"""
Args:
pvc_obj (list): List of PVC objects from which snapshots have to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name_suffix (str): Suffix to be added to snapshot
Returns:
OCS: List of OCS instances of kind VolumeSnapshot
"""
snapshot = []
for obj in pvc_obj:
log.info(f"Creating snapshot of PVC {obj.name}")
snapshot_name = (
f"{obj.name}-{snapshot_name_suffix}" if snapshot_name_suffix else None
)
snap_obj = snapshot_factory(
pvc_obj=obj, snapshot_name=snapshot_name, wait=wait
)
snapshot.append(snap_obj)
return snapshot
return factory
@pytest.fixture()
def snapshot_restore_factory(request):
"""
Snapshot restore factory. Calling this fixture creates a new PVC out of
the specified VolumeSnapshot.
"""
instances = []
def factory(
snapshot_obj,
restore_pvc_name=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
):
"""
Args:
snapshot_obj (OCS): OCS instance of kind VolumeSnapshot which has
to be restored to new PVC
restore_pvc_name (str): Name to be provided for restored pvc
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
Returns:
PVC: Restored PVC object
"""
snapshot_info = snapshot_obj.get()
size = size or snapshot_info["status"]["restoreSize"]
restore_pvc_name = restore_pvc_name or (
helpers.create_unique_resource_name(snapshot_obj.name, "restore")
)
if snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHBLOCKPOOL).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHBLOCKPOOL).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_RBD_PVC_RESTORE_YAML
interface = constants.CEPHBLOCKPOOL
elif snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHFILESYSTEM).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHFILESYSTEM).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_CEPHFS_PVC_RESTORE_YAML
interface = constants.CEPHFILESYSTEM
restored_pvc = create_restore_pvc(
sc_name=storageclass,
snap_name=snapshot_obj.name,
namespace=snapshot_obj.namespace,
size=size,
pvc_name=restore_pvc_name,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
)
instances.append(restored_pvc)
restored_pvc.snapshot = snapshot_obj
restored_pvc.interface = interface
if status:
helpers.wait_for_resource_state(restored_pvc, status)
return restored_pvc
def finalizer():
"""
Delete the PVCs
"""
pv_objs = []
# Get PV from PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
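# Illustrative usage sketch chaining the snapshot and restore factories
# (test body and size are hypothetical):
#
#     def test_example(pvc_factory, snapshot_factory, snapshot_restore_factory):
#         pvc_obj = pvc_factory(size=3)
#         snap_obj = snapshot_factory(pvc_obj)
#         restored_pvc = snapshot_restore_factory(snapshot_obj=snap_obj)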
@pytest.fixture()
def multi_snapshot_restore_factory(snapshot_restore_factory):
"""
Snapshot restore factory. Calling this fixture creates a set of new PVCs,
one from each VolumeSnapshot provided in the list.
"""
def factory(
snapshot_obj,
restore_pvc_suffix=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
wait_each=False,
):
"""
Args:
snapshot_obj (list): List of OCS instances of kind VolumeSnapshot which
have to be restored to new PVCs
restore_pvc_suffix (str): Suffix to be added to pvc name
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
list: List of restored PVC objects
"""
new_pvcs = []
status_tmp = status if wait_each else ""
for snap_obj in snapshot_obj:
log.info(f"Creating a PVC from snapshot {snap_obj.name}")
restore_pvc_name = (
f"{snap_obj.name}-{restore_pvc_suffix}" if restore_pvc_suffix else None
)
restored_pvc = snapshot_restore_factory(
snapshot_obj=snap_obj,
restore_pvc_name=restore_pvc_name,
storageclass=storageclass,
size=size,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
status=status_tmp,
)
restored_pvc.snapshot = snapshot_obj
new_pvcs.append(restored_pvc)
if status and not wait_each:
for restored_pvc in new_pvcs:
helpers.wait_for_resource_state(restored_pvc, status)
return new_pvcs
return factory
@pytest.fixture(scope="session", autouse=True)
def collect_logs_fixture(request):
"""
This fixture collects ocs logs after tier execution, which allows us to
see the cluster's status after the execution for all execution status options.
"""
def finalizer():
"""
Tracking both logs separately reduces the chance of collision
"""
if not config.RUN["cli_params"].get("deploy") and not config.RUN[
"cli_params"
].get("teardown"):
if config.REPORTING["collect_logs_on_success_run"]:
collect_ocs_logs("testcases", ocs=False, status_failure=False)
collect_ocs_logs("testcases", ocp=False, status_failure=False)
request.addfinalizer(finalizer)
def get_ready_noobaa_endpoint_count(namespace):
"""
Get the number of ready noobaa endpoints
"""
pods_info = get_pods_having_label(
label=constants.NOOBAA_ENDPOINT_POD_LABEL, namespace=namespace
)
ready_count = 0
for ep_info in pods_info:
container_statuses = ep_info.get("status", {}).get("containerStatuses")
if container_statuses is not None and len(container_statuses) > 0:
if container_statuses[0].get("ready"):
ready_count += 1
return ready_count
@pytest.fixture(scope="function")
def nb_ensure_endpoint_count(request):
"""
Validate and ensure the number of running noobaa endpoints
"""
cls = request.cls
min_ep_count = cls.MIN_ENDPOINT_COUNT
max_ep_count = cls.MAX_ENDPOINT_COUNT
assert min_ep_count <= max_ep_count
namespace = defaults.ROOK_CLUSTER_NAMESPACE
should_wait = False
# prior to 4.6 we configured the ep count directly on the noobaa cr.
if version.get_semantic_ocs_version_from_config() < version.VERSION_4_6:
noobaa = OCP(kind="noobaa", namespace=namespace)
resource = noobaa.get()["items"][0]
endpoints = resource.get("spec", {}).get("endpoints", {})
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
else:
storage_cluster = OCP(kind=constants.STORAGECLUSTER, namespace=namespace)
resource = storage_cluster.get()["items"][0]
resource_name = resource["metadata"]["name"]
endpoints = (
resource.get("spec", {}).get("multiCloudGateway", {}).get("endpoints", {})
)
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if should_wait:
# Wait for the NooBaa endpoint pods to stabilize
try:
for ready_nb_ep_count in TimeoutSampler(
300, 30, get_ready_noobaa_endpoint_count, namespace
):
if min_ep_count <= ready_nb_ep_count <= max_ep_count:
log.info(
f"NooBaa endpoints stabilized. Ready endpoints: {ready_nb_ep_count}"
)
break
log.info(
f"Waiting for the NooBaa endpoints to stabilize. "
f"Current ready count: {ready_nb_ep_count}"
)
except TimeoutExpiredError:
raise TimeoutExpiredError(
"NooBaa endpoints did not stabilize in time.\n"
f"Min count: {min_ep_count}, max count: {max_ep_count}, ready count: {ready_nb_ep_count}"
)
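# Illustrative usage sketch: the fixture reads MIN_ENDPOINT_COUNT and
# MAX_ENDPOINT_COUNT from the requesting test class (class name and values
# below are hypothetical):
#
#     class TestEndpointScaling:
#         MIN_ENDPOINT_COUNT = 1
#         MAX_ENDPOINT_COUNT = 2
#
#         def test_example(self, nb_ensure_endpoint_count):
#             ...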
@pytest.fixture()
def pvc_clone_factory(request):
"""
Calling this fixture creates a clone from the specified PVC
"""
instances = []
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
):
"""
Args:
pvc_obj (PVC): PVC object from which clone has to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
Returns:
PVC: PVC instance
"""
assert (
pvc_obj.provisioner in constants.OCS_PROVISIONERS
), f"Unknown provisioner in PVC {pvc_obj.name}"
if pvc_obj.provisioner == "openshift-storage.rbd.csi.ceph.com":
clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
interface = constants.CEPHBLOCKPOOL
elif pvc_obj.provisioner == "openshift-storage.cephfs.csi.ceph.com":
clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
interface = constants.CEPHFILESYSTEM
size = size or pvc_obj.get().get("spec").get("resources").get("requests").get(
"storage"
)
storageclass = storageclass or pvc_obj.backed_sc
access_mode = access_mode or pvc_obj.get_pvc_access_mode
volume_mode = volume_mode or getattr(pvc_obj, "volume_mode", None)
# Create clone
clone_pvc_obj = pvc.create_pvc_clone(
sc_name=storageclass,
parent_pvc=pvc_obj.name,
clone_yaml=clone_yaml,
pvc_name=clone_name,
namespace=pvc_obj.namespace,
storage_size=size,
access_mode=access_mode,
volume_mode=volume_mode,
)
instances.append(clone_pvc_obj)
clone_pvc_obj.parent = pvc_obj
clone_pvc_obj.volume_mode = volume_mode
clone_pvc_obj.interface = interface
if status:
helpers.wait_for_resource_state(clone_pvc_obj, status)
return clone_pvc_obj
def finalizer():
"""
Delete the cloned PVCs
"""
pv_objs = []
# Get PV from PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
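# Illustrative usage sketch (the interface and size are hypothetical):
#
#     def test_example(pvc_factory, pvc_clone_factory):
#         pvc_obj = pvc_factory(interface=constants.CEPHBLOCKPOOL, size=3)
#         cloned_pvc = pvc_clone_factory(pvc_obj)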
@pytest.fixture(scope="session", autouse=True)
def reportportal_customization(request):
if config.REPORTING.get("rp_launch_url"):
request.config._metadata["RP Launch URL:"] = config.REPORTING["rp_launch_url"]
@pytest.fixture()
def multi_pvc_clone_factory(pvc_clone_factory):
"""
Calling this fixture creates a clone from each PVC in the provided list of PVCs
"""
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
wait_each=False,
):
"""
Args:
pvc_obj (list): List of PVC objects from which clones have to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
list: List of cloned PVC instances
"""
cloned_pvcs = []
status_tmp = status if wait_each else ""
for obj in pvc_obj:
# Create clone
clone_pvc_obj = pvc_clone_factory(
pvc_obj=obj,
clone_name=clone_name,
storageclass=storageclass,
size=size,
access_mode=access_mode,
volume_mode=volume_mode,
status=status_tmp,
)
cloned_pvcs.append(clone_pvc_obj)
if status and not wait_each:
for cloned_pvc in cloned_pvcs:
helpers.wait_for_resource_state(cloned_pvc, status)
return cloned_pvcs
return factory
@pytest.fixture(scope="function")
def multiple_snapshot_and_clone_of_postgres_pvc_factory(
request,
multi_snapshot_factory,
multi_snapshot_restore_factory,
multi_pvc_clone_factory,
):
"""
Calling this fixture creates multiple snapshots & clones of postgres PVCs
"""
instances = []
def factory(pvc_size_new, pgsql):
"""
Args:
pvc_size_new (int): New size to which the PVCs will be resized/expanded
pgsql (obj): Pgsql obj
Returns:
list: Postgres pod instances
"""
# Get postgres pvc list obj
postgres_pvcs_obj = pgsql.get_postgres_pvc()
snapshots = multi_snapshot_factory(pvc_obj=postgres_pvcs_obj)
log.info("Created snapshots from all the PVCs and snapshots are in Ready state")
restored_pvc_objs = multi_snapshot_restore_factory(snapshot_obj=snapshots)
log.info("Created new PVCs from all the snapshots")
cloned_pvcs = multi_pvc_clone_factory(
pvc_obj=restored_pvc_objs, volume_mode=constants.VOLUME_MODE_FILESYSTEM
)
log.info("Created new PVCs from all restored volumes")
# Attach a new pgsql pod to the cloned PVCs
sset_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=cloned_pvcs, postgres_name="postgres-clone", run_benchmark=False
)
instances.extend(sset_list)
# Resize cloned PVCs
for pvc_obj in cloned_pvcs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
new_snapshots = multi_snapshot_factory(pvc_obj=cloned_pvcs)
log.info(
"Created snapshots from all the cloned PVCs"
" and snapshots are in Ready state"
)
new_restored_pvc_objs = multi_snapshot_restore_factory(
snapshot_obj=new_snapshots
)
log.info("Created new PVCs from all the snapshots and in Bound state")
# Attach a new pgsql pod to the restored PVCs
pgsql_obj_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=new_restored_pvc_objs,
postgres_name="postgres-clone-restore",
run_benchmark=False,
)
instances.extend(pgsql_obj_list)
# Resize restored PVCs
for pvc_obj in new_restored_pvc_objs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
return instances
def finalizer():
"""
Delete the list of pod objects created
"""
for instance in instances:
if not instance.is_deleted:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def es(request):
"""
Create In-cluster elastic-search deployment for benchmark-operator tests.
Using the name 'es' as a shortcut for elastic-search, for simplicity.
"""
def teardown():
es.cleanup()
request.addfinalizer(teardown)
es = ElasticSearch()
return es
@pytest.fixture(scope="session")
def setup_ui_session(request):
return setup_ui_fixture(request)
@pytest.fixture(scope="class")
def setup_ui_class(request):
return setup_ui_fixture(request)
@pytest.fixture(scope="function")
def setup_ui(request):
return setup_ui_fixture(request)
def setup_ui_fixture(request):
driver = login_ui()
def finalizer():
close_browser(driver)
request.addfinalizer(finalizer)
return driver
@pytest.fixture(scope="session", autouse=True)
def load_cluster_info_file(request):
"""
This fixture tries to load the cluster_info.json file if it exists (on clusters
installed via Flexy) and applies the information to the config object (for
example, information related to a disconnected cluster).
"""
load_cluster_info()
@pytest.fixture(scope="function")
def pv_encryption_kms_setup_factory(request):
"""
Create vault resources and set up the csi-kms-connection-details configMap
"""
vault = KMS.Vault()
def factory(kv_version):
"""
Args:
kv_version (str): KV version to be used, either v1 or v2
Returns:
object: Vault(KMS) object
"""
vault.gather_init_vault_conf()
vault.update_vault_env_vars()
# Check if cert secrets already exist, if not create cert resources
ocp_obj = OCP(kind="secret", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
try:
ocp_obj.get_resource(resource_name="ocs-kms-ca-secret", column="NAME")
except CommandFailed as cfe:
if "not found" not in str(cfe):
raise
else:
vault.create_ocs_vault_cert_resources()
# Create vault namespace, backend path and policy in vault
vault_resource_name = create_unique_resource_name("test", "vault")
vault.vault_create_namespace(namespace=vault_resource_name)
vault.vault_create_backend_path(
backend_path=vault_resource_name, kv_version=kv_version
)
vault.vault_create_policy(policy_name=vault_resource_name)
# If csi-kms-connection-details exists, edit the configmap to add new vault config
ocp_obj = OCP(kind="configmap", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
try:
ocp_obj.get_resource(
resource_name="csi-kms-connection-details", column="NAME"
)
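# Configmap exists: re-key the VAULT_CSI_CONNECTION_CONF template to the new
# kmsid and point it at the newly created Vault backend path and namespace.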
new_kmsid = vault_resource_name
vdict = defaults.VAULT_CSI_CONNECTION_CONF
for key in vdict.keys():
old_key = key
vdict[new_kmsid] = vdict.pop(old_key)
vdict[new_kmsid]["VAULT_BACKEND_PATH"] = vault_resource_name
vdict[new_kmsid]["VAULT_NAMESPACE"] = vault_resource_name
vault.kmsid = vault_resource_name
if kv_version == "v1":
vdict[new_kmsid]["VAULT_BACKEND"] = "kv"
else:
vdict[new_kmsid]["VAULT_BACKEND"] = "kv-v2"
KMS.update_csi_kms_vault_connection_details(vdict)
except CommandFailed as cfe:
if "not found" not in str(cfe):
raise
else:
vault.kmsid = "1-vault"
vault.create_vault_csi_kms_connection_details(kv_version=kv_version)
return vault
def finalizer():
"""
Remove the vault config from csi-kms-connection-details configMap
"""
if len(KMS.get_encryption_kmsid()) > 1:
KMS.remove_kmsid(vault.kmsid)
# Delete the resources in vault
vault.remove_vault_backend_path()
vault.remove_vault_policy()
vault.remove_vault_namespace()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def cephblockpool_factory_ui_class(request, setup_ui_class):
return cephblockpool_factory_ui_fixture(request, setup_ui_class)
@pytest.fixture(scope="session")
def cephblockpool_factory_ui_session(request, setup_ui_session):
return cephblockpool_factory_ui_fixture(request, setup_ui_session)
@pytest.fixture(scope="function")
def cephblockpool_factory_ui(request, setup_ui):
return cephblockpool_factory_ui_fixture(request, setup_ui)
def cephblockpool_factory_ui_fixture(request, setup_ui):
"""
This function creates a new CephBlockPool via the UI.
"""
instances = []
def factory(
replica=3,
compression=False,
):
"""
Args:
replica (int): replica size of the pool; 2 and 3 are supported for now
compression (bool): True to enable compression otherwise False
Return:
(ocs_ci.ocs.resource.ocs) ocs object of the CephBlockPool.
"""
blockpool_ui_object = BlockPoolUI(setup_ui)
pool_name, pool_status = blockpool_ui_object.create_pool(
replica=replica, compression=compression
)
if pool_status:
log.info(
f"Pool {pool_name} with replica {replica} and compression {compression} was created and "
f"is in ready state"
)
ocs_blockpool_obj = create_ocs_object_from_kind_and_name(
kind=constants.CEPHBLOCKPOOL,
resource_name=pool_name,
)
instances.append(ocs_blockpool_obj)
return ocs_blockpool_obj
else:
blockpool_ui_object.take_screenshot()
if pool_name:
instances.append(
create_ocs_object_from_kind_and_name(
kind=constants.CEPHBLOCKPOOL, resource_name=pool_name
)
)
raise PoolDidNotReachReadyState(
f"Pool {pool_name} with replica {replica} and compression {compression}"
f" did not reach ready state"
)
def finalizer():
"""
Delete the cephblockpool from the UI and, if that fails, from the CLI
"""
for instance in instances:
try:
instance.get()
except CommandFailed:
log.warning("Pool is already deleted")
continue
blockpool_ui_obj = BlockPoolUI(setup_ui)
if not blockpool_ui_obj.delete_pool(instance.name):
instance.delete()
raise PoolNotDeletedFromUI(
f"Could not delete block pool {instances.name} from UI."
f" Deleted from CLI"
)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def storageclass_factory_ui_class(
request, cephblockpool_factory_ui_class, setup_ui_class
):
return storageclass_factory_ui_fixture(
request, cephblockpool_factory_ui_class, setup_ui_class
)
@pytest.fixture(scope="session")
def storageclass_factory_ui_session(
request, cephblockpool_factory_ui_session, setup_ui_session
):
return storageclass_factory_ui_fixture(
request, cephblockpool_factory_ui_session, setup_ui_session
)
@pytest.fixture(scope="function")
def storageclass_factory_ui(request, cephblockpool_factory_ui, setup_ui):
return storageclass_factory_ui_fixture(request, cephblockpool_factory_ui, setup_ui)
def storageclass_factory_ui_fixture(request, cephblockpool_factory_ui, setup_ui):
"""
This function creates a new storageclass via the UI.
"""
instances = []
def factory(
provisioner=constants.OCS_PROVISIONERS[0],
compression=False,
replica=3,
create_new_pool=False,
encryption=False, # not implemented yet
reclaim_policy=constants.RECLAIM_POLICY_DELETE, # not implemented yet
default_pool=constants.DEFAULT_BLOCKPOOL,
existing_pool=None,
):
"""
Args:
provisioner (str): The name of the provisioner. Default is openshift-storage.rbd.csi.ceph.com
compression (bool): if create_new_pool is True, compression will be set if True.
replica (int): if create_new_pool is True, replica will be set.
create_new_pool (bool): True to create new pool with factory.
encryption (bool): enable PV encryption if True.
reclaim_policy (str): Reclaim policy for the storageclass.
existing_pool (str): Name of an existing pool to be used by the storageclass.
Return:
(ocs_ci.ocs.resource.ocs) ocs object of the storageclass.
"""
storageclass_ui_object = StorageClassUI(setup_ui)
if existing_pool is None and create_new_pool is False:
pool_name = default_pool
if create_new_pool is True:
pool_ocs_obj = cephblockpool_factory_ui(
replica=replica, compression=compression
)
pool_name = pool_ocs_obj.name
if existing_pool is not None:
pool_name = existing_pool
sc_name = storageclass_ui_object.create_storageclass(pool_name)
if sc_name is None:
log.error("Storageclass was not created")
raise StorageclassNotCreated(
"Storageclass is not found in storageclass list page"
)
else:
log.info(f"Storageclass created with name {sc_name}")
sc_obj = create_ocs_object_from_kind_and_name(
resource_name=sc_name, kind=constants.STORAGECLASS
)
instances.append(sc_obj)
log.info(f"{sc_obj.get()}")
return sc_obj
def finalizer():
for instance in instances:
try:
instance.get()
except CommandFailed:
log.warning("Storageclass is already deleted")
continue
storageclass_ui_obj = StorageClassUI(setup_ui)
if not storageclass_ui_obj.delete_rbd_storage_class(instance.name):
instance.delete()
raise StorageClassNotDeletedFromUI(
f"Could not delete storageclass {instances.name} from UI."
f"Deleted from CLI"
)
request.addfinalizer(finalizer)
return factory
| []
| []
| [
"RELEASE_IMG",
"OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE"
]
| [] | ["RELEASE_IMG", "OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE"] | python | 2 | 0 | |
ourrevolution/wsgi.py | """
WSGI config for ourrevolution project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ourrevolution.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
bench-scripts/bench_histogram_nominal.py | #!/usr/bin/env python
from common import *
configure_network()
subprocess.run(["cmake", "."])
subprocess.run(["make"])
print("running the bench_histogram_nominal")
num_records = 100000
print("running the case for " + str(num_records) + " entries")
time.sleep(5)
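# Sweep the number of histogram groups from 10 to 50 in steps of 10, re-running the benchmark for each setting.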
for i in range(5):
num_groups = 10 + 10 * i
write_configure_info(str(num_records) + " " + str(num_groups))
subprocess.run(["bin/bench_histogram_nominal", os.getenv("EMP_MY_PARTY_ID"), "5000"])
copy_benchmark_result_to_log("bench_histogram_nominal " + str(num_records) + " " + str(num_groups))
| []
| []
| [
"EMP_MY_PARTY_ID"
]
| [] | ["EMP_MY_PARTY_ID"] | python | 1 | 0 | |
astropy/config/paths.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" This module contains functions to determine where configuration and
data/cache files used by Astropy should be placed.
"""
from functools import wraps
import os
import shutil
import sys
__all__ = ['get_config_dir', 'get_cache_dir', 'set_temp_config',
'set_temp_cache']
def _find_home():
""" Locates and return the home directory (or best approximation) on this
system.
Raises
------
OSError
If the home directory cannot be located - usually means you are running
Astropy on some obscure platform that doesn't have standard home
directories.
"""
# First find the home directory - this is inspired by the scheme ipython
# uses to identify "home"
if os.name == 'posix':
# Linux, Unix, AIX, OS X
if 'HOME' in os.environ:
homedir = os.environ['HOME']
else:
raise OSError('Could not find unix home directory to search for '
'astropy config dir')
elif os.name == 'nt': # This is for all modern Windows (NT or after)
if 'MSYSTEM' in os.environ and os.environ.get('HOME'):
# Likely using an msys shell; use whatever it is using for its
# $HOME directory
homedir = os.environ['HOME']
# Next try for a network home
elif 'HOMESHARE' in os.environ:
homedir = os.environ['HOMESHARE']
# See if there's a local home
elif 'HOMEDRIVE' in os.environ and 'HOMEPATH' in os.environ:
homedir = os.path.join(os.environ['HOMEDRIVE'],
os.environ['HOMEPATH'])
# Maybe a user profile?
elif 'USERPROFILE' in os.environ:
homedir = os.path.join(os.environ['USERPROFILE'])
else:
try:
import winreg as wreg
shell_folders = r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders)
homedir = wreg.QueryValueEx(key, 'Personal')[0]
key.Close()
except Exception:
# As a final possible resort, see if HOME is present
if 'HOME' in os.environ:
homedir = os.environ['HOME']
else:
raise OSError('Could not find windows home directory to '
'search for astropy config dir')
else:
# for other platforms, try HOME, although it probably isn't there
if 'HOME' in os.environ:
homedir = os.environ['HOME']
else:
raise OSError('Could not find a home directory to search for '
'astropy config dir - are you on an unsupported '
'platform?')
return homedir
def get_config_dir(rootname='astropy'):
"""
Determines the package configuration directory name and creates the
directory if it doesn't exist.
This directory is typically ``$HOME/.astropy/config``, but if the
XDG_CONFIG_HOME environment variable is set and the
``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Parameters
----------
rootname : str
Name of the root configuration directory. For example, if ``rootname =
'pkgname'``, the configuration directory would be ``<home>/.pkgname/``
rather than ``<home>/.astropy`` (depending on platform).
Returns
-------
configdir : str
The absolute path to the configuration directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_config, that overrides all
if set_temp_config._temp_path is not None:
xch = set_temp_config._temp_path
config_path = os.path.join(xch, rootname)
if not os.path.exists(config_path):
os.mkdir(config_path)
return os.path.abspath(config_path)
# first look for XDG_CONFIG_HOME
xch = os.environ.get('XDG_CONFIG_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, rootname)
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_root_dir('config', linkto, rootname))
def get_cache_dir(rootname="astropy"):
"""
Determines the Astropy cache directory name and creates the directory if it
doesn't exist.
This directory is typically ``$HOME/.astropy/cache``, but if the
XDG_CACHE_HOME environment variable is set and the
``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Parameters
----------
rootname : str
Name of the root cache directory. For example, if
``rootname = 'pkgname'``, the cache directory will be
``<cache>/.pkgname/``.
Returns
-------
cachedir : str
The absolute path to the cache directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_cache, that overrides all
if set_temp_cache._temp_path is not None:
xch = set_temp_cache._temp_path
cache_path = os.path.join(xch, rootname)
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return os.path.abspath(cache_path)
# first look for XDG_CACHE_HOME
xch = os.environ.get('XDG_CACHE_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, rootname)
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_root_dir('cache', linkto, rootname))
class _SetTempPath:
_temp_path = None
_default_path_getter = None
def __init__(self, path=None, delete=False):
if path is not None:
path = os.path.abspath(path)
self._path = path
self._delete = delete
self._prev_path = self.__class__._temp_path
def __enter__(self):
self.__class__._temp_path = self._path
try:
return self._default_path_getter('astropy')
except Exception:
self.__class__._temp_path = self._prev_path
raise
def __exit__(self, *args):
self.__class__._temp_path = self._prev_path
if self._delete and self._path is not None:
shutil.rmtree(self._path)
def __call__(self, func):
"""Implements use as a decorator."""
@wraps(func)
def wrapper(*args, **kwargs):
with self:
func(*args, **kwargs)
return wrapper
class set_temp_config(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy config, primarily
for use with testing.
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the config path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy config
files, or create them if they do not already exist. If None, this
restores the config path to the user's default config path as returned
by `get_config_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_config_dir)
def __enter__(self):
# Special case for the config case, where we need to reset all the
# cached config objects
from .configuration import _cfgobjs
path = super().__enter__()
_cfgobjs.clear()
return path
def __exit__(self, *args):
from .configuration import _cfgobjs
super().__exit__(*args)
_cfgobjs.clear()
class set_temp_cache(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy download cache,
primarily for use with testing (though there may be other applications
for setting a different cache directory, for example to switch to a cache
dedicated to large files).
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the cache path
just within that function.
Parameters
----------
path : str
The directory (which must exist) in which to find the Astropy cache
files, or create them if they do not already exist. If None, this
restores the cache path to the user's default cache path as returned
by `get_cache_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_cache_dir)
def _find_or_create_root_dir(dirnm, linkto, pkgname='astropy'):
innerdir = os.path.join(_find_home(), '.{}'.format(pkgname))
maindir = os.path.join(_find_home(), '.{}'.format(pkgname), dirnm)
if not os.path.exists(maindir):
# first create .astropy dir if needed
if not os.path.exists(innerdir):
try:
os.mkdir(innerdir)
except OSError:
if not os.path.isdir(innerdir):
raise
elif not os.path.isdir(innerdir):
msg = 'Intended {0} {1} directory {2} is actually a file.'
raise OSError(msg.format(pkgname, dirnm, maindir))
try:
os.mkdir(maindir)
except OSError:
if not os.path.isdir(maindir):
raise
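# Symlink the XDG location to the newly created directory so both paths
# resolve to the same place (not done on Windows).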
if (not sys.platform.startswith('win') and
linkto is not None and
not os.path.exists(linkto)):
os.symlink(maindir, linkto)
elif not os.path.isdir(maindir):
msg = 'Intended {0} {1} directory {2} is actually a file.'
raise OSError(msg.format(pkgname, dirnm, maindir))
return os.path.abspath(maindir)
| []
| []
| [
"HOMEPATH",
"HOMEDRIVE",
"XDG_CACHE_HOME",
"HOMESHARE",
"USERPROFILE",
"HOME",
"XDG_CONFIG_HOME"
]
| [] | ["HOMEPATH", "HOMEDRIVE", "XDG_CACHE_HOME", "HOMESHARE", "USERPROFILE", "HOME", "XDG_CONFIG_HOME"] | python | 7 | 0 | |
main.go | package main
import (
"encoding/json"
"io"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
"golang.org/x/crypto/ssh"
)
var (
uri = os.Getenv("SKA_KEY_URI")
groups = os.Getenv("SKA_GROUPS")
// interval in seconds
interval = os.Getenv("SKA_INTERVAL")
// authorized_keys file location
akfLoc = os.Getenv("SKA_AKF_LOC")
)
type keyMap struct {
LastUpdated string `json:"last_updated"`
Groups []group `json:"groups"`
}
type group struct {
Name string `json:"name"`
Keys []string `json:"keys"`
}
func validate() {
if uri == "" {
log.Fatal("Need to set value for SKA_KEY_URI")
}
if groups == "" {
log.Fatal("Need to set value for SKA_GROUPS")
}
if interval == "" {
log.Fatal("Need to set value for SKA_INTERVAL")
} else {
_, err := strconv.Atoi(interval)
if err != nil {
log.Fatal("SKA_INTERVAL must be an int")
}
}
if akfLoc == "" {
log.Fatal("Need to set value for SKA_AKF_LOC")
}
}
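// keysFromMap collects the authorized keys for the configured groups,
// normalizes them and drops duplicates.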
func keysFromMap(keyMap *keyMap) (keys []string) {
var rawKeys []string
groupNames := strings.Split(groups, ",")
for _, gn := range groupNames {
for _, g := range keyMap.Groups {
if gn == g.Name {
for _, k := range g.Keys {
key, comment, _, _, err := ssh.ParseAuthorizedKey([]byte(k))
if err != nil {
log.Printf("%v", err)
continue
}
// sanitize
fmtKey := strings.TrimSuffix(string(ssh.MarshalAuthorizedKey(key)), "\n") + " " + comment + "\n"
rawKeys = append(rawKeys, fmtKey)
}
}
}
}
// dedup keys
dk := make(map[string]bool)
for _, v := range rawKeys {
if !dk[v] {
keys = append(keys, v)
dk[v] = true
}
}
return
}
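// writeKeys overwrites the authorized_keys file with the given keys; it
// refuses to write when the list is empty.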
func writeKeys(keys []string) {
if len(keys) == 0 {
log.Printf("Found 0 keys, need at least 1 to write")
return
}
var out []byte
for _, k := range keys {
out = append(out, k...)
}
err := os.WriteFile(akfLoc, out, 0644)
if err != nil {
log.Printf("%v", err)
}
}
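// updateKeys fetches the key map from SKA_KEY_URI and rewrites the
// authorized_keys file.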
func updateKeys() {
var keyMap keyMap
resp, err := http.Get(uri)
if err != nil {
log.Printf("%v", err)
return
}
// Check the error before deferring, so a nil response is never dereferenced.
defer func() {
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}()
dec := json.NewDecoder(resp.Body)
err = dec.Decode(&keyMap)
if err != nil {
log.Printf("%v", err)
return
}
keys := keysFromMap(&keyMap)
writeKeys(keys)
}
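// sync runs updateKeys immediately and then once per configured interval, forever.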
func sync() {
intrv, _ := strconv.Atoi(interval)
for t := time.Tick(time.Second * time.Duration(intrv)); ; <-t {
updateKeys()
}
}
func main() {
validate()
log.Printf("Running...")
sync()
}
| [
"\"SKA_KEY_URI\"",
"\"SKA_GROUPS\"",
"\"SKA_INTERVAL\"",
"\"SKA_AKF_LOC\""
]
| []
| [
"SKA_KEY_URI",
"SKA_INTERVAL",
"SKA_AKF_LOC",
"SKA_GROUPS"
]
| [] | ["SKA_KEY_URI", "SKA_INTERVAL", "SKA_AKF_LOC", "SKA_GROUPS"] | go | 4 | 0 | |
obj/obj_import_mtl_test.go | package obj
import (
"os"
"testing"
)
func TestImportMtl(t *testing.T) {
if os.Getenv("SINGLE_TEST") != "1" {
return
}
obj := &ObjData{}
err := importMtl(obj, "../eq/soldungb/cache/soldungb.mtl")
if err != nil {
t.Fatalf("importMtl: %s", err)
}
t.Fatalf("%+v", obj)
}
| [
"\"SINGLE_TEST\""
]
| []
| [
"SINGLE_TEST"
]
| [] | ["SINGLE_TEST"] | go | 1 | 0 | |
go/src/infra/cmd/drone-agent/main.go | // Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// +build !windows
// Command drone-agent is the client that talks to the drone queen
// service to provide Swarming bots for running tasks against test
// devices. See the README.
package main
import (
"context"
"log"
"os"
"path/filepath"
"strconv"
"sync"
"time"
"go.chromium.org/luci/auth"
"go.chromium.org/luci/grpc/prpc"
"infra/appengine/drone-queen/api"
"infra/cmd/drone-agent/internal/agent"
"infra/cmd/drone-agent/internal/bot"
"infra/cmd/drone-agent/internal/draining"
"infra/cmd/drone-agent/internal/tokman"
)
const (
drainingFile = "drone-agent.drain"
oauthTokenPath = "/var/lib/swarming/oauth_bot_token.json"
)
var (
queenService = os.Getenv("DRONE_AGENT_QUEEN_SERVICE")
// DRONE_AGENT_SWARMING_URL is the URL of the Swarming
// instance. Should be a full URL without the path,
// e.g. https://host.example.com
swarmingURL = os.Getenv("DRONE_AGENT_SWARMING_URL")
dutCapacity = getIntEnv("DRONE_AGENT_DUT_CAPACITY", 10)
reportingInterval = time.Duration(getIntEnv("DRONE_AGENT_REPORTING_INTERVAL_MINS", 1)) * time.Minute
authOptions = auth.Options{
Method: auth.ServiceAccountMethod,
ServiceAccountJSONPath: os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"),
}
workingDirPath = filepath.Join(os.Getenv("HOME"), "skylab_bots")
)
func main() {
if err := innerMain(); err != nil {
log.Fatal(err)
}
}
func innerMain() error {
// TODO(ayatane): Add environment validation.
ctx := context.Background()
ctx = notifySIGTERM(ctx)
ctx = notifyDraining(ctx, filepath.Join(workingDirPath, drainingFile))
var wg sync.WaitGroup
defer wg.Wait()
authn := auth.NewAuthenticator(ctx, auth.SilentLogin, authOptions)
r, err := tokman.Make(authn, oauthTokenPath, time.Minute)
if err != nil {
return err
}
wg.Add(1)
go func() {
r.KeepNew(ctx)
wg.Done()
}()
h, err := authn.Client()
if err != nil {
return err
}
if err := os.MkdirAll(workingDirPath, 0777); err != nil {
return err
}
a := agent.Agent{
Client: api.NewDronePRPCClient(&prpc.Client{
C: h,
Host: queenService,
}),
SwarmingURL: swarmingURL,
WorkingDir: workingDirPath,
ReportingInterval: reportingInterval,
DUTCapacity: dutCapacity,
StartBotFunc: bot.NewStarter(h).Start,
}
a.Run(ctx)
return nil
}
const checkDrainingInterval = time.Minute
// notifyDraining returns a context that is marked as draining when a
// file exists at the given path.
func notifyDraining(ctx context.Context, path string) context.Context {
ctx, drain := draining.WithDraining(ctx)
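// Drain immediately if the file already exists; otherwise poll for it in the background.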
_, err := os.Stat(path)
if err == nil {
drain()
return ctx
}
go func() {
for {
time.Sleep(checkDrainingInterval)
_, err := os.Stat(path)
if err == nil {
drain()
return
}
}
}()
return ctx
}
// getIntEnv gets an int value from an environment variable. If the
// environment variable is not valid or is not set, use the default value.
func getIntEnv(key string, defaultValue int) int {
v, ok := os.LookupEnv(key)
if !ok {
return defaultValue
}
n, err := strconv.Atoi(v)
if err != nil {
log.Printf("Invalid %s, using default value (error: %v)", key, err)
return defaultValue
}
return n
}
| [
"\"DRONE_AGENT_QUEEN_SERVICE\"",
"\"DRONE_AGENT_SWARMING_URL\"",
"\"GOOGLE_APPLICATION_CREDENTIALS\"",
"\"HOME\""
]
| []
| [
"DRONE_AGENT_QUEEN_SERVICE",
"HOME",
"DRONE_AGENT_SWARMING_URL",
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["DRONE_AGENT_QUEEN_SERVICE", "HOME", "DRONE_AGENT_SWARMING_URL", "GOOGLE_APPLICATION_CREDENTIALS"] | go | 4 | 0 | |
share/we-lang/we-lang.go | package main
import (
"bytes"
_ "crypto/sha512"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"net/url"
"os"
"os/user"
"path"
"regexp"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/klauspost/lctime"
"github.com/mattn/go-colorable"
"github.com/mattn/go-runewidth"
)
type configuration struct {
APIKey string
City string
Numdays int
Imperial bool
WindUnit bool
Inverse bool
Lang string
Narrow bool
LocationName string
WindMS bool
RightToLeft bool
}
type cond struct {
ChanceOfRain string `json:"chanceofrain"`
FeelsLikeC int `json:",string"`
PrecipMM float32 `json:"precipMM,string"`
TempC int `json:"tempC,string"`
TempC2 int `json:"temp_C,string"`
Time int `json:"time,string"`
VisibleDistKM int `json:"visibility,string"`
WeatherCode int `json:"weatherCode,string"`
WeatherDesc []struct{ Value string }
WindGustKmph int `json:",string"`
Winddir16Point string
WindspeedKmph int `json:"windspeedKmph,string"`
}
type astro struct {
Moonrise string
Moonset string
Sunrise string
Sunset string
}
type weather struct {
Astronomy []astro
Date string
Hourly []cond
MaxtempC int `json:"maxtempC,string"`
MintempC int `json:"mintempC,string"`
}
type loc struct {
Query string `json:"query"`
Type string `json:"type"`
}
type resp struct {
Data struct {
Cur []cond `json:"current_condition"`
Err []struct{ Msg string } `json:"error"`
Req []loc `json:"request"`
Weather []weather `json:"weather"`
} `json:"data"`
}
var (
ansiEsc *regexp.Regexp
config configuration
configpath string
debug bool
windDir = map[string]string{
"N": "\033[1m↓\033[0m",
"NNE": "\033[1m↓\033[0m",
"NE": "\033[1m↙\033[0m",
"ENE": "\033[1m↙\033[0m",
"E": "\033[1m←\033[0m",
"ESE": "\033[1m←\033[0m",
"SE": "\033[1m↖\033[0m",
"SSE": "\033[1m↖\033[0m",
"S": "\033[1m↑\033[0m",
"SSW": "\033[1m↑\033[0m",
"SW": "\033[1m↗\033[0m",
"WSW": "\033[1m↗\033[0m",
"W": "\033[1m→\033[0m",
"WNW": "\033[1m→\033[0m",
"NW": "\033[1m↘\033[0m",
"NNW": "\033[1m↘\033[0m",
}
unitRain = map[bool]string{
false: "mm",
true: "in",
}
unitTemp = map[bool]string{
false: "C",
true: "F",
}
unitVis = map[bool]string{
false: "km",
true: "mi",
}
unitWind = map[int]string{
0: "km/h",
1: "mph",
2: "m/s",
}
slotTimes = [slotcount]int{9 * 60, 12 * 60, 18 * 60, 22 * 60}
codes = map[int][]string{
113: iconSunny,
116: iconPartlyCloudy,
119: iconCloudy,
122: iconVeryCloudy,
143: iconFog,
176: iconLightShowers,
179: iconLightSleetShowers,
182: iconLightSleet,
185: iconLightSleet,
200: iconThunderyShowers,
227: iconLightSnow,
230: iconHeavySnow,
248: iconFog,
260: iconFog,
263: iconLightShowers,
266: iconLightRain,
281: iconLightSleet,
284: iconLightSleet,
293: iconLightRain,
296: iconLightRain,
299: iconHeavyShowers,
302: iconHeavyRain,
305: iconHeavyShowers,
308: iconHeavyRain,
311: iconLightSleet,
314: iconLightSleet,
317: iconLightSleet,
320: iconLightSnow,
323: iconLightSnowShowers,
326: iconLightSnowShowers,
329: iconHeavySnow,
332: iconHeavySnow,
335: iconHeavySnowShowers,
338: iconHeavySnow,
350: iconLightSleet,
353: iconLightShowers,
356: iconHeavyShowers,
359: iconHeavyRain,
362: iconLightSleetShowers,
365: iconLightSleetShowers,
368: iconLightSnowShowers,
371: iconHeavySnowShowers,
374: iconLightSleetShowers,
377: iconLightSleet,
386: iconThunderyShowers,
389: iconThunderyHeavyRain,
392: iconThunderySnowShowers,
395: iconHeavySnowShowers,
}
iconUnknown = []string{
" .-. ",
" __) ",
" ( ",
" `-’ ",
" • "}
iconSunny = []string{
"\033[38;5;226m \\ / \033[0m",
"\033[38;5;226m .-. \033[0m",
"\033[38;5;226m ― ( ) ― \033[0m",
"\033[38;5;226m `-’ \033[0m",
"\033[38;5;226m / \\ \033[0m"}
iconPartlyCloudy = []string{
"\033[38;5;226m \\ /\033[0m ",
"\033[38;5;226m _ /\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m \\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
" "}
iconCloudy = []string{
" ",
"\033[38;5;250m .--. \033[0m",
"\033[38;5;250m .-( ). \033[0m",
"\033[38;5;250m (___.__)__) \033[0m",
" "}
iconVeryCloudy = []string{
" ",
"\033[38;5;240;1m .--. \033[0m",
"\033[38;5;240;1m .-( ). \033[0m",
"\033[38;5;240;1m (___.__)__) \033[0m",
" "}
iconLightShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
"\033[38;5;111m ‘ ‘ ‘ ‘ \033[0m",
"\033[38;5;111m ‘ ‘ ‘ ‘ \033[0m"}
iconHeavyShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;240;1m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;240;1m( ). \033[0m",
"\033[38;5;226m /\033[38;5;240;1m(___(__) \033[0m",
"\033[38;5;21;1m ‚‘‚‘‚‘‚‘ \033[0m",
"\033[38;5;21;1m ‚’‚’‚’‚’ \033[0m"}
iconLightSnowShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
"\033[38;5;255m * * * \033[0m",
"\033[38;5;255m * * * \033[0m"}
iconHeavySnowShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;240;1m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;240;1m( ). \033[0m",
"\033[38;5;226m /\033[38;5;240;1m(___(__) \033[0m",
"\033[38;5;255;1m * * * * \033[0m",
"\033[38;5;255;1m * * * * \033[0m"}
iconLightSleetShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
"\033[38;5;111m ‘ \033[38;5;255m*\033[38;5;111m ‘ \033[38;5;255m* \033[0m",
"\033[38;5;255m *\033[38;5;111m ‘ \033[38;5;255m*\033[38;5;111m ‘ \033[0m"}
iconThunderyShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
"\033[38;5;228;5m ⚡\033[38;5;111;25m‘‘\033[38;5;228;5m⚡\033[38;5;111;25m‘‘ \033[0m",
"\033[38;5;111m ‘ ‘ ‘ ‘ \033[0m"}
iconThunderyHeavyRain = []string{
"\033[38;5;240;1m .-. \033[0m",
"\033[38;5;240;1m ( ). \033[0m",
"\033[38;5;240;1m (___(__) \033[0m",
"\033[38;5;21;1m ‚‘\033[38;5;228;5m⚡\033[38;5;21;25m‘‚\033[38;5;228;5m⚡\033[38;5;21;25m‚‘ \033[0m",
"\033[38;5;21;1m ‚’‚’\033[38;5;228;5m⚡\033[38;5;21;25m’‚’ \033[0m"}
iconThunderySnowShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
"\033[38;5;255m *\033[38;5;228;5m⚡\033[38;5;255;25m*\033[38;5;228;5m⚡\033[38;5;255;25m* \033[0m",
"\033[38;5;255m * * * \033[0m"}
iconLightRain = []string{
"\033[38;5;250m .-. \033[0m",
"\033[38;5;250m ( ). \033[0m",
"\033[38;5;250m (___(__) \033[0m",
"\033[38;5;111m ‘ ‘ ‘ ‘ \033[0m",
"\033[38;5;111m ‘ ‘ ‘ ‘ \033[0m"}
iconHeavyRain = []string{
"\033[38;5;240;1m .-. \033[0m",
"\033[38;5;240;1m ( ). \033[0m",
"\033[38;5;240;1m (___(__) \033[0m",
"\033[38;5;21;1m ‚‘‚‘‚‘‚‘ \033[0m",
"\033[38;5;21;1m ‚’‚’‚’‚’ \033[0m"}
iconLightSnow = []string{
"\033[38;5;250m .-. \033[0m",
"\033[38;5;250m ( ). \033[0m",
"\033[38;5;250m (___(__) \033[0m",
"\033[38;5;255m * * * \033[0m",
"\033[38;5;255m * * * \033[0m"}
iconHeavySnow = []string{
"\033[38;5;240;1m .-. \033[0m",
"\033[38;5;240;1m ( ). \033[0m",
"\033[38;5;240;1m (___(__) \033[0m",
"\033[38;5;255;1m * * * * \033[0m",
"\033[38;5;255;1m * * * * \033[0m"}
iconLightSleet = []string{
"\033[38;5;250m .-. \033[0m",
"\033[38;5;250m ( ). \033[0m",
"\033[38;5;250m (___(__) \033[0m",
"\033[38;5;111m ‘ \033[38;5;255m*\033[38;5;111m ‘ \033[38;5;255m* \033[0m",
"\033[38;5;255m *\033[38;5;111m ‘ \033[38;5;255m*\033[38;5;111m ‘ \033[0m"}
iconFog = []string{
" ",
"\033[38;5;251m _ - _ - _ - \033[0m",
"\033[38;5;251m _ - _ - _ \033[0m",
"\033[38;5;251m _ - _ - _ - \033[0m",
" "}
locale = map[string]string{
"af": "af_ZA",
"am": "am_ET",
"ar": "ar_TN",
"az": "az_AZ",
"be": "be_BY",
"bg": "bg_BG",
"bs": "bs_BA",
"ca": "ca_ES",
"cs": "cs_CZ",
"cy": "cy_GB",
"da": "da_DK",
"de": "de_DE",
"el": "el_GR",
"eo": "eo",
"es": "es_ES",
"et": "et_EE",
"eu": "eu_ES",
"fa": "fa_IR",
"fi": "fi_FI",
"fr": "fr_FR",
"fy": "fy_NL",
"ga": "ga_IE",
"he": "he_IL",
"hr": "hr_HR",
"hu": "hu_HU",
"hy": "hy_AM",
"ia": "ia",
"id": "id_ID",
"is": "is_IS",
"it": "it_IT",
"ja": "ja_JP",
"jv": "en_US",
"ka": "ka_GE",
"ko": "ko_KR",
"kk": "kk_KZ",
"ky": "ky_KG",
"lt": "lt_LT",
"lv": "lv_LV",
"mk": "mk_MK",
"ml": "ml_IN",
"nb": "nb_NO",
"nl": "nl_NL",
"nn": "nn_NO",
"oc": "oc_FR",
"pt": "pt_PT",
"pt-br": "pt_BR",
"pl": "pl_PL",
"ro": "ro_RO",
"ru": "ru_RU",
"sv": "sv_SE",
"sk": "sk_SK",
"sl": "sl_SI",
"sr": "sr_RS",
"sr-lat": "sr_RS@latin",
"sw": "sw_KE",
"th": "th_TH",
"tr": "tr_TR",
"uk": "uk_UA",
"uz": "uz_UZ",
"vi": "vi_VN",
"zu": "zu_ZA",
"zh": "zh_CN",
"zh-cn": "zh_CN",
"zh-tw": "zh_TW",
}
localizedCaption = map[string]string{
"af": "Weer verslag vir:",
"am": "የአየር ሁኔታ ዘገባ ለ ፥",
"ar": "تقرير حالة ألطقس",
"az": "Hava proqnozu:",
"be": "Прагноз надвор'я для:",
"bg": "Прогноза за времето в:",
"bs": "Vremenske prognoze za:",
"ca": "Informe del temps per a:",
"cs": "Předpověď počasí pro:",
"cy": "Adroddiad tywydd ar gyfer:",
"da": "Vejret i:",
"de": "Wetterbericht für:",
"el": "Πρόγνωση καιρού για:",
"eo": "Veterprognozo por:",
"es": "El tiempo en:",
"et": "Ilmaprognoos:",
"eu": "Eguraldia:",
"fa": "اوه و بآ تیعضو شرازگ",
"fi": "Säätiedotus:",
"fr": "Prévisions météo pour:",
"fy": "Waarberjocht foar:",
"ga": "Réamhaisnéis na haimsire do:",
"he": ":ריוואה גזמ תיזחת",
"hr": "Vremenska prognoza za:",
"hu": "Időjárás előrejelzés:",
"hy": "Եղանակի տեսություն:",
"ia": "Le tempore a:",
"id": "Prakiraan cuaca:",
"it": "Previsioni meteo:",
"is": "Veðurskýrsla fyrir:",
"ja": "天気予報:",
"jv": "Weather forecast for:",
"ka": "ამინდის პროგნოზი:",
"kk": "Ауа райы:",
"ko": "일기 예보:",
"ky": "Аба ырайы:",
"lt": "Orų prognozė:",
"lv": "Laika ziņas:",
"mk": "Прогноза за времето во:",
"ml": "കാലാവസ്ഥ റിപ്പോർട്ട്:",
"nb": "Værmelding for:",
"nl": "Weerbericht voor:",
"nn": "Vêrmelding for:",
"oc": "Previsions metèo per:",
"pl": "Pogoda w:",
"pt": "Previsão do tempo para:",
"pt-br": "Previsão do tempo para:",
"ro": "Prognoza meteo pentru:",
"ru": "Прогноз погоды:",
"sk": "Predpoveď počasia pre:",
"sl": "Vremenska napoved za",
"sr": "Временска прогноза за:",
"sr-lat": "Vremenska prognoza za:",
"sv": "Väderleksprognos för:",
"sw": "Ripoti ya hali ya hewa, jiji la:",
"te": "వాతావరణ సమాచారము:",
"th": "รายงานสภาพอากาศ:",
"tr": "Hava beklentisi:",
"uk": "Прогноз погоди для:",
"uz": "Ob-havo bashorati:",
"vi": "Báo cáo thời tiết:",
"zu": "Isimo sezulu:",
"zh": "天气预报:",
"zh-cn": "天气预报:",
"zh-tw": "天氣預報:",
}
daytimeTranslation = map[string][]string{
"af": {"Oggend", "Middag", "Vroegaand", "Laatnag"},
"am": {"ጠዋት", "ከሰዓት በኋላ", "ምሽት", "ሌሊት"},
"ar": {"ﺎﻠﻠﻴﻟ", "ﺎﻠﻤﺳﺍﺀ", "ﺎﻠﻈﻫﺭ", "ﺎﻠﺼﺑﺎﺣ"},
"az": {"Səhər", "Gün", "Axşam", "Gecə"},
"be": {"Раніца", "Дзень", "Вечар", "Ноч"},
"bg": {"Сутрин", "Обяд", "Вечер", "Нощ"},
"bs": {"Ujutro", "Dan", "Večer", "Noć"},
"cs": {"Ráno", "Ve dne", "Večer", "V noci"},
"ca": {"Matí", "Dia", "Tarda", "Nit"},
"cy": {"Bore", "Dydd", "Hwyr", "Nos"},
"da": {"Morgen", "Middag", "Aften", "Nat"},
"de": {"Früh", "Mittag", "Abend", "Nacht"},
"el": {"Πρωί", "Μεσημέρι", "Απόγευμα", "Βράδυ"},
"en": {"Morning", "Noon", "Evening", "Night"},
"eo": {"Mateno", "Tago", "Vespero", "Nokto"},
"es": {"Mañana", "Mediodía", "Tarde", "Noche"},
"et": {"Hommik", "Päev", "Õhtu", "Öösel"},
"eu": {"Goiza", "Eguerdia", "Arratsaldea", "Gaua"},
"fa": {"حبص", "رهظ", "رصع", "بش"},
"fi": {"Aamu", "Keskipäivä", "Ilta", "Yö"},
"fr": {"Matin", "Après-midi", "Soir", "Nuit"},
"fy": {"Moarns", "Middeis", "Jûns", "Nachts"},
"ga": {"Maidin", "Nóin", "Tráthnóna", "Oíche"},
"he": {"רקוב", "םוֹיְ", "ברֶעֶ", "הלָיְלַ"},
"hr": {"Jutro", "Dan", "Večer", "Noć"},
"hu": {"Reggel", "Dél", "Este", "Éjszaka"},
"hy": {"Առավոտ", "Կեսօր", "Երեկո", "Գիշեր"},
"ia": {"Matino", "Mediedie", "Vespere", "Nocte"},
"id": {"Pagi", "Hari", "Petang", "Malam"},
"it": {"Mattina", "Pomeriggio", "Sera", "Notte"},
"is": {"Morgunn", "Dagur", "Kvöld", "Nótt"},
"ja": {"朝", "昼", "夕", "夜"},
"jv": {"Morning", "Noon", "Evening", "Night"},
"ka": {"დილა", "დღე", "საღამო", "ღამე"},
"kk": {"Таң", "Күндіз", "Кеш", "Түн"},
"ko": {"아침", "낮", "저녁", "밤"},
"ky": {"Эртең", "Күн", "Кеч", "Түн"},
"lt": {"Rytas", "Diena", "Vakaras", "Naktis"},
"lv": {"Rīts", "Diena", "Vakars", "Nakts"},
"mk": {"Утро", "Пладне", "Вечер", "Ноќ"},
"ml": {"മോണിംഗ്", "മധ്യാഹ്നം", "വൈകുന്നേരം", "രാത്രി"},
"nl": {"'s Ochtends", "'s Middags", "'s Avonds", "'s Nachts"},
"nb": {"Morgen", "Middag", "Kveld", "Natt"},
"nn": {"Morgon", "Middag", "Kveld", "Natt"},
"oc": {"Matin", "Jorn", "Vèspre", "Nuèch"},
"pl": {"Ranek", "Dzień", "Wieczór", "Noc"},
"pt": {"Manhã", "Meio-dia", "Tarde", "Noite"},
"pt-br": {"Manhã", "Meio-dia", "Tarde", "Noite"},
"ro": {"Dimineaţă", "Amiază", "Seară", "Noapte"},
"ru": {"Утро", "День", "Вечер", "Ночь"},
"sk": {"Ráno", "Cez deň", "Večer", "V noci"},
"sl": {"Jutro", "Dan", "Večer", "Noč"},
"sr": {"Јутро", "Подне", "Вече", "Ноћ"},
"sr-lat": {"Jutro", "Podne", "Veče", "Noć"},
"sv": {"Morgon", "Eftermiddag", "Kväll", "Natt"},
"sw": {"Asubuhi", "Adhuhuri", "Jioni", "Usiku"},
"te": {"ఉదయం", "రోజు", "సాయంత్రం", "రాత్రి"},
"th": {"เช้า", "วัน", "เย็น", "คืน"},
"tr": {"Sabah", "Öğle", "Akşam", "Gece"},
"uk": {"Ранок", "День", "Вечір", "Ніч"},
"uz": {"Ertalab", "Kunduzi", "Kechqurun", "Kecha"},
"vi": {"Sáng", "Trưa", "Chiều", "Tối"},
"zh": {"早上", "中午", "傍晚", "夜间"},
"zh-cn": {"早上", "中午", "傍晚", "夜间"},
"zh-tw": {"早上", "中午", "傍晚", "夜間"},
"zu": {"Morning", "Noon", "Evening", "Night"},
}
)
// Add these languages:
// da tr hu sr jv zu
// More languages: https://developer.worldweatheronline.com/api/multilingual.aspx
// const (
// wuri = "https://api.worldweatheronline.com/premium/v1/weather.ashx?"
// suri = "https://api.worldweatheronline.com/premium/v1/search.ashx?"
// slotcount = 4
// )
const (
wuri = "http://127.0.0.1:5001/premium/v1/weather.ashx?"
suri = "http://127.0.0.1:5001/premium/v1/search.ashx?"
slotcount = 4
)
func configload() error {
b, err := ioutil.ReadFile(configpath)
if err == nil {
return json.Unmarshal(b, &config)
}
return err
}
func configsave() error {
j, err := json.MarshalIndent(config, "", "\t")
if err == nil {
return ioutil.WriteFile(configpath, j, 0600)
}
return err
}
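// pad pads or truncates s so that its visible length (ignoring ANSI escape
// sequences) matches mustLen.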
func pad(s string, mustLen int) (ret string) {
ret = s
realLen := utf8.RuneCountInString(ansiEsc.ReplaceAllLiteralString(s, ""))
delta := mustLen - realLen
if delta > 0 {
if config.RightToLeft {
ret = strings.Repeat(" ", delta) + ret + "\033[0m"
} else {
ret += "\033[0m" + strings.Repeat(" ", delta)
}
} else if delta < 0 {
toks := ansiEsc.Split(s, 2)
tokLen := utf8.RuneCountInString(toks[0])
esc := ansiEsc.FindString(s)
if tokLen > mustLen {
ret = fmt.Sprintf("%.*s\033[0m", mustLen, toks[0])
} else {
ret = fmt.Sprintf("%s%s%s", toks[0], esc, pad(toks[1], mustLen-tokLen))
}
}
return
}
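// formatTemp renders the feels-like and actual temperature range with ANSI
// colors in the configured unit.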
func formatTemp(c cond) string {
color := func(temp int, explicitPlus bool) string {
var col = 0
if !config.Inverse {
// Extremely cold temperatures must be shown with violet
// because dark blue is too dark
col = 165
switch temp {
case -15, -14, -13:
col = 171
case -12, -11, -10:
col = 33
case -9, -8, -7:
col = 39
case -6, -5, -4:
col = 45
case -3, -2, -1:
col = 51
case 0, 1:
col = 50
case 2, 3:
col = 49
case 4, 5:
col = 48
case 6, 7:
col = 47
case 8, 9:
col = 46
case 10, 11, 12:
col = 82
case 13, 14, 15:
col = 118
case 16, 17, 18:
col = 154
case 19, 20, 21:
col = 190
case 22, 23, 24:
col = 226
case 25, 26, 27:
col = 220
case 28, 29, 30:
col = 214
case 31, 32, 33:
col = 208
case 34, 35, 36:
col = 202
default:
if temp > 0 {
col = 196
}
}
} else {
col = 16
switch temp {
case -15, -14, -13:
col = 17
case -12, -11, -10:
col = 18
case -9, -8, -7:
col = 19
case -6, -5, -4:
col = 20
case -3, -2, -1:
col = 21
case 0, 1:
col = 30
case 2, 3:
col = 28
case 4, 5:
col = 29
case 6, 7:
col = 30
case 8, 9:
col = 34
case 10, 11, 12:
col = 35
case 13, 14, 15:
col = 36
case 16, 17, 18:
col = 40
case 19, 20, 21:
col = 59
case 22, 23, 24:
col = 100
case 25, 26, 27:
col = 101
case 28, 29, 30:
col = 94
case 31, 32, 33:
col = 166
case 34, 35, 36:
col = 52
default:
if temp > 0 {
col = 196
}
}
}
if config.Imperial {
temp = (temp*18 + 320) / 10
}
if explicitPlus {
return fmt.Sprintf("\033[38;5;%03dm+%d\033[0m", col, temp)
}
return fmt.Sprintf("\033[38;5;%03dm%d\033[0m", col, temp)
}
t := c.TempC
if t == 0 {
t = c.TempC2
}
hyphen := " - "
// if (config.Lang == "sl") {
// hyphen = "-"
// }
hyphen = ".."
explicitPlus := false
if c.FeelsLikeC < t {
if c.FeelsLikeC < 0 && t > 0 {
explicitPlus = true
}
return pad(fmt.Sprintf("%s%s%s °%s", color(c.FeelsLikeC, false), hyphen, color(t, explicitPlus), unitTemp[config.Imperial]), 15)
} else if c.FeelsLikeC > t {
if t < 0 && c.FeelsLikeC > 0 {
explicitPlus = true
}
return pad(fmt.Sprintf("%s%s%s °%s", color(t, false), hyphen, color(c.FeelsLikeC, explicitPlus), unitTemp[config.Imperial]), 15)
}
return pad(fmt.Sprintf("%s °%s", color(c.FeelsLikeC, false), unitTemp[config.Imperial]), 15)
}
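// formatWind renders the wind direction arrow and the speed (including gusts
// when higher) in the configured unit.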
func formatWind(c cond) string {
windInRightUnits := func(spd int) int {
if config.WindMS {
spd = (spd * 1000) / 3600
} else {
if config.Imperial {
spd = (spd * 1000) / 1609
}
}
return spd
}
color := func(spd int) string {
var col = 46
switch spd {
case 1, 2, 3:
col = 82
case 4, 5, 6:
col = 118
case 7, 8, 9:
col = 154
case 10, 11, 12:
col = 190
case 13, 14, 15:
col = 226
case 16, 17, 18, 19:
col = 220
case 20, 21, 22, 23:
col = 214
case 24, 25, 26, 27:
col = 208
case 28, 29, 30, 31:
col = 202
default:
if spd > 0 {
col = 196
}
}
spd = windInRightUnits(spd)
return fmt.Sprintf("\033[38;5;%03dm%d\033[0m", col, spd)
}
unitWindString := unitWind[0]
if config.WindMS {
unitWindString = unitWind[2]
} else {
if config.Imperial {
unitWindString = unitWind[1]
}
}
hyphen := " - "
// if (config.Lang == "sl") {
// hyphen = "-"
// }
hyphen = "-"
cWindGustKmph := color(c.WindGustKmph)
cWindspeedKmph := color(c.WindspeedKmph)
if windInRightUnits(c.WindGustKmph) > windInRightUnits(c.WindspeedKmph) {
return pad(fmt.Sprintf("%s %s%s%s %s", windDir[c.Winddir16Point], cWindspeedKmph, hyphen, cWindGustKmph, unitWindString), 15)
}
return pad(fmt.Sprintf("%s %s %s", windDir[c.Winddir16Point], cWindspeedKmph, unitWindString), 15)
}
func formatVisibility(c cond) string {
if config.Imperial {
c.VisibleDistKM = (c.VisibleDistKM * 621) / 1000
}
return pad(fmt.Sprintf("%d %s", c.VisibleDistKM, unitVis[config.Imperial]), 15)
}
func formatRain(c cond) string {
rainUnit := float32(c.PrecipMM)
if config.Imperial {
rainUnit = float32(c.PrecipMM) * 0.039
}
if c.ChanceOfRain != "" {
return pad(fmt.Sprintf("%.1f %s | %s%%", rainUnit, unitRain[config.Imperial], c.ChanceOfRain), 15)
}
return pad(fmt.Sprintf("%.1f %s", rainUnit, unitRain[config.Imperial]), 15)
}
func formatCond(cur []string, c cond, current bool) (ret []string) {
var icon []string
if i, ok := codes[c.WeatherCode]; !ok {
icon = iconUnknown
} else {
icon = i
}
if config.Inverse {
// inverting colors
for i := range icon {
icon[i] = strings.Replace(icon[i], "38;5;226", "38;5;94", -1)
icon[i] = strings.Replace(icon[i], "38;5;250", "38;5;243", -1)
icon[i] = strings.Replace(icon[i], "38;5;21", "38;5;18", -1)
icon[i] = strings.Replace(icon[i], "38;5;255", "38;5;245", -1)
icon[i] = strings.Replace(icon[i], "38;5;111", "38;5;63", -1)
icon[i] = strings.Replace(icon[i], "38;5;251", "38;5;238", -1)
}
}
//desc := fmt.Sprintf("%-15.15v", c.WeatherDesc[0].Value)
desc := c.WeatherDesc[0].Value
if config.RightToLeft {
for runewidth.StringWidth(desc) < 15 {
desc = " " + desc
}
for runewidth.StringWidth(desc) > 15 {
_, size := utf8.DecodeLastRuneInString(desc)
desc = desc[size:]
}
} else {
for runewidth.StringWidth(desc) < 15 {
desc += " "
}
for runewidth.StringWidth(desc) > 15 {
_, size := utf8.DecodeLastRuneInString(desc)
desc = desc[:len(desc)-size]
}
}
if current {
if config.RightToLeft {
desc = c.WeatherDesc[0].Value
if runewidth.StringWidth(desc) < 15 {
desc = strings.Repeat(" ", 15-runewidth.StringWidth(desc)) + desc
}
} else {
desc = c.WeatherDesc[0].Value
}
} else {
if config.RightToLeft {
if frstRune, size := utf8.DecodeRuneInString(desc); frstRune != ' ' {
desc = "…" + desc[size:]
for runewidth.StringWidth(desc) < 15 {
desc = " " + desc
}
}
} else {
if lastRune, size := utf8.DecodeLastRuneInString(desc); lastRune != ' ' {
desc = desc[:len(desc)-size] + "…"
//for numberOfSpaces < runewidth.StringWidth(fmt.Sprintf("%c", lastRune)) - 1 {
for runewidth.StringWidth(desc) < 15 {
desc = desc + " "
}
}
}
}
if config.RightToLeft {
ret = append(ret, fmt.Sprintf("%v %v %v", cur[0], desc, icon[0]))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[1], formatTemp(c), icon[1]))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[2], formatWind(c), icon[2]))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[3], formatVisibility(c), icon[3]))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[4], formatRain(c), icon[4]))
} else {
ret = append(ret, fmt.Sprintf("%v %v %v", cur[0], icon[0], desc))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[1], icon[1], formatTemp(c)))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[2], icon[2], formatWind(c)))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[3], icon[3], formatVisibility(c)))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[4], icon[4], formatRain(c)))
}
return
}
func justifyCenter(s string, width int) string {
appendSide := 0
for runewidth.StringWidth(s) <= width {
if appendSide == 1 {
s = s + " "
appendSide = 0
} else {
s = " " + s
appendSide = 1
}
}
return s
}
func reverse(s string) string {
r := []rune(s)
for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
r[i], r[j] = r[j], r[i]
}
return string(r)
}
func printDay(w weather) (ret []string) {
hourly := w.Hourly
ret = make([]string, 5)
for i := range ret {
ret[i] = "│"
}
// find hourly data which fits the desired times of day best
var slots [slotcount]cond
for _, h := range hourly {
c := int(math.Mod(float64(h.Time), 100)) + 60*(h.Time/100)
for i, s := range slots {
if math.Abs(float64(c-slotTimes[i])) < math.Abs(float64(s.Time-slotTimes[i])) {
h.Time = c
slots[i] = h
}
}
}
if config.RightToLeft {
slots[0], slots[3] = slots[3], slots[0]
slots[1], slots[2] = slots[2], slots[1]
}
for i, s := range slots {
if config.Narrow {
if i == 0 || i == 2 {
continue
}
}
ret = formatCond(ret, s, false)
for i := range ret {
ret[i] = ret[i] + "│"
}
}
d, _ := time.Parse("2006-01-02", w.Date)
// dateFmt := "┤ " + d.Format("Mon 02. Jan") + " ├"
if val, ok := locale[config.Lang]; ok {
lctime.SetLocale(val)
} else {
lctime.SetLocale("en_US")
}
dateName := ""
if config.RightToLeft {
dow := lctime.Strftime("%a", d)
day := lctime.Strftime("%d", d)
month := lctime.Strftime("%b", d)
dateName = reverse(month) + " " + day + " " + reverse(dow)
} else {
dateName = lctime.Strftime("%a %d %b", d)
if config.Lang == "ko" {
dateName = lctime.Strftime("%b %d일 %a", d)
}
if config.Lang == "zh" || config.Lang == "zh-tw" || config.Lang == "zh-cn" {
dateName = lctime.Strftime("%b%d日%A", d)
}
}
// appendSide := 0
// // for utf8.RuneCountInString(dateName) <= dateWidth {
// for runewidth.StringWidth(dateName) <= dateWidth {
// if appendSide == 1 {
// dateName = dateName + " "
// appendSide = 0
// } else {
// dateName = " " + dateName
// appendSide = 1
// }
// }
dateFmt := "┤" + justifyCenter(dateName, 12) + "├"
trans := daytimeTranslation["en"]
if t, ok := daytimeTranslation[config.Lang]; ok {
trans = t
}
if config.Narrow {
names := "│ " + justifyCenter(trans[1], 16) +
"└──────┬──────┘" + justifyCenter(trans[3], 16) + " │"
ret = append([]string{
" ┌─────────────┐ ",
"┌───────────────────────" + dateFmt + "───────────────────────┐",
names,
"├──────────────────────────────┼──────────────────────────────┤"},
ret...)
return append(ret,
"└──────────────────────────────┴──────────────────────────────┘")
}
names := ""
if config.RightToLeft {
names = "│" + justifyCenter(trans[3], 29) + "│ " + justifyCenter(trans[2], 16) +
"└──────┬──────┘" + justifyCenter(trans[1], 16) + " │" + justifyCenter(trans[0], 29) + "│"
} else {
names = "│" + justifyCenter(trans[0], 29) + "│ " + justifyCenter(trans[1], 16) +
"└──────┬──────┘" + justifyCenter(trans[2], 16) + " │" + justifyCenter(trans[3], 29) + "│"
}
ret = append([]string{
" ┌─────────────┐ ",
"┌──────────────────────────────┬───────────────────────" + dateFmt + "───────────────────────┬──────────────────────────────┐",
names,
"├──────────────────────────────┼──────────────────────────────┼──────────────────────────────┼──────────────────────────────┤"},
ret...)
return append(ret,
"└──────────────────────────────┴──────────────────────────────┴──────────────────────────────┴──────────────────────────────┘")
}
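// unmarshalLang swaps the English weather descriptions for the localized
// lang_<code> strings returned by the API before decoding the response.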
func unmarshalLang(body []byte, r *resp) error {
var rv map[string]interface{}
if err := json.Unmarshal(body, &rv); err != nil {
return err
}
if data, ok := rv["data"].(map[string]interface{}); ok {
if ccs, ok := data["current_condition"].([]interface{}); ok {
for _, cci := range ccs {
cc, ok := cci.(map[string]interface{})
if !ok {
continue
}
langs, ok := cc["lang_"+config.Lang].([]interface{})
if !ok || len(langs) == 0 {
continue
}
weatherDesc, ok := cc["weatherDesc"].([]interface{})
if !ok || len(weatherDesc) == 0 {
continue
}
weatherDesc[0] = langs[0]
}
}
if ws, ok := data["weather"].([]interface{}); ok {
for _, wi := range ws {
w, ok := wi.(map[string]interface{})
if !ok {
continue
}
if hs, ok := w["hourly"].([]interface{}); ok {
for _, hi := range hs {
h, ok := hi.(map[string]interface{})
if !ok {
continue
}
langs, ok := h["lang_"+config.Lang].([]interface{})
if !ok || len(langs) == 0 {
continue
}
weatherDesc, ok := h["weatherDesc"].([]interface{})
if !ok || len(weatherDesc) == 0 {
continue
}
weatherDesc[0] = langs[0]
}
}
}
}
}
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(rv); err != nil {
return err
}
if err := json.NewDecoder(&buf).Decode(r); err != nil {
return err
}
return nil
}
func getDataFromAPI() (ret resp) {
var params []string
if len(config.APIKey) == 0 {
log.Fatal("No API key specified. Setup instructions are in the README.")
}
params = append(params, "key="+config.APIKey)
// non-flag shortcut arguments will overwrite possible flag arguments
for _, arg := range flag.Args() {
if v, err := strconv.Atoi(arg); err == nil && len(arg) == 1 {
config.Numdays = v
} else {
config.City = arg
}
}
if len(config.City) > 0 {
params = append(params, "q="+url.QueryEscape(config.City))
}
params = append(params, "format=json")
params = append(params, "num_of_days="+strconv.Itoa(config.Numdays))
params = append(params, "tp=3")
if config.Lang != "" {
params = append(params, "lang="+config.Lang)
}
if debug {
fmt.Fprintln(os.Stderr, params)
}
res, err := http.Get(wuri + strings.Join(params, "&"))
if err != nil {
log.Fatal(err)
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
log.Fatal(err)
}
if debug {
var out bytes.Buffer
json.Indent(&out, body, "", " ")
out.WriteTo(os.Stderr)
fmt.Print("\n\n")
}
if config.Lang == "" {
if err = json.Unmarshal(body, &ret); err != nil {
log.Println(err)
}
} else {
if err = unmarshalLang(body, &ret); err != nil {
log.Println(err)
}
}
return
}
func init() {
flag.IntVar(&config.Numdays, "days", 3, "Number of days of weather forecast to be displayed")
flag.StringVar(&config.Lang, "lang", "en", "Language of the report")
flag.StringVar(&config.City, "city", "New York", "City to be queried")
flag.BoolVar(&debug, "debug", false, "Print out raw json response for debugging purposes")
flag.BoolVar(&config.Imperial, "imperial", false, "Use imperial units")
flag.BoolVar(&config.Inverse, "inverse", false, "Use inverted colors")
flag.BoolVar(&config.Narrow, "narrow", false, "Narrow output (two columns)")
flag.StringVar(&config.LocationName, "location_name", "", "Location name (used in the caption)")
flag.BoolVar(&config.WindMS, "wind_in_ms", false, "Show wind speed in m/s")
flag.BoolVar(&config.RightToLeft, "right_to_left", false, "Right to left script")
configpath = os.Getenv("WEGORC")
if configpath == "" {
usr, err := user.Current()
if err != nil {
log.Fatalf("%v\nYou can set the environment variable WEGORC to point to your config file as a workaround.", err)
}
configpath = path.Join(usr.HomeDir, ".wegorc")
}
config.APIKey = ""
config.Imperial = false
config.Lang = "en"
err := configload()
if _, ok := err.(*os.PathError); ok {
log.Printf("No config file found. Creating %s ...", configpath)
if err2 := configsave(); err2 != nil {
log.Fatal(err2)
}
} else if err != nil {
log.Fatalf("could not parse %v: %v", configpath, err)
}
ansiEsc = regexp.MustCompile("\033.*?m")
}
func main() {
flag.Parse()
r := getDataFromAPI()
if r.Data.Req == nil || len(r.Data.Req) < 1 {
if r.Data.Err != nil && len(r.Data.Err) >= 1 {
log.Fatal(r.Data.Err[0].Msg)
}
log.Fatal("Malformed response.")
}
locationName := r.Data.Req[0].Query
if config.LocationName != "" {
locationName = config.LocationName
}
if config.Lang == "he" || config.Lang == "ar" || config.Lang == "fa" {
config.RightToLeft = true
}
if caption, ok := localizedCaption[config.Lang]; !ok {
// r.Data.Req[0].Type,
fmt.Printf("Weather report: %s\n\n", locationName)
} else {
if config.RightToLeft {
caption = locationName + " " + caption
space := strings.Repeat(" ", 125-runewidth.StringWidth(caption))
fmt.Printf("%s%s\n\n", space, caption)
} else {
fmt.Printf("%s %s\n\n", caption, locationName)
}
}
stdout := colorable.NewColorableStdout()
if r.Data.Cur == nil || len(r.Data.Cur) < 1 {
log.Fatal("No weather data available.")
}
out := formatCond(make([]string, 5), r.Data.Cur[0], true)
for _, val := range out {
if config.RightToLeft {
fmt.Fprint(stdout, strings.Repeat(" ", 94))
} else {
fmt.Fprint(stdout, " ")
}
fmt.Fprintln(stdout, val)
}
if config.Numdays == 0 {
return
}
if r.Data.Weather == nil {
log.Fatal("No detailed weather forecast available.")
}
for _, d := range r.Data.Weather {
for _, val := range printDay(d) {
fmt.Fprintln(stdout, val)
}
}
}
| [
"\"WEGORC\""
]
| []
| [
"WEGORC"
]
| [] | ["WEGORC"] | go | 1 | 0 | |
src/main/java/org/jenkinsci/remoting/engine/JnlpAgentEndpointResolver.java | /*
* The MIT License
*
* Copyright (c) 2016, CloudBees, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.jenkinsci.remoting.engine;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import hudson.remoting.Engine;
import hudson.remoting.Launcher;
import hudson.remoting.NoProxyEvaluator;
import org.jenkinsci.remoting.util.VersionNumber;
import org.jenkinsci.remoting.util.https.NoCheckHostnameVerifier;
import org.jenkinsci.remoting.util.https.NoCheckTrustManager;
import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManager;
import java.io.IOException;
import java.net.ConnectException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.NoRouteToHostException;
import java.net.Proxy;
import java.net.ProxySelector;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.interfaces.RSAPublicKey;
import java.security.spec.InvalidKeySpecException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.StringJoiner;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Pattern;
import static java.util.logging.Level.INFO;
import static org.jenkinsci.remoting.util.ThrowableUtils.chain;
/**
* @author Stephen Connolly
* @since 3.0
*/
public class JnlpAgentEndpointResolver extends JnlpEndpointResolver {
private static final Logger LOGGER = Logger.getLogger(JnlpAgentEndpointResolver.class.getName());
@Nonnull
private final List<String> jenkinsUrls;
private SSLSocketFactory sslSocketFactory;
private String credentials;
private String proxyCredentials;
private String tunnel;
private boolean disableHttpsCertValidation;
/**
* If specified, only the protocols from the list will be tried during the connection.
* The option provides protocol names, but the order of the check is defined internally and cannot be changed.
* This option can be also used in order to workaround issues when the headers cannot be delivered
* from the server due to whatever reason (e.g. JENKINS-41730).
*/
private static String PROTOCOL_NAMES_TO_TRY =
System.getProperty(JnlpAgentEndpointResolver.class.getName() + ".protocolNamesToTry");
public JnlpAgentEndpointResolver(String... jenkinsUrls) {
this.jenkinsUrls = new ArrayList<String>(Arrays.asList(jenkinsUrls));
}
public JnlpAgentEndpointResolver(@Nonnull List<String> jenkinsUrls) {
this(jenkinsUrls, null, null, null, null, false);
}
public JnlpAgentEndpointResolver(List<String> jenkinsUrls, String credentials, String proxyCredentials,
String tunnel, SSLSocketFactory sslSocketFactory, boolean disableHttpsCertValidation) {
this.jenkinsUrls = new ArrayList<>(jenkinsUrls);
this.credentials = credentials;
this.proxyCredentials = proxyCredentials;
this.tunnel = tunnel;
this.sslSocketFactory = sslSocketFactory;
this.disableHttpsCertValidation = disableHttpsCertValidation;
}
public SSLSocketFactory getSslSocketFactory() {
return sslSocketFactory;
}
public void setSslSocketFactory(SSLSocketFactory sslSocketFactory) {
this.sslSocketFactory = sslSocketFactory;
}
public String getCredentials() {
return credentials;
}
public void setCredentials(String credentials) {
this.credentials = credentials;
}
public void setCredentials(String user, String pass) {
this.credentials = user + ":" + pass;
}
public String getProxyCredentials() {
return proxyCredentials;
}
public void setProxyCredentials(String proxyCredentials) {
this.proxyCredentials = proxyCredentials;
}
public void setProxyCredentials(String user, String pass) {
this.proxyCredentials = user + ":" + pass;
}
@CheckForNull
public String getTunnel() {
return tunnel;
}
public void setTunnel(@CheckForNull String tunnel) {
this.tunnel = tunnel;
}
/**
* Determine if certificate checking should be ignored for JNLP endpoint
*
* @return {@code true} if HTTPS certificate validation is disabled for the endpoint
*/
public boolean isDisableHttpsCertValidation() {
return disableHttpsCertValidation;
}
/**
* Sets if the HTTPs certificate check should be disabled.
*
* This behavior is not recommended.
* @param disableHttpsCertValidation {@code true} to disable HTTPS certificate and hostname verification
*/
public void setDisableHttpsCertValidation(boolean disableHttpsCertValidation) {
this.disableHttpsCertValidation = disableHttpsCertValidation;
}
@CheckForNull
@Override
public JnlpAgentEndpoint resolve() throws IOException {
IOException firstError = null;
for (String jenkinsUrl : jenkinsUrls) {
if (jenkinsUrl == null) {
continue;
}
final URL selectedJenkinsURL;
final URL salURL;
try {
selectedJenkinsURL = new URL(jenkinsUrl);
salURL = toAgentListenerURL(jenkinsUrl);
} catch (MalformedURLException ex) {
LOGGER.log(Level.WARNING, String.format("Cannot parse agent endpoint URL %s. Skipping it", jenkinsUrl), ex);
continue;
}
// find out the TCP port
HttpURLConnection con =
(HttpURLConnection) openURLConnection(salURL, credentials, proxyCredentials, sslSocketFactory, disableHttpsCertValidation);
try {
try {
con.setConnectTimeout(30000);
con.setReadTimeout(60000);
con.connect();
} catch (IOException x) {
firstError = chain(firstError,
new IOException("Failed to connect to " + salURL + ": " + x.getMessage(), x));
continue;
}
if (con.getResponseCode() != 200) {
firstError = chain(firstError, new IOException(
salURL + " is invalid: " + con.getResponseCode() + " " + con.getResponseMessage()));
continue;
}
// Check if current version of agent is supported
String minimumSupportedVersionHeader = first(header(con, Engine.REMOTING_MINIMUM_VERSION_HEADER));
if (minimumSupportedVersionHeader != null) {
VersionNumber minimumSupportedVersion = new VersionNumber(minimumSupportedVersionHeader);
VersionNumber currentVersion = new VersionNumber(Launcher.VERSION);
if (currentVersion.isOlderThan(minimumSupportedVersion)) {
firstError = chain(firstError, new IOException(
"Agent version " + minimumSupportedVersion + " or newer is required."
));
continue;
}
}
String host;
String portStr;
Set<String> agentProtocolNames = null;
portStr = first(header(con, "X-Jenkins-JNLP-Port", "X-Hudson-JNLP-Port"));
host = defaultString(first(header(con, "X-Jenkins-JNLP-Host")), salURL.getHost());
List<String> protocols = header(con, "X-Jenkins-Agent-Protocols");
if (protocols != null) {
// Take the list of protocols to try from the headers
agentProtocolNames = new HashSet<String>();
for (String names : protocols) {
for (String name : names.split(",")) {
name = name.trim();
if (!name.isEmpty()) {
agentProtocolNames.add(name);
}
}
}
if (agentProtocolNames.isEmpty()) {
LOGGER.log(Level.WARNING, "Received the empty list of supported protocols from the server. " +
"All protocols are disabled on the master side OR the 'X-Jenkins-Agent-Protocols' header is corrupted (JENKINS-41730). " +
"In the case of the header corruption as a workaround you can use the " +
"'org.jenkinsci.remoting.engine.JnlpAgentEndpointResolver.protocolNamesToTry' system property " +
"to define the supported protocols.");
} else {
LOGGER.log(Level.INFO, "Remoting server accepts the following protocols: {0}", agentProtocolNames);
}
}
if (PROTOCOL_NAMES_TO_TRY != null) {
// Take a list of protocols to try from the system property
agentProtocolNames = new HashSet<String>();
LOGGER.log(Level.INFO, "Ignoring the list of supported remoting protocols provided by the server, because the " +
"'org.jenkinsci.remoting.engine.JnlpAgentEndpointResolver.protocolNamesToTry' property is defined. Will try {0}", PROTOCOL_NAMES_TO_TRY);
for (String name : PROTOCOL_NAMES_TO_TRY.split(",")) {
name = name.trim();
if (!name.isEmpty()) {
agentProtocolNames.add(name);
}
}
}
String idHeader = first(header(con, "X-Instance-Identity"));
RSAPublicKey identity;
try {
identity = getIdentity(idHeader);
if (identity == null) {
firstError = chain(firstError, new IOException(
salURL + " appears to be publishing an invalid X-Instance-Identity."));
continue;
}
} catch (InvalidKeySpecException e) {
firstError = chain(firstError, new IOException(
salURL + " appears to be publishing an invalid X-Instance-Identity."));
continue;
}
if (portStr == null) {
firstError = chain(firstError, new IOException(jenkinsUrl + " is not Jenkins"));
continue;
}
int port = 0;
try {
port = Integer.parseInt(portStr);
} catch (NumberFormatException e) {
firstError = chain(firstError, new IOException(jenkinsUrl + " is publishing an invalid port", e));
continue;
}
if (port <= 0 || 65536 <= port) {
firstError = chain(firstError, new IOException(jenkinsUrl + " is publishing an invalid port"));
continue;
}
if (tunnel == null) {
if (!isPortVisible(host, port, 5000)) {
firstError = chain(firstError, new IOException(jenkinsUrl + " provided port:" + port
+ " is not reachable"));
continue;
} else {
LOGGER.log(Level.FINE, "TCP Agent Listener Port availability check passed");
}
} else {
LOGGER.log(Level.INFO, "Remoting TCP connection tunneling is enabled. " +
"Skipping the TCP Agent Listener Port availability check");
}
// sort the URLs so that the winner is the one we try first next time
final String winningJenkinsUrl = jenkinsUrl;
Collections.sort(jenkinsUrls, new Comparator<String>() {
@Override
public int compare(String o1, String o2) {
if (winningJenkinsUrl.equals(o1)) {
return -1;
}
if (winningJenkinsUrl.equals(o2)) {
return 1;
}
return 0;
}
});
if (tunnel != null) {
HostPort hostPort = new HostPort(tunnel, host, port);
host = hostPort.getHost();
port = hostPort.getPort();
}
//TODO: all the checks above do not make much sense if tunneling is enabled (JENKINS-52246)
return new JnlpAgentEndpoint(host, port, identity, agentProtocolNames, selectedJenkinsURL);
} finally {
con.disconnect();
}
}
if (firstError != null) {
throw firstError;
}
return null;
}
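    /**
     * Checks whether a TCP connection to {@code hostname:port} can be established within the given timeout.
     */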
@SuppressFBWarnings(value = "UNENCRYPTED_SOCKET", justification = "This just verifies connection to the port. No data is transmitted.")
private boolean isPortVisible(String hostname, int port, int timeout) {
boolean exitStatus = false;
Socket s = null;
try {
s = new Socket();
s.setReuseAddress(true);
SocketAddress sa = new InetSocketAddress(hostname, port);
s.connect(sa, timeout);
} catch (IOException e) {
LOGGER.warning(e.getMessage());
} finally {
if (s != null) {
if (s.isConnected()) {
exitStatus = true;
}
try {
s.close();
} catch (IOException e) {
LOGGER.warning(e.getMessage());
}
}
}
return exitStatus;
}
@Nonnull
private URL toAgentListenerURL(@Nonnull String jenkinsUrl) throws MalformedURLException {
return jenkinsUrl.endsWith("/")
? new URL(jenkinsUrl + "tcpSlaveAgentListener/")
: new URL(jenkinsUrl + "/tcpSlaveAgentListener/");
}
@Override
public void waitForReady() throws InterruptedException {
Thread t = Thread.currentThread();
String oldName = t.getName();
try {
int retries = 0;
while (true) {
Thread.sleep(1000 * 10);
try {
// Jenkins top page might be read-protected. see http://www.nabble.com/more-lenient-retry-logic-in-Engine.waitForServerToBack-td24703172.html
final String firstUrl = first(jenkinsUrls);
if (firstUrl == null) {
// returning here will cause the whole loop to be broken and all the urls to be tried again
return;
}
URL url = toAgentListenerURL(firstUrl);
retries++;
t.setName(oldName + ": trying " + url + " for " + retries + " times");
HttpURLConnection con =
(HttpURLConnection) openURLConnection(url, credentials, proxyCredentials, sslSocketFactory, disableHttpsCertValidation);
con.setConnectTimeout(5000);
con.setReadTimeout(5000);
con.connect();
if (con.getResponseCode() == 200) {
return;
}
LOGGER.log(Level.INFO,
"Master isn''t ready to talk to us on {0}. Will try again: response code={1}",
new Object[]{url, con.getResponseCode()});
} catch (SocketTimeoutException | ConnectException | NoRouteToHostException e) {
LOGGER.log(INFO, "Failed to connect to the master. Will try again: {0} {1}",
new String[] { e.getClass().getName(), e.getMessage() });
} catch (IOException e) {
// report the failure
LOGGER.log(INFO, "Failed to connect to the master. Will try again", e);
}
}
} finally {
t.setName(oldName);
}
}
@CheckForNull
static InetSocketAddress getResolvedHttpProxyAddress(@Nonnull String host, int port) throws IOException {
InetSocketAddress targetAddress = null;
Iterator<Proxy>
proxies =
ProxySelector.getDefault().select(URI.create(String.format("http://%s:%d", host, port))).iterator();
while (targetAddress == null && proxies.hasNext()) {
Proxy proxy = proxies.next();
if (proxy.type() == Proxy.Type.DIRECT) {
// Proxy.NO_PROXY with a DIRECT type is returned in two cases:
// - when no proxy (none) has been configured in the JVM (either with system properties or by the operating system)
// - when the host URI is part of the exclusion list defined by system property -Dhttp.nonProxyHosts
//
// Unfortunately, the Proxy class does not provide a way to differentiate both cases to fallback to
// environment variables only when no proxy has been configured. Therefore, we have to recheck if the URI
// host is in the exclusion list.
//
// Warning:
// This code only supports Java 9+ implementation where nonProxyHosts entries are not interpreted as regex expressions anymore.
// A wildcard at the beginning or the end of an expression is the only remaining supported behaviour (e.g. *.jenkins.io or 127.*)
// https://bugs.java.com/view_bug.do?bug_id=8035158
// http://hg.openjdk.java.net/jdk9/jdk9/jdk/rev/50a749f2cade
String nonProxyHosts = System.getProperty("http.nonProxyHosts");
if(nonProxyHosts != null && nonProxyHosts.length() != 0) {
// Build a list of regexps matching all nonProxyHosts entries
StringJoiner sj = new StringJoiner("|");
nonProxyHosts = nonProxyHosts.toLowerCase(Locale.ENGLISH);
for(String entry : nonProxyHosts.split("\\|")) {
if(entry.isEmpty())
continue;
else if(entry.startsWith("*"))
sj.add(".*" + Pattern.quote(entry.substring(1)));
else if(entry.endsWith("*"))
sj.add(Pattern.quote(entry.substring(0, entry.length() - 1)) + ".*");
else
sj.add(Pattern.quote(entry));
// Detect when the pattern contains multiple wildcards, which used to work prior to Java 9 (e.g. 127.*.*.*)
if(entry.split("\\*").length > 2)
LOGGER.log(Level.WARNING, "Using more than one wildcard is not supported in nonProxyHosts entries: {0}", entry);
}
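// A match against nonProxyHosts means the host must be reached directly; otherwise stop here and fall back to the http_proxy environment variable.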
Pattern nonProxyRegexps = Pattern.compile(sj.toString());
if(nonProxyRegexps.matcher(host.toLowerCase(Locale.ENGLISH)).matches()) {
return null;
} else {
break;
}
}
}
if (proxy.type() == Proxy.Type.HTTP) {
final SocketAddress address = proxy.address();
if (!(address instanceof InetSocketAddress)) {
LOGGER.log(Level.WARNING, "Unsupported proxy address type {0}", (address != null ? address.getClass() : "null"));
continue;
}
InetSocketAddress proxyAddress = (InetSocketAddress) address;
if (proxyAddress.isUnresolved())
proxyAddress = new InetSocketAddress(proxyAddress.getHostName(), proxyAddress.getPort());
targetAddress = proxyAddress;
}
}
if (targetAddress == null) {
String httpProxy = System.getenv("http_proxy");
if (httpProxy != null && !inNoProxyEnvVar(host)) {
try {
URL url = new URL(httpProxy);
targetAddress = new InetSocketAddress(url.getHost(), url.getPort());
} catch (MalformedURLException e) {
LOGGER.log(Level.WARNING, "Not using http_proxy environment variable which is invalid.", e);
}
}
}
return targetAddress;
}
/**
* Gets URL connection.
* If http_proxy environment variable exists, the connection uses the proxy.
* Credentials can be passed e.g. to support running Jenkins behind a (reverse) proxy requiring authorization
* FIXME: similar to hudson.remoting.Util.openURLConnection which is still used in hudson.remoting.Launcher
*/
@SuppressFBWarnings(value = "URLCONNECTION_SSRF_FD", justification = "Used by the agent for retrieving connection info from the server.")
static URLConnection openURLConnection(URL url, String credentials, String proxyCredentials,
SSLSocketFactory sslSocketFactory, boolean disableHttpsCertValidation) throws IOException {
String httpProxy = null;
// If http.proxyHost property exists, openConnection() uses it.
if (System.getProperty("http.proxyHost") == null) {
httpProxy = System.getenv("http_proxy");
}
URLConnection con = null;
if (httpProxy != null && "http".equals(url.getProtocol()) && !inNoProxyEnvVar(url.getHost())) {
try {
URL proxyUrl = new URL(httpProxy);
SocketAddress addr = new InetSocketAddress(proxyUrl.getHost(), proxyUrl.getPort());
Proxy proxy = new Proxy(Proxy.Type.HTTP, addr);
con = url.openConnection(proxy);
} catch (MalformedURLException e) {
LOGGER.log(Level.WARNING, "Not using http_proxy environment variable which is invalid.", e);
con = url.openConnection();
}
} else {
con = url.openConnection();
}
if (credentials != null) {
String encoding = Base64.getEncoder().encodeToString(credentials.getBytes(StandardCharsets.UTF_8));
con.setRequestProperty("Authorization", "Basic " + encoding);
}
if (proxyCredentials != null) {
String encoding = Base64.getEncoder().encodeToString(proxyCredentials.getBytes(StandardCharsets.UTF_8));
con.setRequestProperty("Proxy-Authorization", "Basic " + encoding);
}
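// For HTTPS connections, either disable certificate/hostname verification entirely or install the supplied SSLSocketFactory.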
if (con instanceof HttpsURLConnection) {
final HttpsURLConnection httpsConnection = (HttpsURLConnection) con;
if (disableHttpsCertValidation) {
LOGGER.log(Level.WARNING, "HTTPs certificate check is disabled for the endpoint.");
try {
SSLContext ctx = SSLContext.getInstance("TLS");
ctx.init(null, new TrustManager[]{new NoCheckTrustManager()}, new SecureRandom());
sslSocketFactory = ctx.getSocketFactory();
httpsConnection.setHostnameVerifier(new NoCheckHostnameVerifier());
httpsConnection.setSSLSocketFactory(sslSocketFactory);
} catch (KeyManagementException | NoSuchAlgorithmException ex) {
// We could just suppress it, but the exception is unlikely to happen.
// So let's just propagate the error and fail the resolution
throw new IOException("Cannot initialize the insecure HTTPs mode", ex);
}
} else if (sslSocketFactory != null) {
httpsConnection.setSSLSocketFactory(sslSocketFactory);
//FIXME: Is it really required in this path? Seems like a bug
httpsConnection.setHostnameVerifier(new NoCheckHostnameVerifier());
}
}
return con;
}
static boolean inNoProxyEnvVar(String host) {
return !NoProxyEvaluator.shouldProxy(host);
}
@CheckForNull
private static List<String> header(@Nonnull HttpURLConnection connection, String... headerNames) {
Map<String, List<String>> headerFields = connection.getHeaderFields();
for (String headerName : headerNames) {
for (Map.Entry<String, List<String>> entry: headerFields.entrySet()) {
final String headerField = entry.getKey();
if (headerField != null && headerField.equalsIgnoreCase(headerName)) {
return entry.getValue();
}
}
}
return null;
}
@CheckForNull
private static String first(@CheckForNull List<String> values) {
return values == null || values.isEmpty() ? null : values.get(0);
}
@Nonnull
private static String defaultString(@CheckForNull String value, @Nonnull String defaultValue) {
return value == null ? defaultValue : value;
}
}
| [
"\"http_proxy\"",
"\"http_proxy\""
]
| []
| [
"http_proxy"
]
| [] | ["http_proxy"] | java | 1 | 0 | |
examples/ssd/ssd_pascal_zf.py | '''
Before running this script, you should download the fully convolutional reduced (atrous) ZFNet at:
http://cs.unc.edu/~wliu/projects/SSD/ZF_conv_reduced.caffemodel
By default, we assume the model is stored in `$CAFFE_ROOT/models/ZFNet/`
'''
from __future__ import print_function
import caffe
from caffe.model_libs import *
from google.protobuf import text_format
import math
import os
import shutil
import stat
import subprocess
import sys
# Add extra layers on top of a "base" network (e.g. VGGNet or Inception).
def AddExtraLayers(net, use_batchnorm=True, lr_mult=1):
use_relu = True
# Add additional convolutional layers.
# 19 x 19
from_layer = net.keys()[-1]
# TODO(weiliu89): Construct the name using the last layer to avoid duplication.
# 10 x 10
out_layer = "conv6_1"
ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 256, 1, 0, 1,
lr_mult=lr_mult)
from_layer = out_layer
out_layer = "conv6_2"
ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 512, 3, 1, 2,
lr_mult=lr_mult)
# 5 x 5
from_layer = out_layer
out_layer = "conv7_1"
ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 128, 1, 0, 1,
lr_mult=lr_mult)
from_layer = out_layer
out_layer = "conv7_2"
ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 256, 3, 1, 2,
lr_mult=lr_mult)
# 3 x 3
from_layer = out_layer
out_layer = "conv8_1"
ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 128, 1, 0, 1,
lr_mult=lr_mult)
from_layer = out_layer
out_layer = "conv8_2"
ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 256, 3, 0, 1,
lr_mult=lr_mult)
# 1 x 1
from_layer = out_layer
out_layer = "conv9_1"
ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 128, 1, 0, 1,
lr_mult=lr_mult)
from_layer = out_layer
out_layer = "conv9_2"
ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu, 256, 3, 0, 1,
lr_mult=lr_mult)
return net
### Modify the following parameters accordingly ###
# The directory which contains the caffe code.
# We assume you are running the script at the CAFFE_ROOT.
caffe_root = os.getcwd()
# Set true if you want to start training right after generating all files.
run_soon = True
# Set true if you want to load from most recently saved snapshot.
# Otherwise, we will load from the pretrain_model defined below.
resume_training = True
# If true, Remove old model files.
remove_old_models = False
# The database file for training data. Created by data/VOC0712/create_data.sh
train_data = "examples/VOC0712/VOC0712_trainval_lmdb"
# The database file for testing data. Created by data/VOC0712/create_data.sh
test_data = "examples/VOC0712/VOC0712_test_lmdb"
# Specify the batch sampler.
resize_width = 300
resize_height = 300
resize = "{}x{}".format(resize_width, resize_height)
batch_sampler = [
{
'sampler': {
},
'max_trials': 1,
'max_sample': 1,
},
{
'sampler': {
'min_scale': 0.3,
'max_scale': 1.0,
'min_aspect_ratio': 0.5,
'max_aspect_ratio': 2.0,
},
'sample_constraint': {
'min_jaccard_overlap': 0.1,
},
'max_trials': 50,
'max_sample': 1,
},
{
'sampler': {
'min_scale': 0.3,
'max_scale': 1.0,
'min_aspect_ratio': 0.5,
'max_aspect_ratio': 2.0,
},
'sample_constraint': {
'min_jaccard_overlap': 0.3,
},
'max_trials': 50,
'max_sample': 1,
},
{
'sampler': {
'min_scale': 0.3,
'max_scale': 1.0,
'min_aspect_ratio': 0.5,
'max_aspect_ratio': 2.0,
},
'sample_constraint': {
'min_jaccard_overlap': 0.5,
},
'max_trials': 50,
'max_sample': 1,
},
{
'sampler': {
'min_scale': 0.3,
'max_scale': 1.0,
'min_aspect_ratio': 0.5,
'max_aspect_ratio': 2.0,
},
'sample_constraint': {
'min_jaccard_overlap': 0.7,
},
'max_trials': 50,
'max_sample': 1,
},
{
'sampler': {
'min_scale': 0.3,
'max_scale': 1.0,
'min_aspect_ratio': 0.5,
'max_aspect_ratio': 2.0,
},
'sample_constraint': {
'min_jaccard_overlap': 0.9,
},
'max_trials': 50,
'max_sample': 1,
},
{
'sampler': {
'min_scale': 0.3,
'max_scale': 1.0,
'min_aspect_ratio': 0.5,
'max_aspect_ratio': 2.0,
},
'sample_constraint': {
'max_jaccard_overlap': 1.0,
},
'max_trials': 50,
'max_sample': 1,
},
]
train_transform_param = {
'mirror': True,
'mean_value': [104, 117, 123],
'resize_param': {
'prob': 1,
'resize_mode': P.Resize.WARP,
'height': resize_height,
'width': resize_width,
'interp_mode': [
P.Resize.LINEAR,
P.Resize.AREA,
P.Resize.NEAREST,
P.Resize.CUBIC,
P.Resize.LANCZOS4,
],
},
'distort_param': {
'brightness_prob': 0.5,
'brightness_delta': 32,
'contrast_prob': 0.5,
'contrast_lower': 0.5,
'contrast_upper': 1.5,
'hue_prob': 0.5,
'hue_delta': 18,
'saturation_prob': 0.5,
'saturation_lower': 0.5,
'saturation_upper': 1.5,
'random_order_prob': 0.0,
},
'expand_param': {
'prob': 0.5,
'max_expand_ratio': 4.0,
},
'emit_constraint': {
'emit_type': caffe_pb2.EmitConstraint.CENTER,
}
}
test_transform_param = {
'mean_value': [104, 117, 123],
'resize_param': {
'prob': 1,
'resize_mode': P.Resize.WARP,
'height': resize_height,
'width': resize_width,
'interp_mode': [P.Resize.LINEAR],
},
}
# If true, use batch norm for all newly added layers.
# Currently only the non batch norm version has been tested.
use_batchnorm = False
lr_mult = 1
# Use different initial learning rate.
if use_batchnorm:
base_lr = 0.0004
else:
# A learning rate for batch_size = 1, num_gpus = 1.
base_lr = 0.00004
# Modify the job name if you want.
job_name = "SSD_{}".format(resize)
# The name of the model. Modify it if you want.
model_name = "ZF_VOC0712_{}".format(job_name)
# Directory which stores the model .prototxt file.
save_dir = "models/ZFNet/VOC0712/{}".format(job_name)
# Directory which stores the snapshot of models.
snapshot_dir = "models/ZFNet/VOC0712/{}".format(job_name)
# Directory which stores the job script and log file.
job_dir = "jobs/ZFNet/VOC0712/{}".format(job_name)
# Directory which stores the detection results.
output_result_dir = "{}/data/VOCdevkit/results/VOC2007/{}/Main".format(os.environ['HOME'], job_name)
# model definition files.
train_net_file = "{}/train.prototxt".format(save_dir)
test_net_file = "{}/test.prototxt".format(save_dir)
deploy_net_file = "{}/deploy.prototxt".format(save_dir)
solver_file = "{}/solver.prototxt".format(save_dir)
# snapshot prefix.
snapshot_prefix = "{}/{}".format(snapshot_dir, model_name)
# job script path.
job_file = "{}/{}.sh".format(job_dir, model_name)
# Stores the test image names and sizes. Created by data/VOC0712/create_list.sh
name_size_file = "data/VOC0712/test_name_size.txt"
# The pretrained model. We use the Fully convolutional reduced (atrous) ZFNet.
pretrain_model = "models/ZFNet/ZF_conv_reduced.caffemodel"
# Stores LabelMapItem.
label_map_file = "data/VOC0712/labelmap_voc.prototxt"
# MultiBoxLoss parameters.
num_classes = 21
share_location = True
background_label_id=0
train_on_diff_gt = True
normalization_mode = P.Loss.VALID
code_type = P.PriorBox.CENTER_SIZE
ignore_cross_boundary_bbox = False
mining_type = P.MultiBoxLoss.MAX_NEGATIVE
neg_pos_ratio = 3.
loc_weight = (neg_pos_ratio + 1.) / 4.
multibox_loss_param = {
'loc_loss_type': P.MultiBoxLoss.SMOOTH_L1,
'conf_loss_type': P.MultiBoxLoss.SOFTMAX,
'loc_weight': loc_weight,
'num_classes': num_classes,
'share_location': share_location,
'match_type': P.MultiBoxLoss.PER_PREDICTION,
'overlap_threshold': 0.5,
'use_prior_for_matching': True,
'background_label_id': background_label_id,
'use_difficult_gt': train_on_diff_gt,
'mining_type': mining_type,
'neg_pos_ratio': neg_pos_ratio,
'neg_overlap': 0.5,
'code_type': code_type,
'ignore_cross_boundary_bbox': ignore_cross_boundary_bbox,
}
loss_param = {
'normalization': normalization_mode,
}
# parameters for generating priors.
# minimum dimension of input image
min_dim = 300
# conv2 ==> 38 x 38
# fc7 ==> 19 x 19
# conv6_2 ==> 10 x 10
# conv7_2 ==> 5 x 5
# conv8_2 ==> 3 x 3
# conv9_2 ==> 1 x 1
mbox_source_layers = ['conv2', 'fc7', 'conv6_2', 'conv7_2', 'conv8_2', 'conv9_2']
# in percent %
min_ratio = 20
max_ratio = 90
step = int(math.floor((max_ratio - min_ratio) / (len(mbox_source_layers) - 2)))
min_sizes = []
max_sizes = []
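# Spread the prior box scales evenly between min_ratio and max_ratio (in percent of min_dim);
# the first source layer gets a smaller 10%-20% pair prepended below.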
for ratio in range(min_ratio, max_ratio + 1, step):
min_sizes.append(min_dim * ratio / 100.)
max_sizes.append(min_dim * (ratio + step) / 100.)
min_sizes = [min_dim * 10 / 100.] + min_sizes
max_sizes = [min_dim * 20 / 100.] + max_sizes
steps = [8, 16, 32, 64, 100, 300]
aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
# L2 normalize conv2.
normalizations = [20, -1, -1, -1, -1, -1]
# variance used to encode/decode prior bboxes.
if code_type == P.PriorBox.CENTER_SIZE:
prior_variance = [0.1, 0.1, 0.2, 0.2]
else:
prior_variance = [0.1]
flip = True
clip = False
# Solver parameters.
# Defining which GPUs to use.
gpus = "0,1,2,3"
gpulist = gpus.split(",")
num_gpus = len(gpulist)
# Divide the mini-batch to different GPUs.
batch_size = 32
accum_batch_size = 32
iter_size = accum_batch_size / batch_size
solver_mode = P.Solver.CPU
device_id = 0
batch_size_per_device = batch_size
if num_gpus > 0:
batch_size_per_device = int(math.ceil(float(batch_size) / num_gpus))
iter_size = int(math.ceil(float(accum_batch_size) / (batch_size_per_device * num_gpus)))
solver_mode = P.Solver.GPU
device_id = int(gpulist[0])
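# Adjust the base learning rate to match the multibox loss normalization mode.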
if normalization_mode == P.Loss.NONE:
base_lr /= batch_size_per_device
elif normalization_mode == P.Loss.VALID:
base_lr *= 25. / loc_weight
elif normalization_mode == P.Loss.FULL:
# Roughly there are 2000 prior bboxes per image.
# TODO(weiliu89): Estimate the exact # of priors.
base_lr *= 2000.
# Evaluate on whole test set.
num_test_image = 4952
test_batch_size = 8
# Ideally num_test_image should be divisible by test_batch_size,
# otherwise mAP will be slightly off the true value.
test_iter = int(math.ceil(float(num_test_image) / test_batch_size))
solver_param = {
# Train parameters
'base_lr': base_lr,
'weight_decay': 0.0005,
'lr_policy': "multistep",
'stepvalue': [80000, 100000, 120000],
'gamma': 0.1,
'momentum': 0.9,
'iter_size': iter_size,
'max_iter': 120000,
'snapshot': 80000,
'display': 10,
'average_loss': 10,
'type': "SGD",
'solver_mode': solver_mode,
'device_id': device_id,
'debug_info': False,
'snapshot_after_train': True,
# Test parameters
'test_iter': [test_iter],
'test_interval': 10000,
'eval_type': "detection",
'ap_version': "11point",
'test_initialization': False,
}
# parameters for generating detection output.
det_out_param = {
'num_classes': num_classes,
'share_location': share_location,
'background_label_id': background_label_id,
'nms_param': {'nms_threshold': 0.45, 'top_k': 400},
'save_output_param': {
'output_directory': output_result_dir,
'output_name_prefix': "comp4_det_test_",
'output_format': "VOC",
'label_map_file': label_map_file,
'name_size_file': name_size_file,
'num_test_image': num_test_image,
},
'keep_top_k': 200,
'confidence_threshold': 0.01,
'code_type': code_type,
}
# parameters for evaluating detection results.
det_eval_param = {
'num_classes': num_classes,
'background_label_id': background_label_id,
'overlap_threshold': 0.5,
'evaluate_difficult_gt': False,
'name_size_file': name_size_file,
}
### Hopefully you don't need to change the following ###
# Check file.
check_if_exist(train_data)
check_if_exist(test_data)
check_if_exist(label_map_file)
check_if_exist(pretrain_model)
make_if_not_exist(save_dir)
make_if_not_exist(job_dir)
make_if_not_exist(snapshot_dir)
# Create train net.
net = caffe.NetSpec()
net.data, net.label = CreateAnnotatedDataLayer(train_data, batch_size=batch_size_per_device,
train=True, output_label=True, label_map_file=label_map_file,
transform_param=train_transform_param, batch_sampler=batch_sampler)
ZFNetBody(net, from_layer='data', fully_conv=True, reduced=True, dilated=True,
dropout=False)
AddExtraLayers(net, use_batchnorm, lr_mult=lr_mult)
mbox_layers = CreateMultiBoxHead(net, data_layer='data', from_layers=mbox_source_layers,
use_batchnorm=use_batchnorm, min_sizes=min_sizes, max_sizes=max_sizes,
aspect_ratios=aspect_ratios, steps=steps, normalizations=normalizations,
num_classes=num_classes, share_location=share_location, flip=flip, clip=clip,
prior_variance=prior_variance, kernel_size=3, pad=1, lr_mult=lr_mult)
# Create the MultiBoxLossLayer.
name = "mbox_loss"
mbox_layers.append(net.label)
net[name] = L.MultiBoxLoss(*mbox_layers, multibox_loss_param=multibox_loss_param,
loss_param=loss_param, include=dict(phase=caffe_pb2.Phase.Value('TRAIN')),
propagate_down=[True, True, False, False])
with open(train_net_file, 'w') as f:
print('name: "{}_train"'.format(model_name), file=f)
print(net.to_proto(), file=f)
shutil.copy(train_net_file, job_dir)
# Create test net.
net = caffe.NetSpec()
net.data, net.label = CreateAnnotatedDataLayer(test_data, batch_size=test_batch_size,
train=False, output_label=True, label_map_file=label_map_file,
transform_param=test_transform_param)
ZFNetBody(net, from_layer='data', fully_conv=True, reduced=True, dilated=True,
dropout=False)
AddExtraLayers(net, use_batchnorm, lr_mult=lr_mult)
mbox_layers = CreateMultiBoxHead(net, data_layer='data', from_layers=mbox_source_layers,
use_batchnorm=use_batchnorm, min_sizes=min_sizes, max_sizes=max_sizes,
aspect_ratios=aspect_ratios, steps=steps, normalizations=normalizations,
num_classes=num_classes, share_location=share_location, flip=flip, clip=clip,
prior_variance=prior_variance, kernel_size=3, pad=1, lr_mult=lr_mult)
conf_name = "mbox_conf"
if multibox_loss_param["conf_loss_type"] == P.MultiBoxLoss.SOFTMAX:
reshape_name = "{}_reshape".format(conf_name)
net[reshape_name] = L.Reshape(net[conf_name], shape=dict(dim=[0, -1, num_classes]))
softmax_name = "{}_softmax".format(conf_name)
net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
flatten_name = "{}_flatten".format(conf_name)
net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
mbox_layers[1] = net[flatten_name]
elif multibox_loss_param["conf_loss_type"] == P.MultiBoxLoss.LOGISTIC:
sigmoid_name = "{}_sigmoid".format(conf_name)
net[sigmoid_name] = L.Sigmoid(net[conf_name])
mbox_layers[1] = net[sigmoid_name]
net.detection_out = L.DetectionOutput(*mbox_layers,
detection_output_param=det_out_param,
include=dict(phase=caffe_pb2.Phase.Value('TEST')))
net.detection_eval = L.DetectionEvaluate(net.detection_out, net.label,
detection_evaluate_param=det_eval_param,
include=dict(phase=caffe_pb2.Phase.Value('TEST')))
with open(test_net_file, 'w') as f:
print('name: "{}_test"'.format(model_name), file=f)
print(net.to_proto(), file=f)
shutil.copy(test_net_file, job_dir)
# Create deploy net.
# Remove the first and last layer from test net.
deploy_net = net
with open(deploy_net_file, 'w') as f:
net_param = deploy_net.to_proto()
# Remove the first (AnnotatedData) and last (DetectionEvaluate) layer from test net.
del net_param.layer[0]
del net_param.layer[-1]
net_param.name = '{}_deploy'.format(model_name)
net_param.input.extend(['data'])
net_param.input_shape.extend([
caffe_pb2.BlobShape(dim=[1, 3, resize_height, resize_width])])
print(net_param, file=f)
shutil.copy(deploy_net_file, job_dir)
# Create solver.
solver = caffe_pb2.SolverParameter(
train_net=train_net_file,
test_net=[test_net_file],
snapshot_prefix=snapshot_prefix,
**solver_param)
with open(solver_file, 'w') as f:
print(solver, file=f)
shutil.copy(solver_file, job_dir)
max_iter = 0
# Find most recent snapshot.
for file in os.listdir(snapshot_dir):
if file.endswith(".solverstate"):
basename = os.path.splitext(file)[0]
iter = int(basename.split("{}_iter_".format(model_name))[1])
if iter > max_iter:
max_iter = iter
train_src_param = '--weights="{}" \\\n'.format(pretrain_model)
if resume_training:
if max_iter > 0:
train_src_param = '--snapshot="{}_iter_{}.solverstate" \\\n'.format(snapshot_prefix, max_iter)
if remove_old_models:
# Remove any snapshots smaller than max_iter.
for file in os.listdir(snapshot_dir):
if file.endswith(".solverstate"):
basename = os.path.splitext(file)[0]
iter = int(basename.split("{}_iter_".format(model_name))[1])
if max_iter > iter:
os.remove("{}/{}".format(snapshot_dir, file))
if file.endswith(".caffemodel"):
basename = os.path.splitext(file)[0]
iter = int(basename.split("{}_iter_".format(model_name))[1])
if max_iter > iter:
os.remove("{}/{}".format(snapshot_dir, file))
# Create job file.
with open(job_file, 'w') as f:
f.write('cd {}\n'.format(caffe_root))
f.write('./build/tools/caffe train \\\n')
f.write('--solver="{}" \\\n'.format(solver_file))
f.write(train_src_param)
if solver_param['solver_mode'] == P.Solver.GPU:
f.write('--gpu {} 2>&1 | tee {}/{}.log\n'.format(gpus, job_dir, model_name))
else:
f.write('2>&1 | tee {}/{}.log\n'.format(job_dir, model_name))
# Copy the python script to job_dir.
py_file = os.path.abspath(__file__)
shutil.copy(py_file, job_dir)
# Run the job.
os.chmod(job_file, stat.S_IRWXU)
if run_soon:
subprocess.call(job_file, shell=True)
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
cordova/playstoreHelper/publish_to_beta.py | """Uploads apk to rollout track with user fraction."""
import sys
import socket
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
import subprocess
import xml.etree.ElementTree as ET
import os
from pathlib import Path
TRACK = 'beta'
USER_FRACTION = 1
APK_FILE = '../platforms/android/build/outputs/apk/release/android-release.apk'
CREDENTIALS_JSON = 'playstore-service-account.json'
def main(argv):
package_name = os.environ.get('PACKAGE_NAME')
if package_name:
print('using provided package name', package_name)
else:
# get package name from somewhere
print('finding package name')
package_name = ET.parse('../platforms/android/res/xml/config.xml').getroot().attrib['id']
print('found package name', package_name)
print()
apk_file = os.environ.get('APK_FILE')
if apk_file is None:
print('using default apk file path', APK_FILE)
apk_file = APK_FILE
print('Retrieving release notes from CHANGELOG.md...')
releaseText = subprocess.run('../../scripts/get_newest_release.js', stdout=subprocess.PIPE).stdout.decode()
if len(releaseText) > 500:
releaseText = releaseText[:495] + '\n...'
print()
print(releaseText)
print()
credentials = ServiceAccountCredentials.from_json_keyfile_name(
CREDENTIALS_JSON, scopes=['https://www.googleapis.com/auth/androidpublisher']
)
print('Found credentials, trying to connect...')
socket.setdefaulttimeout(900)
service = build('androidpublisher', 'v3', credentials=credentials)
edit_response = service.edits().insert(body={}, packageName=package_name).execute()
edit_id = edit_response['id']
print('Inserted edit with ID', edit_id)
print('Uploading APK...')
apk_response = service.edits().apks().upload(
editId=edit_id, packageName=package_name, media_body=apk_file
).execute()
print('Version code %d has been uploaded' % apk_response['versionCode'])
track_response = service.edits().tracks().patch(
editId=edit_id,
track=TRACK,
packageName=package_name,
body={
'releases': [{
'releaseNotes': [{
'text': releaseText,
'language': 'en-US'
}],
'versionCodes': [apk_response['versionCode']],
'userFraction': USER_FRACTION,
'status': 'inProgress',
}]
}
).execute()
print('Track %s is set with releases: %s' % (track_response['track'], str(track_response['releases'])))
if package_name == 'world.karrot':
assets = Path('../playstoreAssets')
language = 'en-US'
listing = assets / language / 'listing'
with (listing / 'shortDescription.txt').open() as shortDescription, \
(listing / 'fullDescription.txt').open() as fullDescription:
service.edits().listings().update(
editId=edit_id,
packageName=package_name,
language=language,
body={
'title': 'Karrot',
'language': language,
'shortDescription': shortDescription.read(),
'fullDescription': fullDescription.read(),
'video': '',
}
).execute()
print('Listing of %s has been updated' % package_name)
images_path = assets / language / 'images'
imageTypes = (
'featureGraphic',
'icon',
'phoneScreenshots',
'promoGraphic',
'sevenInchScreenshots',
'tenInchScreenshots',
'tvBanner',
'tvScreenshots',
'wearScreenshots',
)
images = [str(p) for p in images_path.iterdir()]
sha1 = subprocess.run(['sha1sum', *images], stdout=subprocess.PIPE).stdout.decode()
sha1_images = {sha1: path for (sha1, path) in [i.split() for i in sha1.splitlines()]}
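# For each image type, diff local images against those already on the Play Store by SHA-1:
# upload the new ones and delete the ones no longer present locally.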
for imageType in imageTypes:
our_images = {
sha1: path
for (sha1, path) in sha1_images.items() if path.split('/')[-1].startswith(imageType)
}
images_response = service.edits().images().list(
editId=edit_id,
packageName=package_name,
language=language,
imageType=imageType,
).execute()
their_images = images_response.get('images') or []
their_images = {i['sha1']: i['id'] for i in their_images}
to_upload = [our_images.get(k) for k in (our_images.keys() - their_images.keys())]
to_delete = [their_images.get(k) for k in (their_images.keys() - our_images.keys())]
for image_id in to_delete:
service.edits().images().delete(
editId=edit_id,
packageName=package_name,
language=language,
imageType=imageType,
imageId=image_id,
).execute()
print('Deleted', image_id)
for path in to_upload:
service.edits().images().upload(
editId=edit_id,
packageName=package_name,
language=language,
imageType=imageType,
media_body=path,
).execute()
print('Uploaded', path)
commit_request = service.edits().commit(editId=edit_id, packageName=package_name).execute()
print('Edit "%s" has been committed' % (commit_request['id']))
if __name__ == '__main__':
main(sys.argv)
| []
| []
| [
"PACKAGE_NAME",
"APK_FILE"
]
| [] | ["PACKAGE_NAME", "APK_FILE"] | python | 2 | 0 | |
cmd/config/cmd/cmdwrap.go | // Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package cmd
import (
"bytes"
"io"
"os"
"path/filepath"
"github.com/spf13/cobra"
"sigs.k8s.io/kustomize/kyaml/kio"
"sigs.k8s.io/kustomize/kyaml/kio/filters"
)
// GetWrapRunner returns a command runner.
func GetWrapRunner() *WrapRunner {
r := &WrapRunner{}
c := &cobra.Command{
Use: "wrap CMD...",
Short: "Wrap an executable so it implements the config fn interface",
Long: `Wrap an executable so it implements the config fn interface
wrap simplifies writing config functions by:
- invoking an executable command converting an input ResourceList into environment
- merging the output onto the original input as a set of patches
- setting filenames on any Resources missing them
config function authors may use wrap by using it to invoke a command from a container image
The following are equivalent:
kyaml wrap -- CMD
kyaml xargs -- CMD | kyaml merge | kyaml fmt --set-filenames
Environment Variables:
KUST_OVERRIDE_DIR:
Path to a directory containing patches to apply to after merging.
`,
Example: `
`,
RunE: r.runE,
SilenceUsage: true,
FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
Args: cobra.MinimumNArgs(1),
}
r.Command = c
r.XArgs = GetXArgsRunner()
c.Flags().BoolVar(&r.XArgs.EnvOnly,
"env-only", true, "only set env vars, not arguments.")
c.Flags().StringVar(&r.XArgs.WrapKind,
"wrap-kind", "List", "wrap the input xargs give to the command in this type.")
c.Flags().StringVar(&r.XArgs.WrapVersion,
"wrap-version", "v1", "wrap the input xargs give to the command in this type.")
return r
}
// WrapRunner contains the run function
type WrapRunner struct {
Command *cobra.Command
XArgs *XArgsRunner
getEnv func(key string) string
}
const (
KustMergeEnv = "KUST_MERGE"
KustOverrideDirEnv = "KUST_OVERRIDE_DIR"
)
func WrapCommand() *cobra.Command {
return GetWrapRunner().Command
}
func (r *WrapRunner) runE(c *cobra.Command, args []string) error {
if r.getEnv == nil {
r.getEnv = os.Getenv
}
xargsIn := &bytes.Buffer{}
if _, err := io.Copy(xargsIn, c.InOrStdin()); err != nil {
return err
}
mergeInput := bytes.NewBuffer(xargsIn.Bytes())
// Run the command
xargsOut := &bytes.Buffer{}
r.XArgs.Command.SetArgs(args)
r.XArgs.Command.SetIn(xargsIn)
r.XArgs.Command.SetOut(xargsOut)
r.XArgs.Command.SetErr(os.Stderr)
if err := r.XArgs.Command.Execute(); err != nil {
return err
}
// merge the results
buff := &kio.PackageBuffer{}
var fltrs []kio.Filter
var inputs []kio.Reader
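	// Unless KUST_MERGE explicitly disables it, merge the command output back onto the original input.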
if r.getEnv(KustMergeEnv) == "" || r.getEnv(KustMergeEnv) == "true" || r.getEnv(KustMergeEnv) == "1" {
inputs = append(inputs, &kio.ByteReader{Reader: mergeInput})
fltrs = append(fltrs, &filters.MergeFilter{})
}
inputs = append(inputs, &kio.ByteReader{Reader: xargsOut})
if err := (kio.Pipeline{Inputs: inputs, Filters: fltrs, Outputs: []kio.Writer{buff}}).
Execute(); err != nil {
return err
}
inputs, fltrs = []kio.Reader{buff}, nil
if r.getEnv(KustOverrideDirEnv) != "" {
// merge the overrides on top of the output
fltrs = append(fltrs, filters.MergeFilter{})
inputs = append(inputs,
kio.LocalPackageReader{
OmitReaderAnnotations: true, // don't set path annotations, as they would override
PackagePath: r.getEnv(KustOverrideDirEnv)})
}
fltrs = append(fltrs,
&filters.FileSetter{
FilenamePattern: filepath.Join("config", filters.DefaultFilenamePattern)},
&filters.FormatFilter{})
err := kio.Pipeline{
Inputs: inputs,
Filters: fltrs,
Outputs: []kio.Writer{kio.ByteWriter{
Sort: true,
KeepReaderAnnotations: true,
Writer: c.OutOrStdout(),
WrappingKind: kio.ResourceListKind,
WrappingApiVersion: kio.ResourceListApiVersion}}}.Execute()
if err != nil {
return err
}
return nil
}
| []
| []
| []
| [] | [] | go | 0 | 0 | |
localstack/constants.py | import os
import localstack_client.config
# LocalStack version
VERSION = '0.11.6'
# constant to represent the "local" region, i.e., local machine
REGION_LOCAL = 'local'
# dev environment
ENV_DEV = 'dev'
# HTTP headers used to forward proxy request URLs
HEADER_LOCALSTACK_EDGE_URL = 'x-localstack-edge'
HEADER_LOCALSTACK_TARGET = 'x-localstack-target'
# backend service ports, for services that are behind a proxy (counting down from 4566)
DEFAULT_PORT_EDGE = 4566
DEFAULT_PORT_WEB_UI = 8080
# host name for localhost
LOCALHOST = 'localhost'
LOCALHOST_IP = '127.0.0.1'
# version of the Maven dependency with Java utility code
LOCALSTACK_MAVEN_VERSION = '0.2.5'
# map of default service APIs and ports to be spun up (fetch map from localstack_client)
DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()
# host to bind to when starting the services
BIND_HOST = '0.0.0.0'
# AWS user account ID used for tests
if 'TEST_AWS_ACCOUNT_ID' not in os.environ:
os.environ['TEST_AWS_ACCOUNT_ID'] = '000000000000'
TEST_AWS_ACCOUNT_ID = os.environ['TEST_AWS_ACCOUNT_ID']
# root code folder
LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
# virtualenv folder
LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')
if not os.path.isdir(LOCALSTACK_VENV_FOLDER):
# assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/
LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))
# API Gateway path to indicate a user request sent to the gateway
PATH_USER_REQUEST = '_user_request_'
# name of LocalStack Docker image
DOCKER_IMAGE_NAME = 'localstack/localstack'
DOCKER_IMAGE_NAME_FULL = 'localstack/localstack-full'
# backdoor API path used to retrieve or update config variables
CONFIG_UPDATE_PATH = '/?_config_'
# environment variable name to tag local test runs
ENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'
# content types
APPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'
APPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'
APPLICATION_AMZ_CBOR_1_1 = 'application/x-amz-cbor-1.1'
APPLICATION_JSON = 'application/json'
APPLICATION_XML = 'application/xml'
APPLICATION_X_WWW_FORM_URLENCODED = 'application/x-www-form-urlencoded'
# strings to indicate truthy/falsy values
TRUE_STRINGS = ('1', 'true', 'True')
FALSE_STRINGS = ('0', 'false', 'False')
# Lambda defaults
LAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID
# installation constants
ELASTICSEARCH_URLS = {
'7.7.0': 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.7.0-linux-x86_64.tar.gz',
'7.4.0': 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.4.0-linux-x86_64.tar.gz',
'7.1.0': 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.1.0-linux-x86_64.tar.gz',
'6.7.0': 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.7.0.zip'
}
ELASTICSEARCH_DEFAULT_VERSION = '7.7.0'
# See https://docs.aws.amazon.com/ja_jp/elasticsearch-service/latest/developerguide/aes-supported-plugins.html
ELASTICSEARCH_PLUGIN_LIST = ['analysis-icu', 'ingest-attachment', 'analysis-kuromoji',
'mapper-murmur3', 'mapper-size', 'analysis-phonetic', 'analysis-smartcn', 'analysis-stempel', 'analysis-ukrainian']
# Default ES modules to exclude (save apprx 66MB in the final image)
ELASTICSEARCH_DELETE_MODULES = ['ingest-geoip']
ELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.15.7.jar'
STS_JAR_URL = 'https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'
STEPFUNCTIONS_ZIP_URL = 'https://s3.amazonaws.com/stepfunctionslocal/StepFunctionsLocal.zip'
KMS_URL_PATTERN = 'https://s3-eu-west-2.amazonaws.com/local-kms/localstack/v3/local-kms.<arch>.bin'
# TODO: Temporarily using a fixed version of DDB in Alpine, as we're hitting a SIGSEGV JVM crash with latest
DYNAMODB_JAR_URL_ALPINE = 'https://github.com/whummer/dynamodb-local/raw/master/etc/DynamoDBLocal.zip'
DYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'
# API endpoint for analytics events
API_ENDPOINT = os.environ.get('API_ENDPOINT') or 'https://api.localstack.cloud/v1'
# environment variable to indicates that this process is running the Web UI
LOCALSTACK_WEB_PROCESS = 'LOCALSTACK_WEB_PROCESS'
LOCALSTACK_INFRA_PROCESS = 'LOCALSTACK_INFRA_PROCESS'
# hardcoded AWS account ID used by moto
MOTO_ACCOUNT_ID = TEST_AWS_ACCOUNT_ID
# default AWS region us-east-1
AWS_REGION_US_EAST_1 = 'us-east-1'
# default lambda registry
DEFAULT_LAMBDA_CONTAINER_REGISTRY = 'lambci/lambda'
# environment variable to override max pool connections
try:
MAX_POOL_CONNECTIONS = int(os.environ['MAX_POOL_CONNECTIONS'])
except Exception:
MAX_POOL_CONNECTIONS = 150
| []
| []
| [
"TEST_AWS_ACCOUNT_ID",
"API_ENDPOINT",
"MAX_POOL_CONNECTIONS"
]
| [] | ["TEST_AWS_ACCOUNT_ID", "API_ENDPOINT", "MAX_POOL_CONNECTIONS"] | python | 3 | 0 | |
pkg/workflow/controllers/suite_test.go | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package controllers
import (
"context"
"os"
"path/filepath"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"go.uber.org/fx"
"k8s.io/client-go/rest"
"k8s.io/kubectl/pkg/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/controllers/types"
"github.com/chaos-mesh/chaos-mesh/controllers/utils/test"
)
var app *fx.App
var kubeClient client.Client
var config *rest.Config
var testEnv *envtest.Environment
var setupLog = ctrl.Log.WithName("setup")
// TestWorkflow runs the integration tests of workflow.
// Before run tests, take a look on ENV KUBEBUILDER_ASSETS, it should be set to <repo-root>/output/bin/kubebuilder/bin
func TestWorkflow(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"workflow suite",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
By("bootstrapping test environment")
t := true
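	// Use the cluster from the current kubeconfig when USE_EXISTING_CLUSTER=true; otherwise start a local control plane with the project CRDs.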
if os.Getenv("USE_EXISTING_CLUSTER") == "true" {
testEnv = &envtest.Environment{
UseExistingCluster: &t,
}
} else {
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")},
}
}
err := v1alpha1.SchemeBuilder.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
config, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(config).ToNot(BeNil())
kubeClient, err = client.New(config, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(kubeClient).ToNot(BeNil())
app = fx.New(
fx.Options(
test.Module,
fx.Supply(config),
types.ChaosObjects,
),
// only startup workflow related
fx.Invoke(BootstrapWorkflowControllers),
)
startCtx, cancel := context.WithTimeout(context.Background(), app.StartTimeout())
defer cancel()
if err := app.Start(startCtx); err != nil {
setupLog.Error(err, "fail to start manager")
}
Expect(err).ToNot(HaveOccurred())
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
stopCtx, cancel := context.WithTimeout(context.Background(), app.StopTimeout())
defer cancel()
if err := app.Stop(stopCtx); err != nil {
setupLog.Error(err, "fail to stop manager")
}
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
| [
"\"USE_EXISTING_CLUSTER\""
]
| []
| [
"USE_EXISTING_CLUSTER"
]
| [] | ["USE_EXISTING_CLUSTER"] | go | 1 | 0 | |
app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'uvu^8-hoq^arnz1!g%7ay#%w_r_u26mp@0ip4p7u1)i*xuy5pi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'users',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
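# Connection settings are supplied through environment variables.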
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.' +
'UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = '/vol/web/static'
MEDIA_ROOT = '/vol/web/media'
AUTH_USER_MODEL = 'core.User'
| []
| []
| [
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
]
| [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | python | 4 | 0 | |
pyfakefs/tests/fake_filesystem_test.py | # -*- coding: utf-8 -*-
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for fake_filesystem module."""
import contextlib
import errno
import os
import stat
import sys
import time
import unittest
from pyfakefs import fake_filesystem
from pyfakefs.fake_filesystem import set_uid, set_gid, is_root, reset_ids
from pyfakefs.helpers import IS_WIN
from pyfakefs.tests.test_utils import DummyTime, TestCase
class FakeDirectoryUnitTest(TestCase):
def setUp(self):
self.orig_time = time.time
time.time = DummyTime(10, 1)
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.fake_file = fake_filesystem.FakeFile(
'foobar', contents='dummy_file', filesystem=self.filesystem)
self.fake_dir = fake_filesystem.FakeDirectory(
'somedir', filesystem=self.filesystem)
def tearDown(self):
time.time = self.orig_time
def test_new_file_and_directory(self):
self.assertTrue(stat.S_IFREG & self.fake_file.st_mode)
self.assertTrue(stat.S_IFDIR & self.fake_dir.st_mode)
self.assertEqual({}, self.fake_dir.contents)
self.assertEqual(10, self.fake_file.st_ctime)
def test_add_entry(self):
self.fake_dir.add_entry(self.fake_file)
self.assertEqual({'foobar': self.fake_file}, self.fake_dir.contents)
def test_get_entry(self):
self.fake_dir.add_entry(self.fake_file)
self.assertEqual(self.fake_file, self.fake_dir.get_entry('foobar'))
def test_path(self):
self.filesystem.root.add_entry(self.fake_dir)
self.fake_dir.add_entry(self.fake_file)
self.assertEqual('/somedir/foobar', self.fake_file.path)
self.assertEqual('/somedir', self.fake_dir.path)
def test_path_with_drive(self):
self.filesystem.is_windows_fs = True
dir_path = 'C:/foo/bar/baz'
self.filesystem.create_dir(dir_path)
dir_object = self.filesystem.get_object(dir_path)
self.assertEqual(dir_path, dir_object.path)
def test_path_after_chdir(self):
dir_path = '/foo/bar/baz'
self.filesystem.create_dir(dir_path)
self.os.chdir(dir_path)
dir_object = self.filesystem.get_object(dir_path)
self.assertEqual(dir_path, dir_object.path)
def test_path_after_chdir_with_drive(self):
self.filesystem.is_windows_fs = True
dir_path = 'C:/foo/bar/baz'
self.filesystem.create_dir(dir_path)
self.os.chdir(dir_path)
dir_object = self.filesystem.get_object(dir_path)
self.assertEqual(dir_path, dir_object.path)
def test_remove_entry(self):
self.fake_dir.add_entry(self.fake_file)
self.assertEqual(self.fake_file, self.fake_dir.get_entry('foobar'))
self.fake_dir.remove_entry('foobar')
with self.assertRaises(KeyError):
self.fake_dir.get_entry('foobar')
def test_should_throw_if_set_size_is_not_integer(self):
def set_size():
self.fake_file.size = 0.1
self.assert_raises_os_error(errno.ENOSPC, set_size)
def test_should_throw_if_set_size_is_negative(self):
def set_size():
self.fake_file.size = -1
self.assert_raises_os_error(errno.ENOSPC, set_size)
def test_produce_empty_file_if_set_size_is_zero(self):
self.fake_file.size = 0
self.assertEqual('', self.fake_file.contents)
def test_sets_content_empty_if_set_size_is_zero(self):
self.fake_file.size = 0
self.assertEqual('', self.fake_file.contents)
def test_truncate_file_if_size_is_smaller_than_current_size(self):
self.fake_file.size = 6
self.assertEqual('dummy_', self.fake_file.contents)
def test_leave_file_unchanged_if_size_is_equal_to_current_size(self):
self.fake_file.size = 10
self.assertEqual('dummy_file', self.fake_file.contents)
def test_set_contents_to_dir_raises(self):
# Regression test for #276
self.filesystem.is_windows_fs = True
self.assert_raises_os_error(
errno.EISDIR, self.fake_dir.set_contents, 'a')
self.filesystem.is_windows_fs = False
self.assert_raises_os_error(
errno.EISDIR, self.fake_dir.set_contents, 'a')
def test_pads_with_nullbytes_if_size_is_greater_than_current_size(self):
self.fake_file.size = 13
self.assertEqual('dummy_file\0\0\0', self.fake_file.contents)
def test_set_m_time(self):
self.assertEqual(10, self.fake_file.st_mtime)
self.fake_file.st_mtime = 13
self.assertEqual(13, self.fake_file.st_mtime)
self.fake_file.st_mtime = 131
self.assertEqual(131, self.fake_file.st_mtime)
def test_file_inode(self):
filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
fake_os = fake_filesystem.FakeOsModule(filesystem)
file_path = 'some_file1'
filesystem.create_file(file_path, contents='contents here1')
self.assertLess(0, fake_os.stat(file_path)[stat.ST_INO])
file_obj = filesystem.get_object(file_path)
file_obj.st_ino = 43
self.assertEqual(43, fake_os.stat(file_path)[stat.ST_INO])
def test_directory_inode(self):
filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
fake_os = fake_filesystem.FakeOsModule(filesystem)
dirpath = 'testdir'
filesystem.create_dir(dirpath)
self.assertLess(0, fake_os.stat(dirpath)[stat.ST_INO])
dir_obj = filesystem.get_object(dirpath)
dir_obj.st_ino = 43
self.assertEqual(43, fake_os.stat(dirpath)[stat.ST_INO])
def test_ordered_dirs(self):
filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
filesystem.create_dir('/foo')
filesystem.create_file('/foo/2')
filesystem.create_file('/foo/4')
filesystem.create_file('/foo/1')
filesystem.create_file('/foo/3')
fake_dir = filesystem.get_object('/foo')
self.assertEqual(['2', '4', '1', '3'], fake_dir.ordered_dirs)
class SetLargeFileSizeTest(TestCase):
def setUp(self):
filesystem = fake_filesystem.FakeFilesystem()
self.fake_file = fake_filesystem.FakeFile('foobar',
filesystem=filesystem)
def test_should_throw_if_size_is_not_integer(self):
self.assert_raises_os_error(errno.ENOSPC,
self.fake_file.set_large_file_size, 0.1)
def test_should_throw_if_size_is_negative(self):
self.assert_raises_os_error(errno.ENOSPC,
self.fake_file.set_large_file_size, -1)
def test_sets_content_none_if_size_is_non_negative_integer(self):
self.fake_file.set_large_file_size(1000000000)
self.assertEqual(None, self.fake_file.contents)
self.assertEqual(1000000000, self.fake_file.st_size)
class NormalizePathTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.root_name = '/'
def test_empty_path_should_get_normalized_to_root_path(self):
self.assertEqual(self.root_name, self.filesystem.absnormpath(''))
def test_root_path_remains_unchanged(self):
self.assertEqual(self.root_name,
self.filesystem.absnormpath(self.root_name))
def test_relative_path_forced_to_cwd(self):
path = 'bar'
self.filesystem.cwd = '/foo'
self.assertEqual('/foo/bar', self.filesystem.absnormpath(path))
def test_absolute_path_remains_unchanged(self):
path = '/foo/bar'
self.assertEqual(path, self.filesystem.absnormpath(path))
def test_dotted_path_is_normalized(self):
path = '/foo/..'
self.assertEqual('/', self.filesystem.absnormpath(path))
path = 'foo/../bar'
self.assertEqual('/bar', self.filesystem.absnormpath(path))
def test_dot_path_is_normalized(self):
path = '.'
self.assertEqual('/', self.filesystem.absnormpath(path))
class GetPathComponentsTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.root_name = '/'
def test_root_path_should_return_empty_list(self):
self.assertEqual([], self.filesystem._path_components(self.root_name))
def test_empty_path_should_return_empty_list(self):
self.assertEqual([], self.filesystem._path_components(''))
def test_relative_path_with_one_component_should_return_component(self):
self.assertEqual(['foo'], self.filesystem._path_components('foo'))
def test_absolute_path_with_one_component_should_return_component(self):
self.assertEqual(['foo'], self.filesystem._path_components('/foo'))
def test_two_level_relative_path_should_return_components(self):
self.assertEqual(['foo', 'bar'],
self.filesystem._path_components('foo/bar'))
def test_two_level_absolute_path_should_return_components(self):
self.assertEqual(['foo', 'bar'],
self.filesystem._path_components('/foo/bar'))
class FakeFilesystemUnitTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.root_name = '/'
self.fake_file = fake_filesystem.FakeFile(
'foobar', filesystem=self.filesystem)
self.fake_child = fake_filesystem.FakeDirectory(
'foobaz', filesystem=self.filesystem)
self.fake_grandchild = fake_filesystem.FakeDirectory(
'quux', filesystem=self.filesystem)
def test_new_filesystem(self):
self.assertEqual('/', self.filesystem.path_separator)
self.assertTrue(stat.S_IFDIR & self.filesystem.root.st_mode)
self.assertEqual(self.root_name, self.filesystem.root.name)
self.assertEqual({}, self.filesystem.root.contents)
def test_none_raises_type_error(self):
with self.assertRaises(TypeError):
self.filesystem.exists(None)
def test_empty_string_does_not_exist(self):
self.assertFalse(self.filesystem.exists(''))
def test_exists_root(self):
self.assertTrue(self.filesystem.exists(self.root_name))
def test_exists_unadded_file(self):
self.assertFalse(self.filesystem.exists(self.fake_file.name))
def test_not_exists_subpath_named_like_file_contents(self):
# Regression test for #219
file_path = "/foo/bar"
self.filesystem.create_file(file_path, contents='baz')
self.assertFalse(self.filesystem.exists(file_path + "/baz"))
def test_get_root_object(self):
self.assertEqual(self.filesystem.root,
self.filesystem.get_object(self.root_name))
def test_add_object_to_root(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertEqual({'foobar': self.fake_file},
self.filesystem.root.contents)
def test_exists_added_file(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertTrue(self.filesystem.exists(self.fake_file.name))
def test_exists_relative_path_posix(self):
self.filesystem.is_windows_fs = False
self.filesystem.create_file('/a/b/file_one')
self.filesystem.create_file('/a/c/file_two')
self.assertTrue(self.filesystem.exists('a/b/../c/file_two'))
self.assertTrue(self.filesystem.exists('/a/c/../b/file_one'))
self.assertTrue(self.filesystem.exists('/a/c/../../a/b/file_one'))
self.assertFalse(self.filesystem.exists('a/b/../z/d'))
self.assertFalse(self.filesystem.exists('a/b/../z/../c/file_two'))
self.filesystem.cwd = '/a/c'
self.assertTrue(self.filesystem.exists('../b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/../../a/c/file_two'))
self.assertFalse(self.filesystem.exists('../z/file_one'))
self.assertFalse(self.filesystem.exists('../z/../c/file_two'))
def test_exists_relative_path_windows(self):
self.filesystem.is_windows_fs = True
self.filesystem.is_macos = False
self.filesystem.create_file('/a/b/file_one')
self.filesystem.create_file('/a/c/file_two')
self.assertTrue(self.filesystem.exists('a/b/../c/file_two'))
self.assertTrue(self.filesystem.exists('/a/c/../b/file_one'))
self.assertTrue(self.filesystem.exists('/a/c/../../a/b/file_one'))
self.assertFalse(self.filesystem.exists('a/b/../z/d'))
self.assertTrue(self.filesystem.exists('a/b/../z/../c/file_two'))
self.filesystem.cwd = '/a/c'
self.assertTrue(self.filesystem.exists('../b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/../../a/c/file_two'))
self.assertFalse(self.filesystem.exists('../z/file_one'))
self.assertTrue(self.filesystem.exists('../z/../c/file_two'))
def test_get_object_from_root(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertEqual(self.fake_file, self.filesystem.get_object('foobar'))
def test_get_nonexistent_object_from_root_error(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertEqual(self.fake_file, self.filesystem.get_object('foobar'))
self.assert_raises_os_error(
errno.ENOENT, self.filesystem.get_object, 'some_bogus_filename')
def test_remove_object_from_root(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.filesystem.remove_object(self.fake_file.name)
self.assert_raises_os_error(
errno.ENOENT, self.filesystem.get_object, self.fake_file.name)
def test_remove_nonexistent_object_from_root_error(self):
self.assert_raises_os_error(
errno.ENOENT, self.filesystem.remove_object, 'some_bogus_filename')
def test_exists_removed_file(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.filesystem.remove_object(self.fake_file.name)
self.assertFalse(self.filesystem.exists(self.fake_file.name))
def test_add_object_to_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
self.assertEqual(
{self.fake_file.name: self.fake_file},
self.filesystem.root.get_entry(self.fake_child.name).contents)
def test_add_object_to_regular_file_error_posix(self):
self.filesystem.is_windows_fs = False
self.filesystem.add_object(self.root_name, self.fake_file)
self.assert_raises_os_error(errno.ENOTDIR,
self.filesystem.add_object,
self.fake_file.name, self.fake_file)
def test_add_object_to_regular_file_error_windows(self):
self.filesystem.is_windows_fs = True
self.filesystem.add_object(self.root_name, self.fake_file)
self.assert_raises_os_error(errno.ENOENT,
self.filesystem.add_object,
self.fake_file.name, self.fake_file)
def test_exists_file_added_to_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
path = self.filesystem.joinpaths(self.fake_child.name,
self.fake_file.name)
self.assertTrue(self.filesystem.exists(path))
def test_get_object_from_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
self.assertEqual(self.fake_file,
self.filesystem.get_object(
self.filesystem.joinpaths(self.fake_child.name,
self.fake_file.name)))
def test_get_nonexistent_object_from_child_error(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
self.assert_raises_os_error(errno.ENOENT, self.filesystem.get_object,
self.filesystem.joinpaths(
self.fake_child.name,
'some_bogus_filename'))
def test_remove_object_from_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
target_path = self.filesystem.joinpaths(self.fake_child.name,
self.fake_file.name)
self.filesystem.remove_object(target_path)
self.assert_raises_os_error(errno.ENOENT, self.filesystem.get_object,
target_path)
def test_remove_object_from_child_error(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.assert_raises_os_error(
errno.ENOENT, self.filesystem.remove_object,
self.filesystem.joinpaths(self.fake_child.name,
'some_bogus_filename'))
def test_remove_object_from_non_directory_error(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assert_raises_os_error(
errno.ENOTDIR, self.filesystem.remove_object,
self.filesystem.joinpaths(
'%s' % self.fake_file.name,
'file_does_not_matter_since_parent_not_a_directory'))
def test_exists_file_removed_from_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
path = self.filesystem.joinpaths(self.fake_child.name,
self.fake_file.name)
self.filesystem.remove_object(path)
self.assertFalse(self.filesystem.exists(path))
def test_operate_on_grandchild_directory(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_grandchild)
grandchild_directory = self.filesystem.joinpaths(
self.fake_child.name, self.fake_grandchild.name)
grandchild_file = self.filesystem.joinpaths(
grandchild_directory, self.fake_file.name)
with self.assertRaises(OSError):
self.filesystem.get_object(grandchild_file)
self.filesystem.add_object(grandchild_directory, self.fake_file)
self.assertEqual(self.fake_file,
self.filesystem.get_object(grandchild_file))
self.assertTrue(self.filesystem.exists(grandchild_file))
self.filesystem.remove_object(grandchild_file)
with self.assertRaises(OSError):
self.filesystem.get_object(grandchild_file)
self.assertFalse(self.filesystem.exists(grandchild_file))
def test_create_directory_in_root_directory(self):
path = 'foo'
self.filesystem.create_dir(path)
new_dir = self.filesystem.get_object(path)
self.assertEqual(os.path.basename(path), new_dir.name)
self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
def test_create_directory_in_root_directory_already_exists_error(self):
path = 'foo'
self.filesystem.create_dir(path)
self.assert_raises_os_error(
errno.EEXIST, self.filesystem.create_dir, path)
def test_create_directory(self):
path = 'foo/bar/baz'
self.filesystem.create_dir(path)
new_dir = self.filesystem.get_object(path)
self.assertEqual(os.path.basename(path), new_dir.name)
self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
# Create second directory to make sure first is OK.
path = '%s/quux' % path
self.filesystem.create_dir(path)
new_dir = self.filesystem.get_object(path)
self.assertEqual(os.path.basename(path), new_dir.name)
self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
def test_create_directory_already_exists_error(self):
path = 'foo/bar/baz'
self.filesystem.create_dir(path)
self.assert_raises_os_error(
errno.EEXIST, self.filesystem.create_dir, path)
def test_create_file_in_read_only_directory_raises_in_posix(self):
self.filesystem.is_windows_fs = False
dir_path = '/foo/bar'
self.filesystem.create_dir(dir_path, perm_bits=0o555)
file_path = dir_path + '/baz'
if not is_root():
self.assert_raises_os_error(errno.EACCES,
self.filesystem.create_file,
file_path)
else:
self.filesystem.create_file(file_path)
self.assertTrue(self.filesystem.exists(file_path))
def test_create_file_in_read_only_directory_possible_in_windows(self):
self.filesystem.is_windows_fs = True
dir_path = 'C:/foo/bar'
self.filesystem.create_dir(dir_path, perm_bits=0o555)
file_path = dir_path + '/baz'
self.filesystem.create_file(file_path)
self.assertTrue(self.filesystem.exists(file_path))
def test_create_file_in_current_directory(self):
path = 'foo'
contents = 'dummy data'
self.filesystem.create_file(path, contents=contents)
self.assertTrue(self.filesystem.exists(path))
self.assertFalse(self.filesystem.exists(os.path.dirname(path)))
path = './%s' % path
self.assertTrue(self.filesystem.exists(os.path.dirname(path)))
def test_create_file_in_root_directory(self):
path = '/foo'
contents = 'dummy data'
self.filesystem.create_file(path, contents=contents)
new_file = self.filesystem.get_object(path)
self.assertTrue(self.filesystem.exists(path))
self.assertTrue(self.filesystem.exists(os.path.dirname(path)))
self.assertEqual(os.path.basename(path), new_file.name)
self.assertTrue(stat.S_IFREG & new_file.st_mode)
self.assertEqual(contents, new_file.contents)
def test_create_file_with_size_but_no_content_creates_large_file(self):
path = 'large_foo_bar'
self.filesystem.create_file(path, st_size=100000000)
new_file = self.filesystem.get_object(path)
self.assertEqual(None, new_file.contents)
self.assertEqual(100000000, new_file.st_size)
def test_create_file_in_root_directory_already_exists_error(self):
path = 'foo'
self.filesystem.create_file(path)
self.assert_raises_os_error(
errno.EEXIST, self.filesystem.create_file, path)
def test_create_file(self):
path = 'foo/bar/baz'
retval = self.filesystem.create_file(path, contents='dummy_data')
self.assertTrue(self.filesystem.exists(path))
self.assertTrue(self.filesystem.exists(os.path.dirname(path)))
new_file = self.filesystem.get_object(path)
self.assertEqual(os.path.basename(path), new_file.name)
if IS_WIN:
self.assertEqual(1, new_file.st_uid)
self.assertEqual(1, new_file.st_gid)
else:
self.assertEqual(os.getuid(), new_file.st_uid)
self.assertEqual(os.getgid(), new_file.st_gid)
self.assertEqual(new_file, retval)
def test_create_file_with_changed_ids(self):
path = 'foo/bar/baz'
set_uid(42)
set_gid(2)
self.filesystem.create_file(path)
self.assertTrue(self.filesystem.exists(path))
new_file = self.filesystem.get_object(path)
self.assertEqual(42, new_file.st_uid)
self.assertEqual(2, new_file.st_gid)
reset_ids()
def test_empty_file_created_for_none_contents(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
path = 'foo/bar/baz'
self.filesystem.create_file(path, contents=None)
with fake_open(path) as f:
self.assertEqual('', f.read())
def test_create_file_with_incorrect_mode_type(self):
with self.assertRaises(TypeError):
self.filesystem.create_file('foo', 'bar')
def test_create_file_already_exists_error(self):
path = 'foo/bar/baz'
self.filesystem.create_file(path, contents='dummy_data')
self.assert_raises_os_error(
errno.EEXIST, self.filesystem.create_file, path)
def test_create_link(self):
path = 'foo/bar/baz'
target_path = 'foo/bar/quux'
new_file = self.filesystem.create_symlink(path, 'quux')
# Neither the path nor the final target exists before we actually
# write to one of them, even though the link appears in the file
# system.
self.assertFalse(self.filesystem.exists(path))
self.assertFalse(self.filesystem.exists(target_path))
self.assertTrue(stat.S_IFLNK & new_file.st_mode)
# but once we write the linked-to file, they both will exist.
self.filesystem.create_file(target_path)
self.assertTrue(self.filesystem.exists(path))
self.assertTrue(self.filesystem.exists(target_path))
def test_resolve_object(self):
target_path = 'dir/target'
target_contents = '0123456789ABCDEF'
link_name = 'x'
self.filesystem.create_dir('dir')
self.filesystem.create_file('dir/target', contents=target_contents)
self.filesystem.create_symlink(link_name, target_path)
obj = self.filesystem.resolve(link_name)
self.assertEqual('target', obj.name)
self.assertEqual(target_contents, obj.contents)
def check_lresolve_object(self):
target_path = 'dir/target'
target_contents = '0123456789ABCDEF'
link_name = 'x'
self.filesystem.create_dir('dir')
self.filesystem.create_file('dir/target', contents=target_contents)
self.filesystem.create_symlink(link_name, target_path)
obj = self.filesystem.lresolve(link_name)
self.assertEqual(link_name, obj.name)
self.assertEqual(target_path, obj.contents)
def test_lresolve_object_windows(self):
self.filesystem.is_windows_fs = True
self.check_lresolve_object()
def test_lresolve_object_posix(self):
self.filesystem.is_windows_fs = False
self.check_lresolve_object()
def check_directory_access_on_file(self, error_subtype):
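# Windows reports ENOENT for a path whose parent component is a regular
# file, while POSIX reports ENOTDIR; the two tests below pass in the
# expected errno.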
self.filesystem.create_file('not_a_dir')
self.assert_raises_os_error(
error_subtype, self.filesystem.resolve, 'not_a_dir/foo')
self.assert_raises_os_error(
error_subtype, self.filesystem.lresolve, 'not_a_dir/foo/bar')
def test_directory_access_on_file_windows(self):
self.filesystem.is_windows_fs = True
self.check_directory_access_on_file(errno.ENOENT)
def test_directory_access_on_file_posix(self):
self.filesystem.is_windows_fs = False
self.check_directory_access_on_file(errno.ENOTDIR)
def test_pickle_fs(self):
"""Regression test for #445"""
import pickle
self.filesystem.open_files = []
p = pickle.dumps(self.filesystem)
fs = pickle.loads(p)
self.assertEqual(str(fs.root), str(self.filesystem.root))
self.assertEqual(fs.mount_points, self.filesystem.mount_points)
class CaseInsensitiveFakeFilesystemTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.filesystem.is_case_sensitive = False
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.path = self.os.path
def test_get_object(self):
self.filesystem.create_dir('/foo/bar')
self.filesystem.create_file('/foo/bar/baz')
self.assertTrue(self.filesystem.get_object('/Foo/Bar/Baz'))
def test_remove_object(self):
self.filesystem.create_dir('/foo/bar')
self.filesystem.create_file('/foo/bar/baz')
self.filesystem.remove_object('/Foo/Bar/Baz')
self.assertFalse(self.filesystem.exists('/foo/bar/baz'))
def test_exists(self):
self.filesystem.create_dir('/Foo/Bar')
self.assertTrue(self.filesystem.exists('/Foo/Bar'))
self.assertTrue(self.filesystem.exists('/foo/bar'))
self.filesystem.create_file('/foo/Bar/baz')
self.assertTrue(self.filesystem.exists('/Foo/bar/BAZ'))
self.assertTrue(self.filesystem.exists('/foo/bar/baz'))
def test_create_directory_with_different_case_root(self):
self.filesystem.create_dir('/Foo/Bar')
self.filesystem.create_dir('/foo/bar/baz')
dir1 = self.filesystem.get_object('/Foo/Bar')
dir2 = self.filesystem.get_object('/foo/bar')
self.assertEqual(dir1, dir2)
def test_create_file_with_different_case_dir(self):
self.filesystem.create_dir('/Foo/Bar')
self.filesystem.create_file('/foo/bar/baz')
dir1 = self.filesystem.get_object('/Foo/Bar')
dir2 = self.filesystem.get_object('/foo/bar')
self.assertEqual(dir1, dir2)
def test_resolve_path(self):
self.filesystem.create_dir('/foo/baz')
self.filesystem.create_symlink('/Foo/Bar', './baz/bip')
self.assertEqual('/foo/baz/bip',
self.filesystem.resolve_path('/foo/bar'))
def test_isdir_isfile(self):
self.filesystem.create_file('foo/bar')
self.assertTrue(self.path.isdir('Foo'))
self.assertFalse(self.path.isfile('Foo'))
self.assertTrue(self.path.isfile('Foo/Bar'))
self.assertFalse(self.path.isdir('Foo/Bar'))
def test_getsize(self):
file_path = 'foo/bar/baz'
self.filesystem.create_file(file_path, contents='1234567')
self.assertEqual(7, self.path.getsize('FOO/BAR/BAZ'))
def test_getsize_with_looping_symlink(self):
self.filesystem.is_windows_fs = False
dir_path = '/foo/bar'
self.filesystem.create_dir(dir_path)
link_path = dir_path + "/link"
link_target = link_path + "/link"
self.os.symlink(link_target, link_path)
self.assert_raises_os_error(
errno.ELOOP, self.os.path.getsize, link_path)
def test_get_mtime(self):
test_file = self.filesystem.create_file('foo/bar1.txt')
test_file.st_mtime = 24
self.assertEqual(24, self.path.getmtime('Foo/Bar1.TXT'))
def test_get_object_with_file_size(self):
self.filesystem.create_file('/Foo/Bar', st_size=10)
self.assertTrue(self.filesystem.get_object('/foo/bar'))
class CaseSensitiveFakeFilesystemTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.filesystem.is_case_sensitive = True
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.path = self.os.path
def test_get_object(self):
self.filesystem.create_dir('/foo/bar')
self.filesystem.create_file('/foo/bar/baz')
with self.assertRaises(OSError):
self.filesystem.get_object('/Foo/Bar/Baz')
def test_remove_object(self):
self.filesystem.create_dir('/foo/bar')
self.filesystem.create_file('/foo/bar/baz')
with self.assertRaises(OSError):
self.filesystem.remove_object('/Foo/Bar/Baz')
self.assertTrue(self.filesystem.exists('/foo/bar/baz'))
def test_exists(self):
self.filesystem.create_dir('/Foo/Bar')
self.assertTrue(self.filesystem.exists('/Foo/Bar'))
self.assertFalse(self.filesystem.exists('/foo/bar'))
self.filesystem.create_file('/foo/Bar/baz')
self.assertFalse(self.filesystem.exists('/Foo/bar/BAZ'))
self.assertFalse(self.filesystem.exists('/foo/bar/baz'))
def test_create_directory_with_different_case_root(self):
self.filesystem.create_dir('/Foo/Bar')
self.filesystem.create_dir('/foo/bar/baz')
dir1 = self.filesystem.get_object('/Foo/Bar')
dir2 = self.filesystem.get_object('/foo/bar')
self.assertNotEqual(dir1, dir2)
def test_create_file_with_different_case_dir(self):
self.filesystem.create_dir('/Foo/Bar')
self.filesystem.create_file('/foo/bar/baz')
dir1 = self.filesystem.get_object('/Foo/Bar')
dir2 = self.filesystem.get_object('/foo/bar')
self.assertNotEqual(dir1, dir2)
def test_isdir_isfile(self):
self.filesystem.create_file('foo/bar')
self.assertFalse(self.path.isdir('Foo'))
self.assertFalse(self.path.isfile('Foo'))
self.assertFalse(self.path.isfile('Foo/Bar'))
self.assertFalse(self.path.isdir('Foo/Bar'))
def test_getsize(self):
file_path = 'foo/bar/baz'
self.filesystem.create_file(file_path, contents='1234567')
with self.assertRaises(os.error):
self.path.getsize('FOO/BAR/BAZ')
def test_get_mtime(self):
test_file = self.filesystem.create_file('foo/bar1.txt')
test_file.st_mtime = 24
self.assert_raises_os_error(
errno.ENOENT, self.path.getmtime, 'Foo/Bar1.TXT')
class OsPathInjectionRegressionTest(TestCase):
"""Test faking os.path before calling os.walk.
Found when investigating a problem with
gws/tools/labrat/rat_utils_unittest, which was faking out os.path
before calling os.walk.
"""
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.os_path = os.path
# The bug was that when os.path gets faked, the FakePathModule doesn't
# get called in self.os.walk(). FakePathModule now insists that it is
# created as part of FakeOsModule.
self.os = fake_filesystem.FakeOsModule(self.filesystem)
def tearDown(self):
os.path = self.os_path
def test_create_top_level_directory(self):
top_level_dir = '/x'
self.assertFalse(self.filesystem.exists(top_level_dir))
self.filesystem.create_dir(top_level_dir)
self.assertTrue(self.filesystem.exists('/'))
self.assertTrue(self.filesystem.exists(top_level_dir))
self.filesystem.create_dir('%s/po' % top_level_dir)
self.filesystem.create_file('%s/po/control' % top_level_dir)
self.filesystem.create_file('%s/po/experiment' % top_level_dir)
self.filesystem.create_dir('%s/gv' % top_level_dir)
self.filesystem.create_file('%s/gv/control' % top_level_dir)
expected = [
('/', ['x'], []),
('/x', ['gv', 'po'], []),
('/x/gv', [], ['control']),
('/x/po', [], ['control', 'experiment']),
]
# as the result is unsorted, we have to check against sorted results
result = sorted([step for step in self.os.walk('/')],
key=lambda l: l[0])
self.assertEqual(len(expected), len(result))
for entry, expected_entry in zip(result, expected):
self.assertEqual(expected_entry[0], entry[0])
self.assertEqual(expected_entry[1], sorted(entry[1]))
self.assertEqual(expected_entry[2], sorted(entry[2]))
class FakePathModuleTest(TestCase):
def setUp(self):
self.orig_time = time.time
time.time = DummyTime(10, 1)
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='!')
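# An unusual separator ('!') is used here, which keeps these tests
# independent of the host OS path separator.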
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.path = self.os.path
def tearDown(self):
time.time = self.orig_time
def check_abspath(self, is_windows):
# the implementation differs in Windows and Posix, so test both
self.filesystem.is_windows_fs = is_windows
filename = u'foo'
abspath = u'!%s' % filename
self.filesystem.create_file(abspath)
self.assertEqual(abspath, self.path.abspath(abspath))
self.assertEqual(abspath, self.path.abspath(filename))
self.assertEqual(abspath, self.path.abspath(u'..!%s' % filename))
def test_abspath_windows(self):
self.check_abspath(is_windows=True)
def test_abspath_posix(self):
"""abspath should return a consistent representation of a file."""
self.check_abspath(is_windows=False)
def check_abspath_bytes(self, is_windows):
"""abspath should return a consistent representation of a file."""
self.filesystem.is_windows_fs = is_windows
filename = b'foo'
abspath = b'!' + filename
self.filesystem.create_file(abspath)
self.assertEqual(abspath, self.path.abspath(abspath))
self.assertEqual(abspath, self.path.abspath(filename))
self.assertEqual(abspath, self.path.abspath(b'..!' + filename))
def test_abspath_bytes_windows(self):
self.check_abspath_bytes(is_windows=True)
def test_abspath_bytes_posix(self):
self.check_abspath_bytes(is_windows=False)
def test_abspath_deals_with_relative_non_root_path(self):
"""abspath should correctly handle relative paths from a
non-! directory.
This test is distinct from the basic functionality test because
fake_filesystem has historically been based in !.
"""
filename = '!foo!bar!baz'
file_components = filename.split(self.path.sep)
basedir = '!%s' % (file_components[0],)
self.filesystem.create_file(filename)
self.os.chdir(basedir)
self.assertEqual(basedir, self.path.abspath(self.path.curdir))
self.assertEqual('!', self.path.abspath('..'))
self.assertEqual(self.path.join(basedir, file_components[1]),
self.path.abspath(file_components[1]))
def test_abs_path_with_drive_component(self):
self.filesystem.is_windows_fs = True
self.filesystem.cwd = 'C:!foo'
self.assertEqual('C:!foo!bar', self.path.abspath('bar'))
self.assertEqual('C:!foo!bar', self.path.abspath('C:bar'))
self.assertEqual('C:!foo!bar', self.path.abspath('!foo!bar'))
def test_isabs_with_drive_component(self):
self.filesystem.is_windows_fs = False
self.assertFalse(self.path.isabs('C:!foo'))
self.assertTrue(self.path.isabs('!'))
self.filesystem.is_windows_fs = True
self.assertTrue(self.path.isabs('C:!foo'))
self.assertTrue(self.path.isabs('!'))
def test_relpath(self):
path_foo = '!path!to!foo'
path_bar = '!path!to!bar'
path_other = '!some!where!else'
with self.assertRaises(ValueError):
self.path.relpath(None)
with self.assertRaises(ValueError):
self.path.relpath('')
self.assertEqual('path!to!foo', self.path.relpath(path_foo))
self.assertEqual('..!foo',
self.path.relpath(path_foo, path_bar))
self.assertEqual('..!..!..%s' % path_other,
self.path.relpath(path_other, path_bar))
self.assertEqual('.',
self.path.relpath(path_bar, path_bar))
def test_realpath_vs_abspath(self):
self.filesystem.is_windows_fs = False
self.filesystem.create_file('!george!washington!bridge')
self.filesystem.create_symlink('!first!president',
'!george!washington')
self.assertEqual('!first!president!bridge',
self.os.path.abspath('!first!president!bridge'))
self.assertEqual('!george!washington!bridge',
self.os.path.realpath('!first!president!bridge'))
self.os.chdir('!first!president')
self.assertEqual('!george!washington!bridge',
self.os.path.realpath('bridge'))
def test_samefile(self):
file_path1 = '!foo!bar!baz'
file_path2 = '!foo!bar!boo'
self.filesystem.create_file(file_path1)
self.filesystem.create_file(file_path2)
self.assertTrue(self.path.samefile(file_path1, file_path1))
self.assertFalse(self.path.samefile(file_path1, file_path2))
self.assertTrue(
self.path.samefile(file_path1, '!foo!..!foo!bar!..!bar!baz'))
def test_exists(self):
file_path = 'foo!bar!baz'
self.filesystem.create_file(file_path)
self.assertTrue(self.path.exists(file_path))
self.assertFalse(self.path.exists('!some!other!bogus!path'))
def test_lexists(self):
file_path = 'foo!bar!baz'
self.filesystem.create_dir('foo!bar')
self.filesystem.create_symlink(file_path, 'bogus')
self.assertTrue(self.path.lexists(file_path))
self.assertFalse(self.path.exists(file_path))
self.filesystem.create_file('foo!bar!bogus')
self.assertTrue(self.path.exists(file_path))
def test_dirname_with_drive(self):
self.filesystem.is_windows_fs = True
self.assertEqual(u'c:!foo',
self.path.dirname(u'c:!foo!bar'))
self.assertEqual(b'c:!',
self.path.dirname(b'c:!foo'))
self.assertEqual(u'!foo',
self.path.dirname(u'!foo!bar'))
self.assertEqual(b'!',
self.path.dirname(b'!foo'))
self.assertEqual(u'c:foo',
self.path.dirname(u'c:foo!bar'))
self.assertEqual(b'c:',
self.path.dirname(b'c:foo'))
self.assertEqual(u'foo',
self.path.dirname(u'foo!bar'))
def test_dirname(self):
dirname = 'foo!bar'
self.assertEqual(dirname, self.path.dirname('%s!baz' % dirname))
def test_join_strings(self):
components = [u'foo', u'bar', u'baz']
self.assertEqual(u'foo!bar!baz', self.path.join(*components))
def test_join_bytes(self):
components = [b'foo', b'bar', b'baz']
self.assertEqual(b'foo!bar!baz', self.path.join(*components))
def test_expand_user(self):
if self.is_windows:
self.assertEqual(self.path.expanduser('~'),
self.os.environ['USERPROFILE'].replace('\\', '!'))
else:
self.assertEqual(self.path.expanduser('~'),
self.os.environ['HOME'].replace('/', '!'))
@unittest.skipIf(TestCase.is_windows or TestCase.is_cygwin,
'only tested on unix systems')
def test_expand_root(self):
if sys.platform == 'darwin':
roothome = '!var!root'
else:
roothome = '!root'
self.assertEqual(self.path.expanduser('~root'), roothome)
def test_getsize_path_nonexistent(self):
file_path = 'foo!bar!baz'
with self.assertRaises(os.error):
self.path.getsize(file_path)
def test_getsize_file_empty(self):
file_path = 'foo!bar!baz'
self.filesystem.create_file(file_path)
self.assertEqual(0, self.path.getsize(file_path))
def test_getsize_file_non_zero_size(self):
file_path = 'foo!bar!baz'
self.filesystem.create_file(file_path, contents='1234567')
self.assertEqual(7, self.path.getsize(file_path))
def test_getsize_dir_empty(self):
# For directories, only require that the size is non-negative.
dir_path = 'foo!bar'
self.filesystem.create_dir(dir_path)
size = self.path.getsize(dir_path)
self.assertFalse(int(size) < 0,
'expected non-negative size; actual: %s' % size)
def test_getsize_dir_non_zero_size(self):
# For directories, only require that the size is non-negative.
dir_path = 'foo!bar'
self.filesystem.create_file(self.filesystem.joinpaths(dir_path, 'baz'))
size = self.path.getsize(dir_path)
self.assertFalse(int(size) < 0,
'expected non-negative size; actual: %s' % size)
def test_isdir(self):
self.filesystem.create_file('foo!bar')
self.assertTrue(self.path.isdir('foo'))
self.assertFalse(self.path.isdir('foo!bar'))
self.assertFalse(self.path.isdir('it_dont_exist'))
def test_isdir_with_cwd_change(self):
self.filesystem.create_file('!foo!bar!baz')
self.assertTrue(self.path.isdir('!foo'))
self.assertTrue(self.path.isdir('!foo!bar'))
self.assertTrue(self.path.isdir('foo'))
self.assertTrue(self.path.isdir('foo!bar'))
self.filesystem.cwd = '!foo'
self.assertTrue(self.path.isdir('!foo'))
self.assertTrue(self.path.isdir('!foo!bar'))
self.assertTrue(self.path.isdir('bar'))
def test_isfile(self):
self.filesystem.create_file('foo!bar')
self.assertFalse(self.path.isfile('foo'))
self.assertTrue(self.path.isfile('foo!bar'))
self.assertFalse(self.path.isfile('it_dont_exist'))
def test_get_mtime(self):
test_file = self.filesystem.create_file('foo!bar1.txt')
time.time.start()
self.assertEqual(10, test_file.st_mtime)
test_file.st_mtime = 24
self.assertEqual(24, self.path.getmtime('foo!bar1.txt'))
def test_get_mtime_raises_os_error(self):
self.assertFalse(self.path.exists('it_dont_exist'))
self.assert_raises_os_error(errno.ENOENT, self.path.getmtime,
'it_dont_exist')
def test_islink(self):
self.filesystem.create_dir('foo')
self.filesystem.create_file('foo!regular_file')
self.filesystem.create_symlink('foo!link_to_file', 'regular_file')
self.assertFalse(self.path.islink('foo'))
# An object can be both a link and a file or directory, according to
# the comments in Python/Lib/posixpath.py.
self.assertTrue(self.path.islink('foo!link_to_file'))
self.assertTrue(self.path.isfile('foo!link_to_file'))
self.assertTrue(self.path.isfile('foo!regular_file'))
self.assertFalse(self.path.islink('foo!regular_file'))
self.assertFalse(self.path.islink('it_dont_exist'))
def test_is_link_case_sensitive(self):
# Regression test for #306
self.filesystem.is_case_sensitive = False
self.filesystem.create_dir('foo')
self.filesystem.create_symlink('foo!bar', 'foo')
self.assertTrue(self.path.islink('foo!Bar'))
def test_ismount(self):
self.assertFalse(self.path.ismount(''))
self.assertTrue(self.path.ismount('!'))
self.assertFalse(self.path.ismount('!mount!'))
self.filesystem.add_mount_point('!mount')
self.assertTrue(self.path.ismount('!mount'))
self.assertTrue(self.path.ismount('!mount!'))
def test_ismount_with_drive_letters(self):
self.filesystem.is_windows_fs = True
self.assertTrue(self.path.ismount('!'))
self.assertTrue(self.path.ismount('c:!'))
self.assertFalse(self.path.ismount('c:'))
self.assertTrue(self.path.ismount('z:!'))
self.filesystem.add_mount_point('!mount')
self.assertTrue(self.path.ismount('!mount'))
self.assertTrue(self.path.ismount('!mount!'))
def test_ismount_with_unc_paths(self):
self.filesystem.is_windows_fs = True
self.assertTrue(self.path.ismount('!!a!'))
self.assertTrue(self.path.ismount('!!a!b'))
self.assertTrue(self.path.ismount('!!a!b!'))
self.assertFalse(self.path.ismount('!a!b!'))
self.assertFalse(self.path.ismount('!!a!b!c'))
def test_ismount_with_alternate_path_separator(self):
self.filesystem.alternative_path_separator = '!'
self.filesystem.add_mount_point('!mount')
self.assertTrue(self.path.ismount('!mount'))
self.assertTrue(self.path.ismount('!mount!'))
self.assertTrue(self.path.ismount('!mount!!'))
self.filesystem.is_windows_fs = True
self.assertTrue(self.path.ismount('Z:!'))
def test_getattr_forward_to_real_os_path(self):
"""Forwards any non-faked calls to os.path."""
self.assertTrue(hasattr(self.path, 'sep'),
'Get a faked os.path function')
private_path_function = None
if sys.version_info < (3, 6):
if self.is_windows:
private_path_function = '_get_bothseps'
else:
private_path_function = '_joinrealpath'
if private_path_function:
self.assertTrue(hasattr(self.path, private_path_function),
'Get a real os.path function '
'not implemented in fake os.path')
self.assertFalse(hasattr(self.path, 'nonexistent'))
class PathManipulationTestBase(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='|')
class CollapsePathPipeSeparatorTest(PathManipulationTestBase):
"""Tests CollapsePath (mimics os.path.normpath) using |
as path separator."""
def test_empty_path_becomes_dot_path(self):
self.assertEqual('.', self.filesystem.normpath(''))
def test_dot_path_unchanged(self):
self.assertEqual('.', self.filesystem.normpath('.'))
def test_slashes_are_not_collapsed(self):
"""Tests that '/' is not treated specially if the
path separator is '|'.
In particular, multiple slashes should not be collapsed.
"""
self.assertEqual('/', self.filesystem.normpath('/'))
self.assertEqual('/////', self.filesystem.normpath('/////'))
def test_root_path(self):
self.assertEqual('|', self.filesystem.normpath('|'))
def test_multiple_separators_collapsed_into_root_path(self):
self.assertEqual('|', self.filesystem.normpath('|||||'))
def test_all_dot_paths_removed_but_one(self):
self.assertEqual('.', self.filesystem.normpath('.|.|.|.'))
def test_all_dot_paths_removed_if_another_path_component_exists(self):
self.assertEqual('|', self.filesystem.normpath('|.|.|.|'))
self.assertEqual('foo|bar', self.filesystem.normpath('foo|.|.|.|bar'))
def test_ignores_up_level_references_starting_from_root(self):
self.assertEqual('|', self.filesystem.normpath('|..|..|..|'))
self.assertEqual(
'|', self.filesystem.normpath('|..|..|foo|bar|..|..|'))
self.filesystem.is_windows_fs = False # not an UNC path
self.assertEqual('|', self.filesystem.normpath('||..|.|..||'))
def test_conserves_up_level_references_starting_from_current_dir(self):
self.assertEqual(
'..|..', self.filesystem.normpath('..|foo|bar|..|..|..'))
def test_combine_dot_and_up_level_references_in_absolute_path(self):
self.assertEqual(
'|yes', self.filesystem.normpath('|||||.|..|||yes|no|..|.|||'))
def test_dots_in_path_collapses_to_last_path(self):
self.assertEqual(
'bar', self.filesystem.normpath('foo|..|bar'))
self.assertEqual(
'bar', self.filesystem.normpath('foo|..|yes|..|no|..|bar'))
class SplitPathTest(PathManipulationTestBase):
"""Tests SplitPath (which mimics os.path.split)
using | as path separator."""
def test_empty_path(self):
self.assertEqual(('', ''), self.filesystem.splitpath(''))
def test_no_separators(self):
self.assertEqual(('', 'ab'), self.filesystem.splitpath('ab'))
def test_slashes_do_not_split(self):
"""Tests that '/' is not treated specially if the
path separator is '|'."""
self.assertEqual(('', 'a/b'), self.filesystem.splitpath('a/b'))
def test_eliminate_trailing_separators_from_head(self):
self.assertEqual(('a', 'b'), self.filesystem.splitpath('a|b'))
self.assertEqual(('a', 'b'), self.filesystem.splitpath('a|||b'))
self.assertEqual(('|a', 'b'), self.filesystem.splitpath('|a||b'))
self.assertEqual(('a|b', 'c'), self.filesystem.splitpath('a|b|c'))
self.assertEqual(('|a|b', 'c'), self.filesystem.splitpath('|a|b|c'))
def test_root_separator_is_not_stripped(self):
self.assertEqual(('|', ''), self.filesystem.splitpath('|||'))
self.assertEqual(('|', 'a'), self.filesystem.splitpath('|a'))
self.assertEqual(('|', 'a'), self.filesystem.splitpath('|||a'))
def test_empty_tail_if_path_ends_in_separator(self):
self.assertEqual(('a|b', ''), self.filesystem.splitpath('a|b|'))
def test_empty_path_components_are_preserved_in_head(self):
self.assertEqual(('|a||b', 'c'), self.filesystem.splitpath('|a||b||c'))
class JoinPathTest(PathManipulationTestBase):
"""Tests JoinPath (which mimics os.path.join) using | as path separator."""
def test_one_empty_component(self):
self.assertEqual('', self.filesystem.joinpaths(''))
def test_multiple_empty_components(self):
self.assertEqual('', self.filesystem.joinpaths('', '', ''))
def test_separators_not_stripped_from_single_component(self):
self.assertEqual('||a||', self.filesystem.joinpaths('||a||'))
def test_one_separator_added_between_components(self):
self.assertEqual('a|b|c|d',
self.filesystem.joinpaths('a', 'b', 'c', 'd'))
def test_no_separator_added_for_components_ending_in_separator(self):
self.assertEqual('a|b|c', self.filesystem.joinpaths('a|', 'b|', 'c'))
self.assertEqual('a|||b|||c',
self.filesystem.joinpaths('a|||', 'b|||', 'c'))
def test_components_preceding_absolute_component_are_ignored(self):
self.assertEqual('|c|d',
self.filesystem.joinpaths('a', '|b', '|c', 'd'))
def test_one_separator_added_for_trailing_empty_components(self):
self.assertEqual('a|', self.filesystem.joinpaths('a', ''))
self.assertEqual('a|', self.filesystem.joinpaths('a', '', ''))
def test_no_separator_added_for_leading_empty_components(self):
self.assertEqual('a', self.filesystem.joinpaths('', 'a'))
def test_internal_empty_components_ignored(self):
self.assertEqual('a|b', self.filesystem.joinpaths('a', '', 'b'))
self.assertEqual('a|b|', self.filesystem.joinpaths('a|', '', 'b|'))
class PathSeparatorTest(TestCase):
def test_os_path_sep_matches_fake_filesystem_separator(self):
filesystem = fake_filesystem.FakeFilesystem(path_separator='!')
fake_os = fake_filesystem.FakeOsModule(filesystem)
self.assertEqual('!', fake_os.sep)
self.assertEqual('!', fake_os.path.sep)
class NormalizeCaseTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.filesystem.is_case_sensitive = False
def test_normalize_case(self):
self.filesystem.create_file('/Foo/Bar')
self.assertEqual('/Foo/Bar',
self.filesystem._original_path('/foo/bar'))
self.assertEqual('/Foo/Bar',
self.filesystem._original_path('/FOO/BAR'))
def test_normalize_case_for_drive(self):
self.filesystem.is_windows_fs = True
self.filesystem.create_file('C:/Foo/Bar')
self.assertEqual('C:/Foo/Bar',
self.filesystem._original_path('c:/foo/bar'))
self.assertEqual('C:/Foo/Bar',
self.filesystem._original_path('C:/FOO/BAR'))
def test_normalize_case_for_non_existing_file(self):
self.filesystem.create_dir('/Foo/Bar')
self.assertEqual('/Foo/Bar/baz',
self.filesystem._original_path('/foo/bar/baz'))
self.assertEqual('/Foo/Bar/BAZ',
self.filesystem._original_path('/FOO/BAR/BAZ'))
@unittest.skipIf(not TestCase.is_windows,
'Regression test for Windows problem only')
def test_normalize_case_for_lazily_added_empty_file(self):
# regression test for specific issue with added empty real files
filesystem = fake_filesystem.FakeFilesystem()
real_dir_path = os.path.split(
os.path.dirname(os.path.abspath(__file__)))[0]
filesystem.add_real_directory(real_dir_path)
initPyPath = os.path.join(real_dir_path, '__init__.py')
self.assertEqual(initPyPath,
filesystem._original_path(initPyPath.upper()))
class AlternativePathSeparatorTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='!')
self.filesystem.alternative_path_separator = '?'
def test_initial_value(self):
filesystem = fake_filesystem.FakeFilesystem()
if self.is_windows:
self.assertEqual('/', filesystem.alternative_path_separator)
else:
self.assertIsNone(filesystem.alternative_path_separator)
filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.assertIsNone(filesystem.alternative_path_separator)
def test_alt_sep(self):
fake_os = fake_filesystem.FakeOsModule(self.filesystem)
self.assertEqual('?', fake_os.altsep)
self.assertEqual('?', fake_os.path.altsep)
def test_collapse_path_with_mixed_separators(self):
self.assertEqual('!foo!bar', self.filesystem.normpath('!foo??bar'))
def test_normalize_path_with_mixed_separators(self):
path = 'foo?..?bar'
self.assertEqual('!bar', self.filesystem.absnormpath(path))
def test_exists_with_mixed_separators(self):
self.filesystem.create_file('?foo?bar?baz')
self.filesystem.create_file('!foo!bar!xyzzy!plugh')
self.assertTrue(self.filesystem.exists('!foo!bar!baz'))
self.assertTrue(self.filesystem.exists('?foo?bar?xyzzy?plugh'))
class DriveLetterSupportTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='!')
self.filesystem.is_windows_fs = True
def test_initial_value(self):
filesystem = fake_filesystem.FakeFilesystem()
if self.is_windows:
self.assertTrue(filesystem.is_windows_fs)
else:
self.assertFalse(filesystem.is_windows_fs)
def test_collapse_path(self):
self.assertEqual('c:!foo!bar',
self.filesystem.normpath('c:!!foo!!bar'))
def test_collapse_unc_path(self):
self.assertEqual('!!foo!bar!baz',
self.filesystem.normpath('!!foo!bar!!baz!!'))
def test_normalize_path_str(self):
self.filesystem.cwd = u''
self.assertEqual(u'c:!foo!bar',
self.filesystem.absnormpath(u'c:!foo!!bar'))
self.filesystem.cwd = u'c:!foo'
self.assertEqual(u'c:!foo!bar', self.filesystem.absnormpath(u'bar'))
def test_normalize_path_bytes(self):
self.filesystem.cwd = b''
self.assertEqual(b'c:!foo!bar',
self.filesystem.absnormpath(b'c:!foo!!bar'))
self.filesystem.cwd = b'c:!foo'
self.assertEqual(b'c:!foo!bar', self.filesystem.absnormpath(b'bar'))
def test_split_path_str(self):
self.assertEqual((u'c:!foo', u'bar'),
self.filesystem.splitpath(u'c:!foo!bar'))
self.assertEqual((u'c:!', u'foo'),
self.filesystem.splitpath(u'c:!foo'))
self.assertEqual((u'!foo', u'bar'),
self.filesystem.splitpath(u'!foo!bar'))
self.assertEqual((u'!', u'foo'),
self.filesystem.splitpath(u'!foo'))
self.assertEqual((u'c:foo', u'bar'),
self.filesystem.splitpath(u'c:foo!bar'))
self.assertEqual((u'c:', u'foo'),
self.filesystem.splitpath(u'c:foo'))
self.assertEqual((u'foo', u'bar'),
self.filesystem.splitpath(u'foo!bar'))
def test_split_path_bytes(self):
self.assertEqual((b'c:!foo', b'bar'),
self.filesystem.splitpath(b'c:!foo!bar'))
self.assertEqual((b'c:!', b'foo'),
self.filesystem.splitpath(b'c:!foo'))
self.assertEqual((b'!foo', b'bar'),
self.filesystem.splitpath(b'!foo!bar'))
self.assertEqual((b'!', b'foo'),
self.filesystem.splitpath(b'!foo'))
self.assertEqual((b'c:foo', b'bar'),
self.filesystem.splitpath(b'c:foo!bar'))
self.assertEqual((b'c:', b'foo'),
self.filesystem.splitpath(b'c:foo'))
self.assertEqual((b'foo', b'bar'),
self.filesystem.splitpath(b'foo!bar'))
def test_characters_before_root_ignored_in_join_paths(self):
self.assertEqual('c:d', self.filesystem.joinpaths('b', 'c:', 'd'))
def test_resolve_path(self):
self.assertEqual('c:!foo!bar',
self.filesystem.resolve_path('c:!foo!bar'))
def test_get_path_components(self):
self.assertEqual(['c:', 'foo', 'bar'],
self.filesystem._path_components('c:!foo!bar'))
self.assertEqual(['c:'], self.filesystem._path_components('c:'))
def test_split_drive_str(self):
self.assertEqual((u'c:', u'!foo!bar'),
self.filesystem.splitdrive(u'c:!foo!bar'))
self.assertEqual((u'', u'!foo!bar'),
self.filesystem.splitdrive(u'!foo!bar'))
self.assertEqual((u'c:', u'foo!bar'),
self.filesystem.splitdrive(u'c:foo!bar'))
self.assertEqual((u'', u'foo!bar'),
self.filesystem.splitdrive(u'foo!bar'))
def test_split_drive_bytes(self):
self.assertEqual((b'c:', b'!foo!bar'),
self.filesystem.splitdrive(b'c:!foo!bar'))
self.assertEqual((b'', b'!foo!bar'),
self.filesystem.splitdrive(b'!foo!bar'))
def test_split_drive_with_unc_path(self):
self.assertEqual(('!!foo!bar', '!baz'),
self.filesystem.splitdrive('!!foo!bar!baz'))
self.assertEqual(('', '!!foo'), self.filesystem.splitdrive('!!foo'))
self.assertEqual(('', '!!foo!!bar'),
self.filesystem.splitdrive('!!foo!!bar'))
self.assertEqual(('!!foo!bar', '!!'),
self.filesystem.splitdrive('!!foo!bar!!'))
class DiskSpaceTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='!',
total_size=100)
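# The whole fake filesystem is capped at 100 bytes, so the tests below
# can exercise disk-full (ENOSPC) behaviour with tiny files.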
self.os = fake_filesystem.FakeOsModule(self.filesystem)
def test_disk_usage_on_file_creation(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
total_size = 100
self.filesystem.add_mount_point('mount', total_size)
def create_too_large_file():
with fake_open('!mount!file', 'w') as dest:
dest.write('a' * (total_size + 1))
with self.assertRaises(OSError):
create_too_large_file()
self.assertEqual(0, self.filesystem.get_disk_usage('!mount').used)
with fake_open('!mount!file', 'w') as dest:
dest.write('a' * total_size)
self.assertEqual(total_size,
self.filesystem.get_disk_usage('!mount').used)
def test_file_system_size_after_large_file_creation(self):
filesystem = fake_filesystem.FakeFilesystem(
path_separator='!', total_size=1024 * 1024 * 1024 * 100)
filesystem.create_file('!foo!baz', st_size=1024 * 1024 * 1024 * 10)
self.assertEqual((1024 * 1024 * 1024 * 100,
1024 * 1024 * 1024 * 10,
1024 * 1024 * 1024 * 90),
filesystem.get_disk_usage())
def test_file_system_size_after_binary_file_creation(self):
self.filesystem.create_file('!foo!bar', contents=b'xyzzy')
self.assertEqual((100, 5, 95), self.filesystem.get_disk_usage())
def test_file_system_size_after_ascii_string_file_creation(self):
self.filesystem.create_file('!foo!bar', contents=u'complicated')
self.assertEqual((100, 11, 89), self.filesystem.get_disk_usage())
def test_filesystem_size_after_2byte_unicode_file_creation(self):
self.filesystem.create_file('!foo!bar', contents=u'сложно',
encoding='utf-8')
self.assertEqual((100, 12, 88), self.filesystem.get_disk_usage())
def test_filesystem_size_after_3byte_unicode_file_creation(self):
self.filesystem.create_file('!foo!bar', contents=u'複雑',
encoding='utf-8')
self.assertEqual((100, 6, 94), self.filesystem.get_disk_usage())
def test_file_system_size_after_file_deletion(self):
self.filesystem.create_file('!foo!bar', contents=b'xyzzy')
self.filesystem.create_file('!foo!baz', st_size=20)
self.filesystem.remove_object('!foo!bar')
self.assertEqual((100, 20, 80), self.filesystem.get_disk_usage())
def test_file_system_size_after_directory_removal(self):
self.filesystem.create_file('!foo!bar', st_size=10)
self.filesystem.create_file('!foo!baz', st_size=20)
self.filesystem.create_file('!foo1!bar', st_size=40)
self.filesystem.remove_object('!foo')
self.assertEqual((100, 40, 60), self.filesystem.get_disk_usage())
def test_creating_file_with_fitting_content(self):
initial_usage = self.filesystem.get_disk_usage()
try:
self.filesystem.create_file('!foo!bar', contents=b'a' * 100)
except OSError:
self.fail('File with contents fitting into disk space '
'could not be written.')
self.assertEqual(initial_usage.used + 100,
self.filesystem.get_disk_usage().used)
def test_creating_file_with_content_too_large(self):
def create_large_file():
self.filesystem.create_file('!foo!bar', contents=b'a' * 101)
initial_usage = self.filesystem.get_disk_usage()
with self.assertRaises(OSError):
create_large_file()
self.assertEqual(initial_usage, self.filesystem.get_disk_usage())
def test_creating_file_with_fitting_size(self):
initial_usage = self.filesystem.get_disk_usage()
try:
self.filesystem.create_file('!foo!bar', st_size=100)
except OSError:
self.fail(
'File with size fitting into disk space could not be written.')
self.assertEqual(initial_usage.used + 100,
self.filesystem.get_disk_usage().used)
def test_creating_file_with_size_too_large(self):
initial_usage = self.filesystem.get_disk_usage()
def create_large_file():
self.filesystem.create_file('!foo!bar', st_size=101)
with self.assertRaises(OSError):
create_large_file()
self.assertEqual(initial_usage, self.filesystem.get_disk_usage())
def test_resize_file_with_fitting_size(self):
file_object = self.filesystem.create_file('!foo!bar', st_size=50)
try:
file_object.set_large_file_size(100)
file_object.set_contents(b'a' * 100)
except OSError:
self.fail(
'Resizing file failed although disk space was sufficient.')
def test_resize_file_with_size_too_large(self):
file_object = self.filesystem.create_file('!foo!bar', st_size=50)
self.assert_raises_os_error(errno.ENOSPC,
file_object.set_large_file_size, 200)
self.assert_raises_os_error(errno.ENOSPC, file_object.set_contents,
'a' * 150)
def test_file_system_size_after_directory_rename(self):
self.filesystem.create_file('!foo!bar', st_size=20)
self.os.rename('!foo', '!baz')
self.assertEqual(20, self.filesystem.get_disk_usage().used)
def test_file_system_size_after_file_rename(self):
self.filesystem.create_file('!foo!bar', st_size=20)
self.os.rename('!foo!bar', '!foo!baz')
self.assertEqual(20, self.filesystem.get_disk_usage().used)
def test_that_hard_link_does_not_change_used_size(self):
file1_path = 'test_file1'
file2_path = 'test_file2'
self.filesystem.create_file(file1_path, st_size=20)
self.assertEqual(20, self.filesystem.get_disk_usage().used)
# creating a hard link shall not increase used space
self.os.link(file1_path, file2_path)
self.assertEqual(20, self.filesystem.get_disk_usage().used)
# removing a file shall not decrease used space
# if a hard link still exists
self.os.unlink(file1_path)
self.assertEqual(20, self.filesystem.get_disk_usage().used)
self.os.unlink(file2_path)
self.assertEqual(0, self.filesystem.get_disk_usage().used)
def test_that_the_size_of_correct_mount_point_is_used(self):
self.filesystem.add_mount_point('!mount_limited', total_size=50)
self.filesystem.add_mount_point('!mount_unlimited')
self.assert_raises_os_error(errno.ENOSPC,
self.filesystem.create_file,
'!mount_limited!foo', st_size=60)
self.assert_raises_os_error(errno.ENOSPC, self.filesystem.create_file,
'!bar', st_size=110)
try:
self.filesystem.create_file('!foo', st_size=60)
self.filesystem.create_file('!mount_limited!foo', st_size=40)
self.filesystem.create_file('!mount_unlimited!foo',
st_size=1000000)
except OSError:
self.fail('File with contents fitting into '
'disk space could not be written.')
def test_that_disk_usage_of_correct_mount_point_is_used(self):
self.filesystem.add_mount_point('!mount1', total_size=20)
self.filesystem.add_mount_point('!mount1!bar!mount2', total_size=50)
self.filesystem.create_file('!foo!bar', st_size=10)
self.filesystem.create_file('!mount1!foo!bar', st_size=10)
self.filesystem.create_file('!mount1!bar!mount2!foo!bar', st_size=10)
self.assertEqual(90, self.filesystem.get_disk_usage('!foo').free)
self.assertEqual(10,
self.filesystem.get_disk_usage('!mount1!foo').free)
self.assertEqual(40, self.filesystem.get_disk_usage(
'!mount1!bar!mount2').free)
def test_set_larger_disk_size(self):
self.filesystem.add_mount_point('!mount1', total_size=20)
self.assert_raises_os_error(errno.ENOSPC,
self.filesystem.create_file, '!mount1!foo',
st_size=100)
self.filesystem.set_disk_usage(total_size=200, path='!mount1')
self.filesystem.create_file('!mount1!foo', st_size=100)
self.assertEqual(100,
self.filesystem.get_disk_usage('!mount1!foo').free)
def test_set_smaller_disk_size(self):
self.filesystem.add_mount_point('!mount1', total_size=200)
self.filesystem.create_file('!mount1!foo', st_size=100)
self.assert_raises_os_error(errno.ENOSPC,
self.filesystem.set_disk_usage,
total_size=50, path='!mount1')
self.filesystem.set_disk_usage(total_size=150, path='!mount1')
self.assertEqual(50,
self.filesystem.get_disk_usage('!mount1!foo').free)
def test_disk_size_on_unlimited_disk(self):
self.filesystem.add_mount_point('!mount1')
self.filesystem.create_file('!mount1!foo', st_size=100)
self.filesystem.set_disk_usage(total_size=1000, path='!mount1')
self.assertEqual(900,
self.filesystem.get_disk_usage('!mount1!foo').free)
def test_disk_size_on_auto_mounted_drive_on_file_creation(self):
self.filesystem.is_windows_fs = True
# drive d: shall be auto-mounted and the used size adapted
self.filesystem.create_file('d:!foo!bar', st_size=100)
self.filesystem.set_disk_usage(total_size=1000, path='d:')
self.assertEqual(self.filesystem.get_disk_usage('d:!foo').free, 900)
def test_disk_size_on_auto_mounted_drive_on_directory_creation(self):
self.filesystem.is_windows_fs = True
self.filesystem.create_dir('d:!foo!bar')
self.filesystem.create_file('d:!foo!bar!baz', st_size=100)
self.filesystem.create_file('d:!foo!baz', st_size=100)
self.filesystem.set_disk_usage(total_size=1000, path='d:')
self.assertEqual(self.filesystem.get_disk_usage('d:!foo').free, 800)
def test_copying_preserves_byte_contents(self):
source_file = self.filesystem.create_file('foo', contents=b'somebytes')
dest_file = self.filesystem.create_file('bar')
dest_file.set_contents(source_file.contents)
self.assertEqual(dest_file.contents, source_file.contents)
class MountPointTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='!',
total_size=100)
self.filesystem.add_mount_point('!foo')
self.filesystem.add_mount_point('!bar')
self.filesystem.add_mount_point('!foo!baz')
def test_that_new_mount_points_get_new_device_number(self):
self.assertEqual(1, self.filesystem.get_object('!').st_dev)
self.assertEqual(2, self.filesystem.get_object('!foo').st_dev)
self.assertEqual(3, self.filesystem.get_object('!bar').st_dev)
self.assertEqual(4, self.filesystem.get_object('!foo!baz').st_dev)
def test_that_new_directories_get_correct_device_number(self):
self.assertEqual(1, self.filesystem.create_dir('!foo1!bar').st_dev)
self.assertEqual(2, self.filesystem.create_dir('!foo!bar').st_dev)
self.assertEqual(4,
self.filesystem.create_dir('!foo!baz!foo!bar').st_dev)
def test_that_new_files_get_correct_device_number(self):
self.assertEqual(1, self.filesystem.create_file('!foo1!bar').st_dev)
self.assertEqual(2, self.filesystem.create_file('!foo!bar').st_dev)
self.assertEqual(4, self.filesystem.create_file(
'!foo!baz!foo!bar').st_dev)
def test_that_mount_point_cannot_be_added_twice(self):
self.assert_raises_os_error(errno.EEXIST,
self.filesystem.add_mount_point, '!foo')
self.assert_raises_os_error(errno.EEXIST,
self.filesystem.add_mount_point, '!foo!')
def test_that_drives_are_auto_mounted(self):
self.filesystem.is_windows_fs = True
self.filesystem.create_dir('d:!foo!bar')
self.filesystem.create_file('d:!foo!baz')
self.filesystem.create_file('z:!foo!baz')
self.assertEqual(5, self.filesystem.get_object('d:').st_dev)
self.assertEqual(5, self.filesystem.get_object('d:!foo!bar').st_dev)
self.assertEqual(5, self.filesystem.get_object('d:!foo!baz').st_dev)
self.assertEqual(6, self.filesystem.get_object('z:!foo!baz').st_dev)
def test_that_drives_are_auto_mounted_case_insensitive(self):
self.filesystem.is_windows_fs = True
self.filesystem.is_case_sensitive = False
self.filesystem.create_dir('D:!foo!bar')
self.filesystem.create_file('e:!foo!baz')
self.assertEqual(5, self.filesystem.get_object('D:').st_dev)
self.assertEqual(5, self.filesystem.get_object('d:!foo!bar').st_dev)
self.assertEqual(6, self.filesystem.get_object('e:!foo').st_dev)
self.assertEqual(6, self.filesystem.get_object('E:!Foo!Baz').st_dev)
def test_that_unc_paths_are_auto_mounted(self):
self.filesystem.is_windows_fs = True
self.filesystem.create_dir('!!foo!bar!baz')
self.filesystem.create_file('!!foo!bar!bip!bop')
self.assertEqual(5, self.filesystem.get_object('!!foo!bar').st_dev)
self.assertEqual(5, self.filesystem.get_object(
'!!foo!bar!bip!bop').st_dev)
class RealFileSystemAccessTest(TestCase):
def setUp(self):
# use the real path separator to work with the real file system
self.filesystem = fake_filesystem.FakeFilesystem()
self.fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
self.pyfakefs_path = os.path.split(
os.path.dirname(os.path.abspath(__file__)))[0]
self.root_path = os.path.split(self.pyfakefs_path)[0]
def test_add_non_existing_real_file_raises(self):
nonexisting_path = os.path.join('nonexisting', 'test.txt')
with self.assertRaises(OSError):
self.filesystem.add_real_file(nonexisting_path)
self.assertFalse(self.filesystem.exists(nonexisting_path))
def test_add_non_existing_real_directory_raises(self):
nonexisting_path = '/nonexisting'
self.assert_raises_os_error(errno.ENOENT,
self.filesystem.add_real_directory,
nonexisting_path)
self.assertFalse(self.filesystem.exists(nonexisting_path))
def test_existing_fake_file_raises(self):
real_file_path = __file__
self.filesystem.create_file(real_file_path)
self.assert_raises_os_error(errno.EEXIST,
self.filesystem.add_real_file,
real_file_path)
def test_existing_fake_directory_raises(self):
self.filesystem.create_dir(self.root_path)
self.assert_raises_os_error(errno.EEXIST,
self.filesystem.add_real_directory,
self.root_path)
def check_fake_file_stat(self, fake_file, real_file_path,
target_path=None):
if target_path is None or target_path == real_file_path:
self.assertTrue(self.filesystem.exists(real_file_path))
else:
self.assertFalse(self.filesystem.exists(real_file_path))
self.assertTrue(self.filesystem.exists(target_path))
real_stat = os.stat(real_file_path)
self.assertIsNone(fake_file._byte_contents)
self.assertEqual(fake_file.st_size, real_stat.st_size)
self.assertAlmostEqual(fake_file.st_ctime, real_stat.st_ctime,
places=5)
self.assertAlmostEqual(fake_file.st_atime, real_stat.st_atime,
places=5)
self.assertAlmostEqual(fake_file.st_mtime, real_stat.st_mtime,
places=5)
self.assertEqual(fake_file.st_uid, real_stat.st_uid)
self.assertEqual(fake_file.st_gid, real_stat.st_gid)
def check_read_only_file(self, fake_file, real_file_path):
with open(real_file_path, 'rb') as f:
real_contents = f.read()
self.assertEqual(fake_file.byte_contents, real_contents)
if not is_root():
self.assert_raises_os_error(
errno.EACCES, self.fake_open, real_file_path, 'w')
else:
with self.fake_open(real_file_path, 'w'):
pass
def check_writable_file(self, fake_file, real_file_path):
with open(real_file_path, 'rb') as f:
real_contents = f.read()
self.assertEqual(fake_file.byte_contents, real_contents)
with self.fake_open(real_file_path, 'wb') as f:
f.write(b'test')
with open(real_file_path, 'rb') as f:
real_contents1 = f.read()
self.assertEqual(real_contents1, real_contents)
with self.fake_open(real_file_path, 'rb') as f:
fake_contents = f.read()
self.assertEqual(fake_contents, b'test')
def test_add_existing_real_file_read_only(self):
real_file_path = os.path.abspath(__file__)
fake_file = self.filesystem.add_real_file(real_file_path)
self.check_fake_file_stat(fake_file, real_file_path)
self.assertEqual(fake_file.st_mode & 0o333, 0)
self.check_read_only_file(fake_file, real_file_path)
def test_add_existing_real_file_read_write(self):
real_file_path = os.path.realpath(__file__)
fake_file = self.filesystem.add_real_file(real_file_path,
read_only=False)
self.check_fake_file_stat(fake_file, real_file_path)
self.assertEqual(fake_file.st_mode, os.stat(real_file_path).st_mode)
self.check_writable_file(fake_file, real_file_path)
def test_add_real_file_to_existing_path(self):
real_file_path = os.path.abspath(__file__)
self.filesystem.create_file('/foo/bar')
self.assert_raises_os_error(
errno.EEXIST, self.filesystem.add_real_file,
real_file_path, target_path='/foo/bar')
def test_add_real_file_to_non_existing_path(self):
real_file_path = os.path.abspath(__file__)
fake_file = self.filesystem.add_real_file(real_file_path,
target_path='/foo/bar')
self.check_fake_file_stat(fake_file, real_file_path,
target_path='/foo/bar')
def test_write_to_real_file(self):
# regression test for #470
real_file_path = os.path.abspath(__file__)
self.filesystem.add_real_file(real_file_path, read_only=False)
with self.fake_open(real_file_path, 'w') as f:
f.write('foo')
with self.fake_open(real_file_path, 'rb') as f:
self.assertEqual(b'foo', f.read())
def test_add_existing_real_directory_read_only(self):
self.filesystem.add_real_directory(self.pyfakefs_path)
self.assertTrue(self.filesystem.exists(self.pyfakefs_path))
self.assertTrue(self.filesystem.exists(
os.path.join(self.pyfakefs_path, 'fake_filesystem.py')))
self.assertTrue(self.filesystem.exists(
os.path.join(self.pyfakefs_path, 'fake_pathlib.py')))
file_path = os.path.join(self.pyfakefs_path,
'fake_filesystem_shutil.py')
fake_file = self.filesystem.resolve(file_path)
self.check_fake_file_stat(fake_file, file_path)
self.check_read_only_file(fake_file, file_path)
def test_add_existing_real_directory_tree(self):
self.filesystem.add_real_directory(self.root_path)
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs', 'tests',
'fake_filesystem_test.py')))
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs',
'fake_filesystem.py')))
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs', '__init__.py')))
@contextlib.contextmanager
def create_symlinks(self, symlinks):
for link in symlinks:
os.symlink(link[0], link[1])
yield
for link in symlinks:
os.unlink(link[1])
def test_add_existing_real_directory_symlink(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
real_directory = os.path.join(self.root_path, 'pyfakefs', 'tests')
symlinks = [
('..', os.path.join(
real_directory, 'fixtures', 'symlink_dir_relative')),
('../all_tests.py', os.path.join(
real_directory, 'fixtures', 'symlink_file_relative')),
(real_directory, os.path.join(
real_directory, 'fixtures', 'symlink_dir_absolute')),
(os.path.join(real_directory, 'all_tests.py'), os.path.join(
real_directory, 'fixtures', 'symlink_file_absolute')),
('/etc/something', os.path.join(
real_directory, 'fixtures', 'symlink_file_absolute_outside')),
]
self.filesystem.create_file('/etc/something')
with fake_open('/etc/something', 'w') as f:
f.write('good morning')
try:
with self.create_symlinks(symlinks):
self.filesystem.add_real_directory(
real_directory, lazy_read=False)
except OSError:
if self.is_windows:
raise unittest.SkipTest(
'Symlinks under Windows need admin privileges')
raise
for link in symlinks:
self.assertTrue(self.filesystem.islink(link[1]))
# relative
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs', 'tests',
'fixtures/symlink_dir_relative')))
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs', 'tests',
'fixtures/symlink_dir_relative/all_tests.py')))
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs', 'tests',
'fixtures/symlink_file_relative')))
# absolute
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs', 'tests',
'fixtures/symlink_dir_absolute')))
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs', 'tests',
'fixtures/symlink_dir_absolute/all_tests.py')))
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs', 'tests',
'fixtures/symlink_file_absolute')))
# outside
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs', 'tests',
'fixtures/symlink_file_absolute_outside')))
self.assertEqual(
fake_open(os.path.join(
self.root_path, 'pyfakefs', 'tests',
'fixtures/symlink_file_absolute_outside')).read(),
'good morning'
)
def test_add_existing_real_directory_symlink_target_path(self):
real_directory = os.path.join(self.root_path, 'pyfakefs', 'tests')
symlinks = [
('..', os.path.join(
real_directory, 'fixtures', 'symlink_dir_relative')),
('../all_tests.py', os.path.join(
real_directory, 'fixtures', 'symlink_file_relative')),
]
try:
with self.create_symlinks(symlinks):
self.filesystem.add_real_directory(
real_directory, target_path='/path', lazy_read=False)
except OSError:
if self.is_windows:
raise unittest.SkipTest(
'Symlinks under Windows need admin privileges')
raise
self.assertTrue(self.filesystem.exists(
'/path/fixtures/symlink_dir_relative'))
self.assertTrue(self.filesystem.exists(
'/path/fixtures/symlink_dir_relative/all_tests.py'))
self.assertTrue(self.filesystem.exists(
'/path/fixtures/symlink_file_relative'))
def test_add_existing_real_directory_symlink_lazy_read(self):
real_directory = os.path.join(self.root_path, 'pyfakefs', 'tests')
symlinks = [
('..', os.path.join(
real_directory, 'fixtures', 'symlink_dir_relative')),
('../all_tests.py', os.path.join(
real_directory, 'fixtures', 'symlink_file_relative')),
]
try:
with self.create_symlinks(symlinks):
self.filesystem.add_real_directory(
real_directory, target_path='/path', lazy_read=True)
self.assertTrue(self.filesystem.exists(
'/path/fixtures/symlink_dir_relative'))
self.assertTrue(self.filesystem.exists(
'/path/fixtures/symlink_dir_relative/all_tests.py'))
self.assertTrue(self.filesystem.exists(
'/path/fixtures/symlink_file_relative'))
except OSError:
if self.is_windows:
raise unittest.SkipTest(
'Symlinks under Windows need admin privileges')
raise
def test_add_existing_real_directory_tree_to_existing_path(self):
self.filesystem.create_dir('/foo/bar')
self.assert_raises_os_error(errno.EEXIST,
self.filesystem.add_real_directory,
self.root_path,
target_path='/foo/bar')
def test_add_existing_real_directory_tree_to_other_path(self):
self.filesystem.add_real_directory(self.root_path,
target_path='/foo/bar')
self.assertFalse(
self.filesystem.exists(
os.path.join(self.pyfakefs_path, 'tests',
'fake_filesystem_test.py')))
self.assertTrue(
self.filesystem.exists(
os.path.join('foo', 'bar', 'pyfakefs', 'tests',
'fake_filesystem_test.py')))
self.assertFalse(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs',
'fake_filesystem.py')))
self.assertTrue(
self.filesystem.exists(
os.path.join('foo', 'bar', 'pyfakefs', '__init__.py')))
def test_get_object_from_lazily_added_real_directory(self):
self.filesystem.is_case_sensitive = True
self.filesystem.add_real_directory(self.root_path)
self.assertTrue(self.filesystem.get_object(
os.path.join(self.root_path, 'pyfakefs', 'fake_filesystem.py')))
self.assertTrue(
self.filesystem.get_object(
os.path.join(self.root_path, 'pyfakefs', '__init__.py')))
def test_add_existing_real_directory_lazily(self):
disk_size = 1024 * 1024 * 1024
real_dir_path = os.path.join(self.root_path, 'pyfakefs')
self.filesystem.set_disk_usage(disk_size, real_dir_path)
self.filesystem.add_real_directory(real_dir_path)
# the directory contents have not been read, so the disk usage
# has not changed
self.assertEqual(disk_size,
self.filesystem.get_disk_usage(real_dir_path).free)
# checking for existence shall read the directory contents
self.assertTrue(
self.filesystem.get_object(
os.path.join(real_dir_path, 'fake_filesystem.py')))
# so now the free disk space shall have decreased
self.assertGreater(disk_size,
self.filesystem.get_disk_usage(real_dir_path).free)
def test_add_existing_real_directory_not_lazily(self):
disk_size = 1024 * 1024 * 1024
self.filesystem.set_disk_usage(disk_size, self.pyfakefs_path)
self.filesystem.add_real_directory(self.pyfakefs_path, lazy_read=False)
# the directory has been read, so the file sizes have
# been subtracted from the free space
self.assertGreater(disk_size, self.filesystem.get_disk_usage(
self.pyfakefs_path).free)
def test_add_existing_real_directory_read_write(self):
self.filesystem.add_real_directory(self.pyfakefs_path, read_only=False)
self.assertTrue(self.filesystem.exists(self.pyfakefs_path))
self.assertTrue(self.filesystem.exists(
os.path.join(self.pyfakefs_path, 'fake_filesystem.py')))
self.assertTrue(self.filesystem.exists(
os.path.join(self.pyfakefs_path, 'fake_pathlib.py')))
file_path = os.path.join(self.pyfakefs_path, 'pytest_plugin.py')
fake_file = self.filesystem.resolve(file_path)
self.check_fake_file_stat(fake_file, file_path)
self.check_writable_file(fake_file, file_path)
def test_add_existing_real_paths_read_only(self):
real_file_path = os.path.realpath(__file__)
fixture_path = os.path.join(self.pyfakefs_path, 'tests', 'fixtures')
self.filesystem.add_real_paths([real_file_path, fixture_path])
fake_file = self.filesystem.resolve(real_file_path)
self.check_fake_file_stat(fake_file, real_file_path)
self.check_read_only_file(fake_file, real_file_path)
real_file_path = os.path.join(fixture_path,
'module_with_attributes.py')
fake_file = self.filesystem.resolve(real_file_path)
self.check_fake_file_stat(fake_file, real_file_path)
self.check_read_only_file(fake_file, real_file_path)
def test_add_existing_real_paths_read_write(self):
real_file_path = os.path.realpath(__file__)
fixture_path = os.path.join(self.pyfakefs_path, 'tests', 'fixtures')
self.filesystem.add_real_paths([real_file_path, fixture_path],
read_only=False)
fake_file = self.filesystem.resolve(real_file_path)
self.check_fake_file_stat(fake_file, real_file_path)
self.check_writable_file(fake_file, real_file_path)
real_file_path = os.path.join(fixture_path,
'module_with_attributes.py')
fake_file = self.filesystem.resolve(real_file_path)
self.check_fake_file_stat(fake_file, real_file_path)
self.check_writable_file(fake_file, real_file_path)
class FileSideEffectTests(TestCase):
def side_effect(self):
test_case = self
test_case.side_effect_called = False
def __side_effect(file_object):
test_case.side_effect_called = True
test_case.side_effect_file_object_content = file_object.contents
return __side_effect
def setUp(self):
# use the real path separator to work with the real file system
self.filesystem = fake_filesystem.FakeFilesystem()
self.filesystem.create_file('/a/b/file_one',
side_effect=self.side_effect())
def test_side_effect_called(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
self.side_effect_called = False
with fake_open('/a/b/file_one', 'w') as handle:
handle.write('foo')
self.assertTrue(self.side_effect_called)
def test_side_effect_file_object(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
self.side_effect_called = False
with fake_open('/a/b/file_one', 'w') as handle:
handle.write('foo')
self.assertEquals(self.side_effect_file_object_content, 'foo')
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"HOME",
"USERPROFILE"
]
| [] | ["HOME", "USERPROFILE"] | python | 2 | 0 | |
heat/tests/functional/test_WordPress_With_LB.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
from nose.plugins.attrib import attr
import unittest
import os
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'LB', 'F17', 'WordPress_With_LB.template'])
class WordPressWithLBFunctionalTest(unittest.TestCase):
def setUp(self):
template = 'WordPress_With_LB.template'
stack_paramstr = ';'.join(['InstanceType=m1.xlarge',
'DBUsername=dbuser',
'DBPassword=' + os.environ['OS_PASSWORD']])
self.stack = util.Stack(self, template, 'F17', 'x86_64', 'cfntools',
stack_paramstr)
self.WikiServerOne = util.Instance(self, 'WikiServerOne')
self.LBInstance = util.Instance(self, 'LoadBalancer.LB_instance')
self.MySqlDatabaseServer = util.Instance(
self,
'DatabaseServer.MySqlDatabaseServer')
def tearDown(self):
self.stack.cleanup()
def test_instance(self):
self.stack.create()
self.WikiServerOne.wait_for_boot()
self.LBInstance.wait_for_boot()
self.MySqlDatabaseServer.wait_for_boot()
self.WikiServerOne.check_cfntools()
self.LBInstance.check_cfntools()
self.MySqlDatabaseServer.check_cfntools()
self.WikiServerOne.wait_for_provisioning()
self.LBInstance.wait_for_provisioning()
self.MySqlDatabaseServer.wait_for_provisioning()
self.assertTrue(self.WikiServerOne.file_present
('/etc/wordpress/wp-config.php'))
print 'Wordpress installation detected.'
# Verify the output URL parses as expected, i.e. check that
# the wordpress installation is operational
stack_url = self.stack.get_stack_output("WebsiteURL")
print "Verifying stack output from WebsiteUrl=(%s)." % stack_url
ver = verify.VerifyStack()
self.assertTrue(ver.verify_wordpress(stack_url))
| []
| []
| [
"OS_PASSWORD"
]
| [] | ["OS_PASSWORD"] | python | 1 | 0 | |
celery-admin/celery_admin/celery.py | import os
import json
import time
import copy
import traceback
import requests
from functools import wraps
import chevron
from celery import Celery
from celery.utils.log import get_task_logger
from .celery_tasks import http as http_task
from .celery_tasks import socket_ping as socket_ping_task
from .celery_tasks import ssh as ssh_task
from dotenv import load_dotenv
from .secrets import decrypt, retrieve_secret_variables
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "celery_admin.settings")
logger = get_task_logger(__name__)
app = Celery("celery_admin")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
STATUS_SUCCESS_LABEL = "success"
STATUS_ERROR_LABEL = "error"
LOG_LEVEL_ERROR = "error"
LOG_LEVEL_INFO = "info"
LOG_LEVEL_DEBUG = "debug"
load_dotenv(os.environ["DOTENV_PATH"])
RECORD_TASK_RESULTS_PATH = os.environ["TASK_RESULTS_PATH"]
logger.info(f"Record task results path: {RECORD_TASK_RESULTS_PATH}")
### Global
class InternalEventException(Exception):
pass
def replace_attribute_secret_variables(value, variables):
replacements = {
(v.get("params", {}) or {}).get("name"): (v.get("params", {}) or {}).get("value")
for v in variables
}
return chevron.render(value, replacements)
def replace_secret_variables(entity, project_id):
if not project_id:
return entity
variables = retrieve_secret_variables(project_id, "ResourceProjectVariable")
for k in entity:
if type(entity[k]) == str:
entity[k] = replace_attribute_secret_variables(entity[k], variables)
return entity
def handle_task(func):
@wraps(func)
def inner(*args, **kwargs):
body = kwargs.get("body")
orig_body = copy.deepcopy(body)
t_begin = time.time()
try:
if not body:
raise Exception("No body in the task definition")
# check parameters
params = body.get("params")
if not params:
raise Exception("No params in the task definition")
# decrypt resource
resource_id = params.get("resource_id")
decrypted_resource = decrypt(resource_id)
resource_params = decrypted_resource.get("params") or {}
body["params"] = {**resource_params, **params}
replace_secret_variables(body["params"], params.get("project_id"))
try:
result = func(*args, **kwargs)
except Exception as e:
raise InternalEventException(e)
result["status"] = STATUS_SUCCESS_LABEL
return record_task_result(LOG_LEVEL_INFO, orig_body, result, t_begin=t_begin)
except InternalEventException as e:
logger.error(f"Celery exception, e={e}, exception={traceback.format_exc()}")
return record_task_result(
LOG_LEVEL_ERROR,
orig_body,
{"error": f"{e}", "status": STATUS_ERROR_LABEL},
t_begin=t_begin,
)
except Exception as e:
logger.error(
f"Celery GLOBAL exception, e={e}, exception={traceback.format_exc()}"
)
return {"error": "global_exception", "details": traceback.format_exc()}
return inner
def write_task_result_file(msg):
logger.info(f"writing... {RECORD_TASK_RESULTS_PATH}")
log_file = open(RECORD_TASK_RESULTS_PATH, "a")
log_file.write(f"{json.dumps(msg)}\n")
log_file.close()
def record_task_result(level, request_body, result, t_begin=None):
result = result or {}
if t_begin:
result["duration"] = time.time() - t_begin
logger.debug(f"record result - request body: {request_body}, result: {result}")
scheduler_id = (request_body.get("params", {}) or {}).get("scheduler_id")
status_int = 100 if result["status"] == STATUS_SUCCESS_LABEL else 0
msg = {
"level": level,
"scheduler_id": scheduler_id,
"status": result["status"],
"status_int": status_int,
"body": request_body,
"result": result,
}
write_task_result_file(msg)
return msg
### Main tasks
@app.task(bind=True)
@handle_task
def http(self, **kwargs):
return http_task.task(**kwargs)
@app.task(bind=True)
@handle_task
def socket_ping(self, **kwargs):
return socket_ping_task.task(**kwargs)
@app.task(bind=True)
@handle_task
def ssh(self, **kwargs):
return ssh_task.task(**kwargs)
@app.task(bind=True)
def debug_task(self):
# encrypted_resource = {
#
# }
dec = decrypt(
"gAAAAABgruSZwSlU_kge70MFTP474_riUujZYwWNi7Xcm-IFHcganatg7TxQ8b_WiRbQqp0XFeR0XreNjKLNQksqRGSoyWY50A=="
)
print(f"decrypted -> {dec}")
print(f"Request: {self.request!r}")
@app.task(bind=True)
@handle_task
def hello_world(self, **kwargs):
print("Hello world ;)!")
| []
| []
| [
"TASK_RESULTS_PATH",
"DOTENV_PATH"
]
| [] | ["TASK_RESULTS_PATH", "DOTENV_PATH"] | python | 2 | 0 | |
manage.py | import os
from flask_script import Manager
from app import create_app
app = create_app(os.environ['ENVIRONMENT'])
manager = Manager(app)
if __name__ == "__main__":
manager.run() | []
| []
| [
"ENVIRONMENT"
]
| [] | ["ENVIRONMENT"] | python | 1 | 0 | |
examples/simple-server-echo/app.py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import wsgiref.simple_server
from argparse import ArgumentParser
from builtins import bytes
from linebot import (
LineBotApi, WebhookParser
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage
)
from linebot.utils import PY3
# get channel_secret and channel_access_token from your environment variable
channel_secret = os.getenv('d57f500b5ac8ede281c2b196e1ad547b', None)
channel_access_token = os.getenv('Qpc1/KrZ6Vm3qlyyoYhlrBUSfk4k2HuEJco9QyKbRY3VffS8gDRz9+ZyGqSKirb3Psk3lnrjZdsDlYGgXv56B7NMDSDHxgW4/AkLuhl8oNGQqZZQYJxa8oB5NQIrx07UYV/0UcEXVPCWtQ6V/YMZUgdB04t89/1O/w1cDnyilFU=', None)
if channel_secret is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if channel_access_token is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
line_bot_api = LineBotApi(channel_access_token)
parser = WebhookParser(channel_secret)
def application(environ, start_response):
# check request path
if environ['PATH_INFO'] != '/callback':
start_response('404 Not Found', [])
return create_body('Not Found')
# check request method
if environ['REQUEST_METHOD'] != 'POST':
start_response('405 Method Not Allowed', [])
return create_body('Method Not Allowed')
# get X-Line-Signature header value
signature = environ['HTTP_X_LINE_SIGNATURE']
# get request body as text
wsgi_input = environ['wsgi.input']
content_length = int(environ['CONTENT_LENGTH'])
body = wsgi_input.read(content_length).decode('utf-8')
# parse webhook body
try:
events = parser.parse(body, signature)
except InvalidSignatureError:
start_response('400 Bad Request', [])
return create_body('Bad Request')
# if event is MessageEvent and message is TextMessage, then echo text
for event in events:
if not isinstance(event, MessageEvent):
continue
if not isinstance(event.message, TextMessage):
continue
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=event.message.text)
)
start_response('200 OK', [])
return create_body('OK')
def create_body(text):
if PY3:
return [bytes(text, 'utf-8')]
else:
return text
if __name__ == '__main__':
arg_parser = ArgumentParser(
usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
)
arg_parser.add_argument('-p', '--port', type=int, default=8000, help='port')
options = arg_parser.parse_args()
httpd = wsgiref.simple_server.make_server('', options.port, application)
httpd.serve_forever()
| []
| []
| [
"Qpc1/KrZ6Vm3qlyyoYhlrBUSfk4k2HuEJco9QyKbRY3VffS8gDRz9+ZyGqSKirb3Psk3lnrjZdsDlYGgXv56B7NMDSDHxgW4/AkLuhl8oNGQqZZQYJxa8oB5NQIrx07UYV/0UcEXVPCWtQ6V/YMZUgdB04t89/1O/w1cDnyilFU=",
"d57f500b5ac8ede281c2b196e1ad547b"
]
| [] | ["Qpc1/KrZ6Vm3qlyyoYhlrBUSfk4k2HuEJco9QyKbRY3VffS8gDRz9+ZyGqSKirb3Psk3lnrjZdsDlYGgXv56B7NMDSDHxgW4/AkLuhl8oNGQqZZQYJxa8oB5NQIrx07UYV/0UcEXVPCWtQ6V/YMZUgdB04t89/1O/w1cDnyilFU=", "d57f500b5ac8ede281c2b196e1ad547b"] | python | 2 | 0 | |
server/metadata_server.go | package server
import (
"encoding/gob"
"fmt"
"io"
"log"
"math/rand"
"net"
"os"
"strconv"
"time"
)
// Generic Message for client/server communication
type Message struct {
Command string
Args []string
}
type FileEntry interface {
Read() []ChunkEntry
Rename(string)
GetSize() int
StopNode(int)
Write(int, []Copy)
GetName() string
Date() time.Time
getChunks() []ChunkEntry
DeleteChunks()
}
type File struct {
Name string
Size int
CreatedDate time.Time
Chunks []ChunkEntry
}
type MetaServer interface {
Rename(string, string) error
FileSize(string) int
FileStat(string) (string, error)
ListFiles() string
Read(string) (FileEntry, error)
Write(string) FileEntry
stopNode() int
GetDiskCap() int
sendMsg(*Message) error
UpdateDiskCap()
GetNodeStat(interface{}) string
nodeStatByID(int) string
nodeStat() string
Run()
}
type MasterNode struct {
socket net.Listener
serverName string
diskCap int
CHUNKSIZE int
ROW int
COLUMN int
nodeMap []int
files map[string]FileEntry
PORT int
encoder *gob.Encoder
decoder *gob.Decoder
}
func (f *File) Rename(newFileName string) {
f.Name = newFileName
}
func (f *File) GetSize() int {
return f.Size
}
func (f *File) StopNode(nodeID int) {
for _, entry := range f.Chunks {
entry.stopNode(nodeID)
}
}
func (f *File) Write(nodeID int, copies []Copy) {
var chunkEntry ChunkMetadata
chunkEntry.Index = nodeID
chunkEntry.Copies = copies
f.Chunks = append(f.Chunks, &chunkEntry)
}
func (f *File) Read() []ChunkEntry {
return f.getChunks()
}
func (f *File) DeleteChunks() {
f.Chunks = f.Chunks[:0]
}
func (f *File) Date() time.Time {
return f.CreatedDate
}
func (f *File) GetName() string {
return f.Name
}
func (f *File) getChunks() []ChunkEntry {
return f.Chunks
}
func NewMasterServer(serverName string, serverConfig map[string]interface{}) *MasterNode {
const DEFAULT_ALLOCATED_DISKSPACE = 4000
var DefaultConfig = map[string]int{}
DefaultConfig["chunksize"] = 100
DefaultConfig["nodes"] = 4
var newMasterNode = MasterNode{serverName: serverName, COLUMN: 4}
if val, ok := serverConfig["port"]; ok {
if port, ok := val.(int); ok {
newMasterNode.PORT = port
} else {
log.Fatalln("invalid type for server port value, expected an integer")
}
} else {
fmt.Printf("using default port: %d\n", DefaultConfig["port"])
newMasterNode.PORT = DefaultConfig["port"]
}
if val, ok := serverConfig["chunksize"]; ok {
if chunkSize, ok := val.(int); ok {
newMasterNode.CHUNKSIZE = chunkSize
} else {
log.Fatalln("invalid type for chunksize value, expected an integer")
}
} else {
fmt.Printf("using default chunksize: %d\n", DefaultConfig["chunksize"])
newMasterNode.CHUNKSIZE = DefaultConfig["chunksize"]
}
if val, ok := serverConfig["nodes"]; ok {
if totalnodes, ok := val.(int); ok {
newMasterNode.ROW = totalnodes
} else {
log.Fatalln("invalid type for nodes value, expected an integer")
}
} else {
fmt.Printf("using default nodes: %d\n", DefaultConfig["nodes"])
newMasterNode.ROW = DefaultConfig["nodes"]
}
for i := 0; i < newMasterNode.ROW; i++ {
newMasterNode.nodeMap = append(newMasterNode.nodeMap, DEFAULT_ALLOCATED_DISKSPACE)
}
newMasterNode.files = map[string]FileEntry{}
return &newMasterNode
}
func (m *MasterNode) ListFiles() string {
var fileString string
if len(m.files) == 0 {
return fileString
}
for _, entry := range m.files {
fileString += fmt.Sprintf("%s ", entry.GetName())
}
return fileString
}
func (m *MasterNode) sendMsg(msg *Message) error {
conn, err := net.Dial("tcp", ":"+os.Getenv("CHUNK_SERVER_PORT"))
defer conn.Close()
if err != nil {
return err
}
m.encoder = gob.NewEncoder(conn)
err = m.encoder.Encode(msg)
if err != nil {
return fmt.Errorf("Error: could not accept incoming request: %v", err.Error())
}
return nil
}
func (m *MasterNode) FileSize(filename string) int {
// return file entry size with the specified filename or return -1 if entry is non-existent
if entry, ok := m.files[filename]; ok {
return entry.GetSize()
}
return -1
}
func (m *MasterNode) Rename(oldFileName string, newFileName string) error {
if entry, ok := m.files[oldFileName]; ok {
entry.Rename(newFileName)
m.files[newFileName] = entry
delete(m.files, oldFileName)
return nil
}
return fmt.Errorf("%s does not exist", oldFileName)
}
func (m *MasterNode) GetDiskCap() int {
return m.diskCap
}
func (m *MasterNode) UpdateDiskCap() {
var totalDiskCap int
for _, spaceLeft := range m.nodeMap {
totalDiskCap += spaceLeft
}
m.diskCap = totalDiskCap
}
func (m *MasterNode) FileStat(filename string) (string, error) {
if entry, ok := m.files[filename]; ok {
return fmt.Sprintf(
`file name: %s
created: %v
size: %d bytes`, entry.GetName(), entry.Date(), entry.GetSize()), nil
}
return "", fmt.Errorf("file does not exist")
}
func (m *MasterNode) GetNodeStat(param interface{}) string {
switch x := param.(type) {
case int:
return m.nodeStatByID(x)
default:
return m.nodeStat()
}
}
func (m *MasterNode) nodeStatByID(nodeID int) string {
var statString string
if nodeID > -1 && nodeID < m.ROW {
statString = fmt.Sprintf("node %d available space: %d", nodeID, m.nodeMap[nodeID])
} else {
statString = fmt.Sprintf("no Node with ID %d", nodeID)
}
return statString
}
func (m *MasterNode) nodeStat() string {
var statString string
statString = fmt.Sprintf("totaldiskspace: %d", m.diskCap)
for idx, spaceLeft := range m.nodeMap {
statString += fmt.Sprintf("node %d available space: %d\n", idx+1, spaceLeft)
}
return statString
}
func (m *MasterNode) stopNode() int {
rand.Seed(time.Now().UnixNano())
nodeID := rand.Intn(m.ROW - 1)
for _, entries := range m.files {
entries.StopNode(nodeID)
}
return nodeID
}
func (m *MasterNode) Read(fileName string) (FileEntry, error) {
if entry, ok := m.files[fileName]; ok {
return entry, nil
}
return nil, fmt.Errorf("file does not exist")
}
func (m *MasterNode) Write(filename string) FileEntry {
// if file exists return file entry else create a new entry using filename
if entry, ok := m.files[filename]; ok {
return entry
}
entry := &File{Name: filename}
m.files[entry.GetName()] = entry
return entry
}
func (m *MasterNode) Run() {
var err error
m.socket, err = net.Listen("tcp", fmt.Sprintf(":%d", m.PORT))
if err != nil {
log.Fatalf("unable to start %s server: %v\n", m.serverName, err.Error())
}
fmt.Printf("starting %v server at port %d\n", m.serverName, m.PORT)
m.UpdateDiskCap()
chunkServerConfig := map[string]interface{}{
"port": os.Getenv("CHUNK_SERVER_PORT"),
"nodes": m.ROW,
"chunksize": m.CHUNKSIZE,
"NO_PER_RACK": m.COLUMN,
}
chunkServer := NewChunkServer("chunk", chunkServerConfig)
go chunkServer.Run()
for {
conn, err := m.socket.Accept()
if err != nil {
log.Fatal(err.Error())
}
m.encoder = gob.NewEncoder(conn)
m.decoder = gob.NewDecoder(conn)
go m.handleConnection(conn)
}
}
func (m *MasterNode) handleConnection(conn net.Conn) {
defer conn.Close()
var msg Message
var err error
for {
err = m.decoder.Decode(&msg)
if err != nil {
if err == io.EOF {
break
}
log.Println("decode error: ", err.Error())
m.encoder.Encode(err.Error())
break
}
if msg.Command == "killserver" {
// end process
var rmsg struct {
Result string
Err string
}
rmsg.Result = "servers stopped running"
err = m.encoder.Encode(rmsg)
if err != nil {
log.Println(err.Error())
}
os.Exit(1)
}
m.handleClientCommands(&msg)
}
}
func (m *MasterNode) handleClientCommands(msg *Message) {
switch msg.Command {
case "stopnode":
var rmsg struct {
Result string
Err string
}
rmsg.Result = strconv.Itoa(m.stopNode())
_ = m.encoder.Encode(rmsg)
break
case "stat":
stat, err := m.FileStat(msg.Args[0])
var rmsg struct {
Result string
Err string
}
if err != nil {
rmsg.Err = err.Error()
_ = m.encoder.Encode(rmsg)
break
}
rmsg.Result = stat
err = m.encoder.Encode(rmsg)
if err != nil {
log.Println(err.Error())
}
break
case "nodestat":
var rmsg struct {
Result string
Err string
}
nodeID, err := strconv.Atoi(msg.Args[0])
if err != nil {
rmsg.Err = err.Error()
} else {
rmsg.Result = m.GetNodeStat(nodeID)
}
err = m.encoder.Encode(rmsg)
if err != nil {
log.Println(err.Error())
}
case "ls":
var rmsg struct {
Result string
Err string
}
rmsg.Result = m.ListFiles()
err := m.encoder.Encode(rmsg)
if err != nil {
log.Println(err.Error())
}
break
case "diskcapacity":
var rmsg struct {
Result string
Err string
}
rmsg.Result = fmt.Sprintf("total diskcapacity: %d", m.GetDiskCap())
_ = m.encoder.Encode(rmsg)
break
case "rename":
var rmsg struct {
Err string
}
err := m.Rename(msg.Args[0], msg.Args[1])
if err != nil {
rmsg.Err = err.Error()
}
err = m.encoder.Encode(rmsg)
if err != nil {
log.Println(err)
}
break
case "read":
var rmsg struct {
Result *File
Err string
}
filename := msg.Args[0]
entry, err := m.Read(filename)
if err != nil {
rmsg.Err = err.Error()
} else {
rmsg.Result = entry.(*File)
}
err = m.encoder.Encode(rmsg)
if err != nil {
log.Println(err.Error())
}
break
case "write":
var rmsg struct {
Result *File
Err string
}
filename := msg.Args[0]
filesize, _ := strconv.Atoi(msg.Args[0])
if filesize >= m.GetDiskCap() {
rmsg.Err = "not enough available disk space for file"
} else {
entry := m.Write(filename)
rmsg.Result = entry.(*File)
}
err := m.encoder.Encode(rmsg)
if err != nil {
log.Println(err.Error())
}
break
case "filesize":
var rmsg struct {
Result int
Err string
}
filename := msg.Args[0]
rmsg.Result = m.FileSize(filename)
err := m.encoder.Encode(rmsg)
if err != nil {
log.Println(err.Error())
}
break
case "updateFileEntry":
var rmsg struct {
Entry *File
}
err := m.decoder.Decode(&rmsg)
if err != nil {
log.Println(err.Error())
}
if entry, ok := m.files[msg.Args[0]]; ok {
if len(entry.getChunks()) == 0 {
for _, chunk := range rmsg.Entry.getChunks() {
m.nodeMap[chunk.Id()] = m.nodeMap[chunk.Id()] - chunk.Size()
}
}
}
rmsg.Entry.Size = rmsg.Entry.Chunks[0].Size()
m.files[msg.Args[0]] = rmsg.Entry
m.UpdateDiskCap()
break
default:
var rmsg struct {
Err string
}
rmsg.Err = fmt.Sprintf("%s is not a valid command", msg.Command)
err := m.encoder.Encode(rmsg)
if err != nil {
log.Println(err.Error())
}
break
}
}
| [
"\"CHUNK_SERVER_PORT\"",
"\"CHUNK_SERVER_PORT\""
]
| []
| [
"CHUNK_SERVER_PORT"
]
| [] | ["CHUNK_SERVER_PORT"] | go | 1 | 0 | |
server_unix.go | // Copyright (c) 2019 Andy Pan
// Copyright (c) 2018 Joshua J Baker
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// +build linux freebsd dragonfly darwin
package gnet
import (
"context"
"runtime"
"sync"
"sync/atomic"
"github.com/panjf2000/gnet/errors"
"github.com/panjf2000/gnet/internal/logging"
"github.com/panjf2000/gnet/internal/netpoll"
)
type server struct {
ln *listener // the listener for accepting new connections
lb loadBalancer // event-loops for handling events
wg sync.WaitGroup // event-loop close WaitGroup
opts *Options // options with server
once sync.Once // make sure only signalShutdown once
cond *sync.Cond // shutdown signaler
codec ICodec // codec for TCP stream
mainLoop *eventloop // main event-loop for accepting connections
inShutdown int32 // whether the server is in shutdown
tickerCtx context.Context // context for ticker
cancelTicker context.CancelFunc // function to stop the ticker
eventHandler EventHandler // user eventHandler
}
func (svr *server) isInShutdown() bool {
return atomic.LoadInt32(&svr.inShutdown) == 1
}
// waitForShutdown waits for a signal to shutdown.
func (svr *server) waitForShutdown() {
svr.cond.L.Lock()
svr.cond.Wait()
svr.cond.L.Unlock()
}
// signalShutdown signals the server to shut down.
func (svr *server) signalShutdown() {
svr.once.Do(func() {
svr.cond.L.Lock()
svr.cond.Signal()
svr.cond.L.Unlock()
})
}
func (svr *server) startEventLoops() {
svr.lb.iterate(func(i int, el *eventloop) bool {
svr.wg.Add(1)
go func() {
el.loopRun(svr.opts.LockOSThread)
svr.wg.Done()
}()
return true
})
}
func (svr *server) closeEventLoops() {
svr.lb.iterate(func(i int, el *eventloop) bool {
_ = el.poller.Close()
return true
})
}
func (svr *server) startSubReactors() {
svr.lb.iterate(func(i int, el *eventloop) bool {
svr.wg.Add(1)
go func() {
svr.activateSubReactor(el, svr.opts.LockOSThread)
svr.wg.Done()
}()
return true
})
}
func (svr *server) activateEventLoops(numEventLoop int) (err error) {
var striker *eventloop
// Create loops locally and bind the listeners.
for i := 0; i < numEventLoop; i++ {
l := svr.ln
if i > 0 && svr.opts.ReusePort {
if l, err = initListener(svr.ln.network, svr.ln.addr, svr.opts); err != nil {
return
}
}
var p *netpoll.Poller
if p, err = netpoll.OpenPoller(); err == nil {
el := new(eventloop)
el.ln = l
el.svr = svr
el.poller = p
el.buffer = make([]byte, svr.opts.ReadBufferCap)
el.connections = make(map[int]*conn)
el.eventHandler = svr.eventHandler
_ = el.poller.AddRead(el.ln.fd)
svr.lb.register(el)
// Start the ticker.
if el.idx == 0 && svr.opts.Ticker {
striker = el
}
} else {
return
}
}
// Start event-loops in background.
svr.startEventLoops()
go striker.loopTicker(svr.tickerCtx)
return
}
func (svr *server) activateReactors(numEventLoop int) error {
for i := 0; i < numEventLoop; i++ {
if p, err := netpoll.OpenPoller(); err == nil {
el := new(eventloop)
el.ln = svr.ln
el.svr = svr
el.poller = p
el.buffer = make([]byte, svr.opts.ReadBufferCap)
el.connections = make(map[int]*conn)
el.eventHandler = svr.eventHandler
svr.lb.register(el)
} else {
return err
}
}
// Start sub reactors in background.
svr.startSubReactors()
if p, err := netpoll.OpenPoller(); err == nil {
el := new(eventloop)
el.ln = svr.ln
el.idx = -1
el.svr = svr
el.poller = p
el.eventHandler = svr.eventHandler
_ = el.poller.AddRead(el.ln.fd)
svr.mainLoop = el
// Start main reactor in background.
svr.wg.Add(1)
go func() {
svr.activateMainReactor(svr.opts.LockOSThread)
svr.wg.Done()
}()
} else {
return err
}
// Start the ticker.
if svr.opts.Ticker {
go svr.mainLoop.loopTicker(svr.tickerCtx)
}
return nil
}
func (svr *server) start(numEventLoop int) error {
if svr.opts.ReusePort || svr.ln.network == "udp" {
return svr.activateEventLoops(numEventLoop)
}
return svr.activateReactors(numEventLoop)
}
func (svr *server) stop(s Server) {
// Wait on a signal for shutdown
svr.waitForShutdown()
svr.eventHandler.OnShutdown(s)
// Notify all loops to close by closing all listeners
svr.lb.iterate(func(i int, el *eventloop) bool {
logging.LogErr(el.poller.UrgentTrigger(func(_ []byte) error {
return errors.ErrServerShutdown
}))
return true
})
if svr.mainLoop != nil {
svr.ln.close()
logging.LogErr(svr.mainLoop.poller.UrgentTrigger(func(_ []byte) error {
return errors.ErrServerShutdown
}))
}
// Wait on all loops to complete reading events
svr.wg.Wait()
svr.closeEventLoops()
if svr.mainLoop != nil {
logging.LogErr(svr.mainLoop.poller.Close())
}
// Stop the ticker.
if svr.opts.Ticker {
svr.cancelTicker()
}
atomic.StoreInt32(&svr.inShutdown, 1)
}
func serve(eventHandler EventHandler, listener *listener, options *Options, protoAddr string) error {
// Figure out the proper number of event-loops/goroutines to run.
numEventLoop := 1
if options.Multicore {
numEventLoop = runtime.NumCPU()
}
if options.NumEventLoop > 0 {
numEventLoop = options.NumEventLoop
}
svr := new(server)
svr.opts = options
svr.eventHandler = eventHandler
svr.ln = listener
switch options.LB {
case RoundRobin:
svr.lb = new(roundRobinLoadBalancer)
case LeastConnections:
svr.lb = new(leastConnectionsLoadBalancer)
case SourceAddrHash:
svr.lb = new(sourceAddrHashLoadBalancer)
}
svr.cond = sync.NewCond(&sync.Mutex{})
if svr.opts.Ticker {
svr.tickerCtx, svr.cancelTicker = context.WithCancel(context.Background())
}
svr.codec = func() ICodec {
if options.Codec == nil {
return new(BuiltInFrameCodec)
}
return options.Codec
}()
server := Server{
svr: svr,
Multicore: options.Multicore,
Addr: listener.lnaddr,
NumEventLoop: numEventLoop,
ReusePort: options.ReusePort,
TCPKeepAlive: options.TCPKeepAlive,
}
switch svr.eventHandler.OnInitComplete(server) {
case None:
case Shutdown:
return nil
}
if err := svr.start(numEventLoop); err != nil {
svr.closeEventLoops()
logging.Errorf("gnet server is stopping with error: %v", err)
return err
}
defer svr.stop(server)
allServers.Store(protoAddr, svr)
return nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
tests/test_getconfig.py | from chaos_lambda import get_config
from ssm_cache import InvalidParameterError
from . import TestBase, ignore_warnings
import unittest
import os
import sys
class TestConfigMethods(TestBase):
@ignore_warnings
def _setTestUp(self, subfolder):
class_name = self.__class__.__name__
self._setUp(class_name, subfolder)
config = "{ \"delay\": 200, \"is_enabled\": true, \"error_code\": 404, \"exception_msg\": \"This is chaos\", \"rate\": 0.5, \"fault_type\": \"latency\"}"
self._create_params(name='test.config', value=config)
@ignore_warnings
def test_get_config(self):
method_name = sys._getframe().f_code.co_name
self._setTestUp(method_name)
_config = get_config()
self.assertEqual(_config.get("is_enabled"), True or False)
self.assertEqual(_config.get("rate"), 0.5)
self.assertEqual(_config.get("delay"), 200)
self.assertEqual(_config.get("error_code"), 404)
self.assertEqual(_config.get("exception_msg"), "This is chaos")
self.assertEqual(_config.get("fault_type"), "latency")
class TestWrongConfigMethods(TestBase):
@ignore_warnings
def _setTestUp(self, subfolder):
class_name = self.__class__.__name__
self._setUp(class_name, subfolder)
config = "{ \"is_enabled\": true, \"error_code\": 404, \"exception_msg\": \"This is chaos\", \"rate\": 0.5, \"fault_type\": \"latency\"}"
self._create_params(name='test.config', value=config)
@ignore_warnings
def test_bad_config(self):
method_name = sys._getframe().f_code.co_name
self._setTestUp(method_name)
os.environ['CHAOS_PARAM'] = 'test.conf'
with self.assertRaises(InvalidParameterError):
_config = get_config()
self.assertNotEqual(_config.get("is_enabled"), True or False)
class TestConfigNotEnabled(TestBase):
@ignore_warnings
def _setTestUp(self, subfolder):
class_name = self.__class__.__name__
self._setUp(class_name, subfolder)
config = "{ \"delay\": 200, \"is_enabled\": false, \"error_code\": 404, \"exception_msg\": \"This is chaos\", \"rate\": 0.5, \"fault_type\": \"latency\"}"
self._create_params(name='test.config', value=config)
@ignore_warnings
def test_config_not_enabled(self):
method_name = sys._getframe().f_code.co_name
self._setTestUp(method_name)
_config = get_config()
self.assertEqual(_config, None)
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"CHAOS_PARAM"
]
| [] | ["CHAOS_PARAM"] | python | 1 | 0 | |
cmd/destroy.go | package cmd
import (
"github.com/ekara-platform/cli/common"
"github.com/ekara-platform/engine/action"
"github.com/ekara-platform/engine/util"
"github.com/fatih/color"
"github.com/spf13/cobra"
"io/ioutil"
"os"
"path/filepath"
"time"
)
func init() {
// This is a descriptor-based command
applyDescriptorFlags(destroyCmd)
// Docker flags
destroyCmd.PersistentFlags().StringVar(&common.Flags.Docker.Host, "docker-host", getEnvDockerHost(), "Docker daemon host")
destroyCmd.PersistentFlags().StringVar(&common.Flags.Docker.Cert, "docker-cert-path", os.Getenv("DOCKER_CERT_PATH"), "Location of the Docker certificates")
destroyCmd.PersistentFlags().BoolVar(&common.Flags.Docker.TLS, "docker-tls-verify", os.Getenv("DOCKER_TLS_VERIFY") == "", "If present TLS is enforced for Docker daemon communication")
destroyCmd.PersistentFlags().StringVar(&common.Flags.Docker.APIVersion, "docker-api-version", os.Getenv("DOCKER_API_VERSION"), "Docker daemon API version")
// SSH flags
destroyCmd.PersistentFlags().StringVar(&common.Flags.SSH.PublicSSHKey, "public-key", "", "Custom public SSH key for the environment")
destroyCmd.PersistentFlags().StringVar(&common.Flags.SSH.PrivateSSHKey, "private-key", "", "Custom private SSH key for the environment")
rootCmd.AddCommand(destroyCmd)
}
var destroyCmd = &cobra.Command{
Use: "destroy <repository-url>",
Short: "Destroy the existing environment infrastructure.",
Long: `The destroy command will ensure that every resource from the environment is destroyed.`,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
color.New(color.FgHiWhite).Println(common.LOG_DESTROYING_ENV)
if common.Flags.SSH.PrivateSSHKey != "" && common.Flags.SSH.PublicSSHKey != "" {
// Move the ssh keys into the exchange folder input
err := copyFile(common.Flags.SSH.PublicSSHKey, filepath.Join(ef.Input.Path(), util.SSHPublicKeyFileName))
if err != nil {
common.CliFeedbackNotifier.Error("Error copying the SSH public key")
os.Exit(1)
}
err = copyFile(common.Flags.SSH.PrivateSSHKey, filepath.Join(ef.Input.Path(), util.SSHPrivateKeyFileName))
if err != nil {
common.CliFeedbackNotifier.Error("Error copying the SSH private key")
os.Exit(1)
}
}
status, err := execAndWait(args[0], ef, action.DestroyActionID)
if err != nil {
common.CliFeedbackNotifier.Error("Unable to start installer: %s", err.Error())
return
}
if status == 0 {
common.CliFeedbackNotifier.Info("Destroy done in %s!", common.HumanizeDuration(time.Since(common.StartTime)))
if ef.Output.Contains("result.json") {
result, err := ioutil.ReadFile(filepath.Join(ef.Output.AdaptedPath(), "result.json"))
if err != nil {
common.CliFeedbackNotifier.Error("Unable to read destroy result: %s", err.Error())
return
}
destroyResult := action.DestroyResult{}
err = destroyResult.FromJson(string(result))
if err != nil {
common.CliFeedbackNotifier.Error("Unable to parse destroy result: %s", err.Error())
return
}
}
} else {
common.CliFeedbackNotifier.Error("Errored (%d) after %s!", status, common.HumanizeDuration(time.Since(common.StartTime)))
}
},
}
| [
"\"DOCKER_CERT_PATH\"",
"\"DOCKER_TLS_VERIFY\"",
"\"DOCKER_API_VERSION\""
]
| []
| [
"DOCKER_API_VERSION",
"DOCKER_CERT_PATH",
"DOCKER_TLS_VERIFY"
]
| [] | ["DOCKER_API_VERSION", "DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY"] | go | 3 | 0 | |
tests/testproj/testproj/asgi.py | """
ASGI config for testproj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testproj.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/github.com/riywo/loginshell/loginshell.go | package loginshell
import (
"runtime"
"fmt"
"os"
"os/exec"
"os/user"
"errors"
"regexp"
"strings"
)
func Shell() (string, error) {
switch runtime.GOOS {
case "linux":
return LinuxShell()
case "darwin":
return DarwinShell()
case "windows":
return WindowsShell()
}
return "", errors.New("Undefined GOOS: " + runtime.GOOS)
}
func WindowsShell() (string, error) {
consoleApp := os.Getenv("COMSPEC")
if consoleApp == "" {
consoleApp = "cmd.exe"
}
return consoleApp, nil
}
func LinuxShell() (string, error) {
user, err := user.Current()
if err != nil {
return "", err
}
out, err := exec.Command("getent", "passwd", user.Uid).Output()
if err != nil {
return "", err
}
ent := strings.Split(strings.TrimSuffix(string(out), "\n"), ":")
return ent[6], nil
}
func DarwinShell() (string, error) {
dir := "Local/Default/Users/" + os.Getenv("USER")
out, err := exec.Command("dscl", "localhost", "-read", dir, "UserShell").Output()
if err != nil {
return "", err
}
re := regexp.MustCompile("UserShell: (/[^ ]+)\n")
matched := re.FindStringSubmatch(string(out))
shell := matched[1]
if shell == "" {
return "", errors.New(fmt.Sprintf("Invalid output: %s", string(out)))
}
return shell, nil
}
| [
"\"COMSPEC\"",
"\"USER\""
]
| []
| [
"USER",
"COMSPEC"
]
| [] | ["USER", "COMSPEC"] | go | 2 | 0 | |
torpe/torpe/asgi.py | """
ASGI config for torpe project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'torpe.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
django_root/timerecording/settings.py | """
Django settings for storyhelper project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from configurations import Configuration
import os
class Base(Configuration):
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jl(gf*+@pf9@5%#b-ly(^zqtf$&(#x=$rc7^c5ny-$m5o&@x9o'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'rest_framework',
'recorder.apps.RecorderConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'timerecording.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'timerecording.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'de-ch'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/static'
class Dev(Base):
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'timerecording_dev',
'USER': 'postgres',
'PASSWORD': os.environ.get('POSTGRES_PASSWORD', 'passw0rd'),
'HOST': 'db',
'PORT': 5432,
}
}
class Prod(Base):
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DEBUG = True
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'timerecording',
'USER': 'postgres',
'PASSWORD': os.environ.get('POSTGRES_PASSWORD', 'cpWhCu83Nc729JDm'),
'HOST': 'db',
'PORT': 5432,
}
}
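# Deployment note (assumption, not configured in this file): with django-configurations,
# the active settings class above (Dev or Prod) is normally selected at startup via the
# DJANGO_CONFIGURATION environment variable, alongside DJANGO_SETTINGS_MODULE.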
| []
| []
| [
"POSTGRES_PASSWORD"
]
| [] | ["POSTGRES_PASSWORD"] | python | 1 | 0 | |
scripts/tests/sanitycheck/test_testinstance.py | #!/usr/bin/env python3
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=line-too-long
"""
Tests for testinstance class
"""
import os
import sys
import pytest
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
from sanitylib import TestInstance, BuildError, TestCase, SanityCheckException
TESTDATA_1 = [
(False, False, "console", "na", "qemu", False, [], (False, True)),
(False, False, "console", "native", "qemu", False, [], (False, True)),
(True, False, "console", "native", "nsim", False, [], (True, False)),
(True, True, "console", "native", "renode", False, [], (True, False)),
(False, False, "sensor", "native", "", False, [], (True, False)),
(False, False, "sensor", "na", "", False, [], (True, False)),
(False, True, "sensor", "native", "", True, [], (True, False)),
]
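# Each TESTDATA_1 tuple mirrors the parametrize string below:
# (build_only, slow, harness, platform_type, platform_sim, device_testing, fixture,
# (expected_build_only, expected_run)).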
@pytest.mark.parametrize("build_only, slow, harness, platform_type, platform_sim, device_testing,fixture, expected", TESTDATA_1)
def test_check_build_or_run(class_testsuite, monkeypatch, all_testcases_dict, platforms_list, build_only, slow, harness, platform_type, platform_sim, device_testing, fixture, expected):
"""" Test to check the conditions for build_only and run scenarios
Scenario 1: Test when different parameters are passed, build_only and run are set correctly
Sceanrio 2: Test if build_only is enabled when the OS is Windows"""
class_testsuite.testcases = all_testcases_dict
testcase = class_testsuite.testcases.get('scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_a.check_1')
class_testsuite.platforms = platforms_list
platform = class_testsuite.get_platform("demo_board_2")
platform.type = platform_type
platform.simulation = platform_sim
testcase.harness = harness
testcase.build_only = build_only
testcase.slow = slow
testinstance = TestInstance(testcase, platform, class_testsuite.outdir)
testinstance.check_build_or_run(build_only, slow, device_testing, fixture)
b, r = expected
assert testinstance.build_only == b
assert testinstance.run == r
monkeypatch.setattr("os.name", "nt")
testinstance.check_build_or_run()
assert testinstance.build_only and not testinstance.run
TESTDATA_2 = [
(True, True, ["demo_board_2"], "native", '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y\nCONFIG_ASAN=y'),
(False, True, ["demo_board_2"], 'native', '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
(True, True, ["demo_board_2"], 'mcu', '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
(False, False, ["demo_board_2"], 'native', ''),
(False, True, ['demo_board_1'], 'native', ''),
(True, False, ["demo_board_2"], 'native', '\nCONFIG_ASAN=y'),
]
@pytest.mark.parametrize("enable_asan, enable_coverage, coverage_platform, platform_type, expected_content", TESTDATA_2)
def test_create_overlay(class_testsuite, all_testcases_dict, platforms_list, enable_asan, enable_coverage, coverage_platform, platform_type, expected_content):
"""Test correct content is written to testcase_extra.conf based on if conditions
TO DO: Add extra_configs to the input list"""
class_testsuite.testcases = all_testcases_dict
testcase = class_testsuite.testcases.get('scripts/tests/sanitycheck/test_data/testcases/samples/test_app/sample_test.app')
class_testsuite.platforms = platforms_list
platform = class_testsuite.get_platform("demo_board_2")
testinstance = TestInstance(testcase, platform, class_testsuite.outdir)
platform.type = platform_type
assert testinstance.create_overlay(platform, enable_asan, enable_coverage, coverage_platform) == expected_content
def test_calculate_sizes(class_testsuite, all_testcases_dict, platforms_list):
""" Test Calculate sizes method for zephyr elf"""
class_testsuite.testcases = all_testcases_dict
testcase = class_testsuite.testcases.get('scripts/tests/sanitycheck/test_data/testcases/samples/test_app/sample_test.app')
class_testsuite.platforms = platforms_list
platform = class_testsuite.get_platform("demo_board_2")
testinstance = TestInstance(testcase, platform, class_testsuite.outdir)
with pytest.raises(BuildError):
assert testinstance.calculate_sizes() == "Missing/multiple output ELF binary"
TESTDATA_3 = [
(ZEPHYR_BASE + '/scripts/tests/sanitycheck/test_data/testcases', ZEPHYR_BASE, '/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_a.check_1', '/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_a.check_1'),
(ZEPHYR_BASE, '.', 'test_a.check_1', 'test_a.check_1'),
(ZEPHYR_BASE, '/scripts/tests/sanitycheck/test_data/testcases/test_b', 'test_b.check_1', '/scripts/tests/sanitycheck/test_data/testcases/test_b/test_b.check_1'),
(os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', 'test_b.check_1', 'test_b.check_1'),
(os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '.', '.'),
(ZEPHYR_BASE, '.', 'test_a.check_1.check_2', 'test_a.check_1.check_2'),
]
@pytest.mark.parametrize("testcase_root, workdir, name, expected", TESTDATA_3)
def test_get_unique(testcase_root, workdir, name, expected):
'''Test to check if the unique name is given for each testcase root and workdir'''
unique = TestCase(testcase_root, workdir, name)
assert unique.name == expected
TESTDATA_4 = [
(ZEPHYR_BASE, '.', 'test_c', 'Tests should reference the category and subsystem with a dot as a separator.'),
(os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '', 'Tests should reference the category and subsystem with a dot as a separator.'),
]
@pytest.mark.parametrize("testcase_root, workdir, name, exception", TESTDATA_4)
def test_get_unique_exception(testcase_root, workdir, name, exception):
'''Test to check if tests reference the category and subsystem with a dot as a separator'''
with pytest.raises(SanityCheckException):
unique = TestCase(testcase_root, workdir, name)
assert unique == exception
TESTDATA_5 = [
("testcases/tests/test_ztest.c", None, ['a', 'c', 'unit_a', 'newline', 'aa', 'user', 'last']),
("testcases/tests/test_a/test_ztest_error.c", "Found a test that does not start with test_", ['1a', '1c', '2a', '2b']),
("testcases/tests/test_a/test_ztest_error_1.c", "found invalid #ifdef, #endif in ztest_test_suite()", ['unit_1a', 'unit_1b', 'Unit_1c']),
]
@pytest.mark.parametrize("test_file, expected_warnings, expected_subcases", TESTDATA_5)
def test_scan_file(test_data, test_file, expected_warnings, expected_subcases):
'''Testing scan_file method with different ztest files for warnings and results'''
testcase = TestCase("/scripts/tests/sanitycheck/test_data/testcases/tests", ".", "test_a.check_1")
results, warnings = testcase.scan_file(os.path.join(test_data, test_file))
assert sorted(results) == sorted(expected_subcases)
assert warnings == expected_warnings
TESTDATA_6 = [
("testcases/tests", ['a', 'c', 'unit_a', 'newline', 'aa', 'user', 'last']),
("testcases/tests/test_a", ['unit_1a', 'unit_1b', 'Unit_1c', '1a', '1c', '2a', '2b']),
]
@pytest.mark.parametrize("test_path, expected_subcases", TESTDATA_6)
def test_subcases(test_data, test_path, expected_subcases):
'''Testing scan path and parse subcases methods for expected subcases'''
testcase = TestCase("/scripts/tests/sanitycheck/test_data/testcases/tests", ".", "test_a.check_1")
subcases = testcase.scan_path(os.path.join(test_data, test_path))
assert sorted(subcases) == sorted(expected_subcases)
testcase.id = "test_id"
testcase.parse_subcases(test_data + test_path)
assert sorted(testcase.cases) == [testcase.id + '.' + x for x in sorted(expected_subcases)]
| []
| []
| [
"ZEPHYR_BASE"
]
| [] | ["ZEPHYR_BASE"] | python | 1 | 0 | |
VideoMe/wsgi.py | """
WSGI config for VideoMe project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'VideoMe.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
selfdrive/controls/lib/events.py | import math
import os
from enum import IntEnum
from typing import Dict, Union, Callable, List, Optional
from cereal import log, car
import cereal.messaging as messaging
from common.conversions import Conversions as CV
from common.realtime import DT_CTRL
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
from system.version import get_short_branch
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority(IntEnum):
LOWEST = 0
LOWER = 1
LOW = 2
MID = 3
HIGH = 4
HIGHEST = 5
# Event types
class ET:
ENABLE = 'enable'
PRE_ENABLE = 'preEnable'
OVERRIDE = 'override'
NO_ENTRY = 'noEntry'
WARNING = 'warning'
USER_DISABLE = 'userDisable'
SOFT_DISABLE = 'softDisable'
IMMEDIATE_DISABLE = 'immediateDisable'
PERMANENT = 'permanent'
# get event name from enum
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
def __init__(self):
self.events: List[int] = []
self.static_events: List[int] = []
self.events_prev = dict.fromkeys(EVENTS.keys(), 0)
@property
def names(self) -> List[int]:
return self.events
def __len__(self) -> int:
return len(self.events)
def add(self, event_name: int, static: bool=False) -> None:
if static:
self.static_events.append(event_name)
self.events.append(event_name)
def clear(self) -> None:
self.events_prev = {k: (v + 1 if k in self.events else 0) for k, v in self.events_prev.items()}
self.events = self.static_events.copy()
def any(self, event_type: str) -> bool:
return any(event_type in EVENTS.get(e, {}) for e in self.events)
def create_alerts(self, event_types: List[str], callback_args=None):
if callback_args is None:
callback_args = []
ret = []
for e in self.events:
types = EVENTS[e].keys()
for et in event_types:
if et in types:
alert = EVENTS[e][et]
if not isinstance(alert, Alert):
alert = alert(*callback_args)
if DT_CTRL * (self.events_prev[e] + 1) >= alert.creation_delay:
alert.alert_type = f"{EVENT_NAME[e]}/{et}"
alert.event_type = et
ret.append(alert)
return ret
def add_from_msg(self, events):
for e in events:
self.events.append(e.name.raw)
def to_msg(self):
ret = []
for event_name in self.events:
event = car.CarEvent.new_message()
event.name = event_name
for event_type in EVENTS.get(event_name, {}):
setattr(event, event_type, True)
ret.append(event)
return ret
class Alert:
def __init__(self,
alert_text_1: str,
alert_text_2: str,
alert_status: log.ControlsState.AlertStatus,
alert_size: log.ControlsState.AlertSize,
priority: Priority,
visual_alert: car.CarControl.HUDControl.VisualAlert,
audible_alert: car.CarControl.HUDControl.AudibleAlert,
duration: float,
alert_rate: float = 0.,
creation_delay: float = 0.):
self.alert_text_1 = alert_text_1
self.alert_text_2 = alert_text_2
self.alert_status = alert_status
self.alert_size = alert_size
self.priority = priority
self.visual_alert = visual_alert
self.audible_alert = audible_alert
self.duration = int(duration / DT_CTRL)
self.alert_rate = alert_rate
self.creation_delay = creation_delay
self.alert_type = ""
self.event_type: Optional[str] = None
def __str__(self) -> str:
return f"{self.alert_text_1}/{self.alert_text_2} {self.priority} {self.visual_alert} {self.audible_alert}"
def __gt__(self, alert2) -> bool:
if not isinstance(alert2, Alert):
return False
return self.priority > alert2.priority
class NoEntryAlert(Alert):
def __init__(self, alert_text_2: str,
alert_text_1: str = "openpilot Unavailable",
visual_alert: car.CarControl.HUDControl.VisualAlert=VisualAlert.none):
super().__init__(alert_text_1, alert_text_2, AlertStatus.normal,
AlertSize.mid, Priority.LOW, visual_alert,
AudibleAlert.refuse, 3.)
class SoftDisableAlert(Alert):
def __init__(self, alert_text_2: str):
super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
AlertStatus.userPrompt, AlertSize.full,
Priority.MID, VisualAlert.steerRequired,
AudibleAlert.warningSoft, 2.),
# less harsh version of SoftDisable, where the condition is user-triggered
class UserSoftDisableAlert(SoftDisableAlert):
def __init__(self, alert_text_2: str):
super().__init__(alert_text_2),
self.alert_text_1 = "openpilot will disengage"
class ImmediateDisableAlert(Alert):
def __init__(self, alert_text_2: str):
super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.steerRequired,
AudibleAlert.warningImmediate, 4.),
class EngagementAlert(Alert):
def __init__(self, audible_alert: car.CarControl.HUDControl.AudibleAlert):
super().__init__("", "",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none,
audible_alert, .2),
class NormalPermanentAlert(Alert):
def __init__(self, alert_text_1: str, alert_text_2: str = "", duration: float = 0.2, priority: Priority = Priority.LOWER, creation_delay: float = 0.):
super().__init__(alert_text_1, alert_text_2,
AlertStatus.normal, AlertSize.mid if len(alert_text_2) else AlertSize.small,
priority, VisualAlert.none, AudibleAlert.none, duration, creation_delay=creation_delay),
class StartupAlert(Alert):
def __init__(self, alert_text_1: str, alert_text_2: str = "Always keep hands on wheel and eyes on road", alert_status=AlertStatus.normal):
super().__init__(alert_text_1, alert_text_2,
alert_status, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 10.),
# ********** helper functions **********
def get_display_speed(speed_ms: float, metric: bool) -> str:
speed = int(round(speed_ms * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH)))
unit = 'km/h' if metric else 'mph'
return f"{speed} {unit}"
# ********** alert callback functions **********
AlertCallbackType = Callable[[car.CarParams, car.CarState, messaging.SubMaster, bool, int], Alert]
def soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
def func(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
if soft_disable_time < int(0.5 / DT_CTRL):
return ImmediateDisableAlert(alert_text_2)
return SoftDisableAlert(alert_text_2)
return func
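# Illustrative note: assuming DT_CTRL is 0.01 s (the 100 Hz controls rate),
# int(0.5 / DT_CTRL) equals 50 frames, so with less than roughly half a second of
# soft-disable time left the callback escalates to an ImmediateDisableAlert.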
def user_soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
def func(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
if soft_disable_time < int(0.5 / DT_CTRL):
return ImmediateDisableAlert(alert_text_2)
return UserSoftDisableAlert(alert_text_2)
return func
def startup_master_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
branch = get_short_branch("")
if "REPLAY" in os.environ:
branch = "replay"
return StartupAlert("MechTechTeam 08.15 061822", branch, alert_status=AlertStatus.userPrompt)
def below_engage_speed_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return NoEntryAlert(f"Speed Below {get_display_speed(CP.minEnableSpeed, metric)}")
def below_steer_speed_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return Alert(
f"Steer Unavailable Below {get_display_speed(CP.minSteerSpeed, metric)}",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.prompt, 0.4)
def calibration_incomplete_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return Alert(
"Calibration in Progress: %d%%" % sm['liveCalibration'].calPerc,
f"Drive Above {get_display_speed(MIN_SPEED_FILTER, metric)}",
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2)
def no_gps_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
gps_integrated = sm['peripheralState'].pandaType in (log.PandaState.PandaType.uno, log.PandaState.PandaType.dos)
return Alert(
"Poor GPS reception",
"Hardware malfunctioning if sky is visible" if gps_integrated else "Check GPS antenna placement",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=300.)
# *** debug alerts ***
def out_of_space_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
full_perc = round(100. - sm['deviceState'].freeSpacePercent)
return NormalPermanentAlert("Out of Storage", f"{full_perc}% full")
def posenet_invalid_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
mdl = sm['modelV2'].velocity.x[0] if len(sm['modelV2'].velocity.x) else math.nan
err = CS.vEgo - mdl
msg = f"Speed Error: {err:.1f} m/s"
return NoEntryAlert(msg, alert_text_1="Posenet Speed Invalid")
def process_not_running_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
not_running = [p.name for p in sm['managerState'].processes if not p.running and p.shouldBeRunning]
msg = ', '.join(not_running)
return NoEntryAlert(msg, alert_text_1="Process Not Running")
def comm_issue_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
bs = [s for s in sm.data.keys() if not sm.all_checks([s, ])]
msg = ', '.join(bs[:4]) # can't fit too many on one line
return NoEntryAlert(msg, alert_text_1="Communication Issue Between Processes")
def camera_malfunction_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
all_cams = ('roadCameraState', 'driverCameraState', 'wideRoadCameraState')
bad_cams = [s.replace('State', '') for s in all_cams if s in sm.data.keys() and not sm.all_checks([s, ])]
return NormalPermanentAlert("Camera Malfunction", ', '.join(bad_cams))
def calibration_invalid_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
rpy = sm['liveCalibration'].rpyCalib
yaw = math.degrees(rpy[2] if len(rpy) == 3 else math.nan)
pitch = math.degrees(rpy[1] if len(rpy) == 3 else math.nan)
angles = f"Pitch: {pitch:.1f}°, Yaw: {yaw:.1f}°"
return NormalPermanentAlert("Calibration Invalid", angles)
def overheat_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
cpu = max(sm['deviceState'].cpuTempC, default=0.)
gpu = max(sm['deviceState'].gpuTempC, default=0.)
temp = max((cpu, gpu, sm['deviceState'].memoryTempC))
return NormalPermanentAlert("System Overheated", f"{temp:.0f} °C")
def low_memory_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return NormalPermanentAlert("Low Memory", f"{sm['deviceState'].memoryUsagePercent}% used")
def high_cpu_usage_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
x = max(sm['deviceState'].cpuUsagePercent, default=0.)
return NormalPermanentAlert("High CPU Usage", f"{x}% used")
def modeld_lagging_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return NormalPermanentAlert("Driving Model Lagging", f"{sm['modelV2'].frameDropPerc:.1f}% frames dropped")
def wrong_car_mode_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
text = "Cruise Mode Disabled"
if CP.carName == "honda":
text = "Main Switch Off"
return NoEntryAlert(text)
def joystick_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
axes = sm['testJoystick'].axes
gb, steer = list(axes)[:2] if len(axes) else (0., 0.)
vals = f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%"
return NormalPermanentAlert("Joystick Mode", vals)
EVENTS: Dict[int, Dict[str, Union[Alert, AlertCallbackType]]] = {
# ********** events with no alerts **********
EventName.stockFcw: {},
# ********** events only containing alerts displayed in all states **********
EventName.joystickDebug: {
ET.WARNING: joystick_alert,
ET.PERMANENT: NormalPermanentAlert("Joystick Mode"),
},
EventName.controlsInitializing: {
ET.NO_ENTRY: NoEntryAlert("System Initializing"),
},
EventName.startup: {
ET.PERMANENT: StartupAlert("Be ready to take over at any time")
},
EventName.startupMaster: {
ET.PERMANENT: startup_master_alert,
},
# Car is recognized, but marked as dashcam only
EventName.startupNoControl: {
ET.PERMANENT: StartupAlert("Dashcam mode"),
},
# Car is not recognized
EventName.startupNoCar: {
ET.PERMANENT: StartupAlert("Dashcam mode for unsupported car"),
},
EventName.startupNoFw: {
ET.PERMANENT: StartupAlert("Car Unrecognized",
"Check comma power connections",
alert_status=AlertStatus.userPrompt),
},
EventName.dashcamMode: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
priority=Priority.LOWEST),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: NormalPermanentAlert("Stock LKAS is on",
"Turn off stock LKAS to engage"),
},
EventName.cruiseMismatch: {
#ET.PERMANENT: ImmediateDisableAlert("openpilot failed to cancel cruise"),
},
# openpilot doesn't recognize the car. This switches openpilot into a
# read-only mode. This can be solved by adding your fingerprint.
# See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
EventName.carUnrecognized: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
"Car Unrecognized",
priority=Priority.LOWEST),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock AEB: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 2.),
ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.warningSoft, 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"Lane Departure Detected",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.ldw, AudibleAlert.prompt, 3.),
},
# ********** events only containing alerts that display while engaged **********
# openpilot tries to learn certain parameters about your car by observing
# how the car behaves to steering inputs from both human and openpilot driving.
# This includes:
# - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
# - tire stiffness: how much grip your tires have
# - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight
# This alert is thrown when any of these values exceed a sanity check. This can be caused by
# bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
EventName.vehicleModelInvalid: {
ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
ET.SOFT_DISABLE: soft_disable_alert("Vehicle Parameter Identification Failed"),
},
EventName.steerTempUnavailableSilent: {
ET.WARNING: Alert(
"Steering Temporarily Unavailable",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.prompt, 1.),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"Driver Distracted",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Distracted",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel: No Face Detected",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel",
"Driver Unresponsive",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Unresponsive",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Resume Driving Manually",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"STOPPED",
"Press Resume to Go",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"Steer Left to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"Steer Right to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"Car Detected in Blindspot",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.prompt, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"Changing Lanes",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"Take Control",
"Turn Exceeds Steering Limit",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 1.),
},
# Thrown when the fan is driven at >50% but is not rotating
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Fan Malfunction", "Likely Hardware Issue"),
},
# Camera is not outputting frames
EventName.cameraMalfunction: {
ET.PERMANENT: camera_malfunction_alert,
ET.SOFT_DISABLE: soft_disable_alert("Camera Malfunction"),
ET.NO_ENTRY: NoEntryAlert("Camera Malfunction: Reboot Your Device"),
},
# Camera framerate too low
EventName.cameraFrameRate: {
ET.PERMANENT: NormalPermanentAlert("Camera Frame Rate Low", "Reboot your Device"),
ET.SOFT_DISABLE: soft_disable_alert("Camera Frame Rate Low"),
ET.NO_ENTRY: NoEntryAlert("Camera Frame Rate Low: Reboot Your Device"),
},
# Unused
EventName.gpsMalfunction: {
ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Likely Hardware Issue"),
},
# When the GPS position and localizer diverge the localizer is reset to the
# current GPS position. This alert is thrown when the localizer is reset
# more often than expected.
EventName.localizerMalfunction: {
# ET.PERMANENT: NormalPermanentAlert("Sensor Malfunction", "Hardware Malfunction"),
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Parking Brake Engaged"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed",
visual_alert=VisualAlert.brakePressed),
},
EventName.pedalPressedPreEnable: {
ET.PRE_ENABLE: Alert(
"Release Pedal to Engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1, creation_delay=1.),
},
EventName.gasPressedOverride: {
ET.OVERRIDE: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Adaptive Cruise Disabled"),
},
EventName.steerTempUnavailable: {
ET.SOFT_DISABLE: soft_disable_alert("Steering Temporarily Unavailable"),
ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable"),
},
EventName.outOfSpace: {
ET.PERMANENT: out_of_space_alert,
ET.NO_ENTRY: NoEntryAlert("Out of Storage"),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: below_engage_speed_alert,
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("Speaker not found", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
ET.PERMANENT: overheat_alert,
ET.SOFT_DISABLE: soft_disable_alert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System Overheated"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: user_soft_disable_alert("Gear not D"),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
# This alert is thrown when the calibration angles are outside of the acceptable range.
# For example if the device is pointed too much to the left or the right.
# Usually this can only be solved by removing the mount from the windshield completely,
# and attaching while making sure the device is pointed straight forward and is level.
# See https://comma.ai/setup for more information
EventName.calibrationInvalid: {
ET.PERMANENT: calibration_invalid_alert,
ET.SOFT_DISABLE: soft_disable_alert("Calibration Invalid: Remount Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Remount Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: soft_disable_alert("Calibration in Progress"),
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: user_soft_disable_alert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door Open"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: user_soft_disable_alert("Seatbelt Unlatched"),
ET.NO_ENTRY: NoEntryAlert("Seatbelt Unlatched"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: soft_disable_alert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: soft_disable_alert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
# Different openpilot services communicate between each other at a certain
# interval. If communication does not follow the regular schedule this alert
# is thrown. This can mean a service crashed, did not broadcast a message for
# ten times the regular interval, or the average interval is more than 10% too high.
EventName.commIssue: {
ET.SOFT_DISABLE: soft_disable_alert("Communication Issue between Processes"),
ET.NO_ENTRY: comm_issue_alert,
},
EventName.commIssueAvgFreq: {
ET.SOFT_DISABLE: soft_disable_alert("Low Communication Rate between Processes"),
ET.NO_ENTRY: NoEntryAlert("Low Communication Rate between Processes"),
},
EventName.controlsdLagging: {
ET.SOFT_DISABLE: soft_disable_alert("Controls Lagging"),
ET.NO_ENTRY: NoEntryAlert("Controls Process Lagging: Reboot Your Device"),
},
# Thrown when manager detects a service exited unexpectedly while driving
EventName.processNotRunning: {
ET.NO_ENTRY: process_not_running_alert,
ET.SOFT_DISABLE: soft_disable_alert("Process Not Running"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: soft_disable_alert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
# Every frame from the camera should be processed by the model. If modeld
# is not processing frames fast enough they have to be dropped. This alert is
# thrown when over 20% of frames are dropped.
EventName.modeldLagging: {
ET.SOFT_DISABLE: soft_disable_alert("Driving Model Lagging"),
ET.NO_ENTRY: NoEntryAlert("Driving Model Lagging"),
ET.PERMANENT: modeld_lagging_alert,
},
# Besides predicting the path, lane lines and lead car data the model also
# predicts the current velocity and rotation speed of the car. If the model is
# very uncertain about the current velocity while the car is moving, this
# usually means the model has trouble understanding the scene. This is used
# as a heuristic to warn the driver.
EventName.posenetInvalid: {
ET.SOFT_DISABLE: soft_disable_alert("Posenet Speed Invalid"),
ET.NO_ENTRY: posenet_invalid_alert,
},
# When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
# alert the driver the device might have fallen from the windshield.
EventName.deviceFalling: {
ET.SOFT_DISABLE: soft_disable_alert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: soft_disable_alert("Low Memory: Reboot Your Device"),
ET.PERMANENT: low_memory_alert,
ET.NO_ENTRY: NoEntryAlert("Low Memory: Reboot Your Device"),
},
EventName.highCpuUsage: {
#ET.SOFT_DISABLE: soft_disable_alert("System Malfunction: Reboot Your Device"),
#ET.PERMANENT: NormalPermanentAlert("System Malfunction", "Reboot your Device"),
ET.NO_ENTRY: high_cpu_usage_alert,
},
EventName.accFaulted: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Faulted"),
ET.PERMANENT: NormalPermanentAlert("Cruise Faulted", ""),
ET.NO_ENTRY: NoEntryAlert("Cruise Faulted"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
ET.NO_ENTRY: NoEntryAlert("Controls Mismatch"),
},
EventName.roadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera CRC Error - Road",
duration=1.,
creation_delay=30.),
},
EventName.wideRoadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera CRC Error - Road Fisheye",
duration=1.,
creation_delay=30.),
},
EventName.driverCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera CRC Error - Driver",
duration=1.,
creation_delay=30.),
},
# Sometimes the USB stack on the device can get into a bad state
# causing the connection to the panda to be lost
EventName.usbError: {
ET.SOFT_DISABLE: soft_disable_alert("USB Error: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("USB Error: Reboot Your Device", ""),
ET.NO_ENTRY: NoEntryAlert("USB Error: Reboot Your Device"),
},
# This alert can be thrown for the following reasons:
# - No CAN data received at all
# - CAN data is received, but some message are not received at the right frequency
# If you're not writing a new car port, this is usually cause by faulty wiring
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1., creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.canBusMissing: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Bus Disconnected"),
ET.PERMANENT: Alert(
"CAN Bus Disconnected: Likely Faulty Cable",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1., creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Bus Disconnected: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("LKAS Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2, creation_delay=0.5),
ET.USER_DISABLE: ImmediateDisableAlert("Reverse Gear"),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
# On cars that use stock ACC the car can decide to cancel ACC for various reasons.
# When this happens we can no longer control the car so the user needs to be warned immediately.
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
# For planning the trajectory Model Predictive Control (MPC) is used. This is
# an optimization algorithm that is not guaranteed to find a feasible solution.
# If no solution is found or the solution has a very high cost this alert is thrown.
EventName.plannerError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
# When the relay in the harness box opens the CAN bus between the LKAS camera
# and the rest of the car is separated. When messages from the LKAS camera
# are received on the car side this usually means the relay hasn't opened correctly
# and this alert is thrown.
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Relay Malfunction"),
ET.PERMANENT: NormalPermanentAlert("Harness Relay Malfunction", "Check Hardware"),
ET.NO_ENTRY: NoEntryAlert("Harness Relay Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
},
# When the car is driving faster than most cars in the training data, the model outputs can be unpredictable.
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Model uncertain at this speed",
AlertStatus.userPrompt, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 4.),
ET.NO_ENTRY: NoEntryAlert("Slow down to engage"),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.lkasDisabled: {
ET.PERMANENT: NormalPermanentAlert("LKAS Disabled: Enable LKAS to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Disabled"),
},
}
| []
| []
| []
| [] | [] | python | 0 | 0 | |
UnsupervisedMT/NMT/src/utils.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import re
import sys
import pickle
import random
import inspect
import argparse
import subprocess
from logging import getLogger
import numpy as np
import torch
from torch import optim
from .logger import create_logger
from .data.dictionary import EOS_WORD, UNK_WORD
from .adam_inverse_sqrt_with_warmup import AdamInverseSqrtWithWarmup
logger = getLogger()
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag")
def initialize_exp(params, logger_filename='train.log'):
"""
Initialize the experiment:
- dump parameters
- create a logger
- set the random seed
"""
# dump parameters
get_dump_path(params)
pickle.dump(params, open(os.path.join(params.dump_path, 'params.pkl'), 'wb'))
# get running command
command = ["python", sys.argv[0]]
for x in sys.argv[1:]:
if x.startswith('--'):
assert '"' not in x and "'" not in x
command.append(x)
else:
assert "'" not in x
command.append("'%s'" % x)
command = ' '.join(command)
params.command = command + ' --exp_id "%s"' % params.exp_id
# random seed
if params.seed >= 0:
np.random.seed(params.seed)
torch.manual_seed(params.seed)
torch.cuda.manual_seed(params.seed)
# environment variables
if 'pivo_directions' in params and len(params.pivo_directions) > 0:
os.environ["OMP_NUM_THREADS"] = "2"
os.environ["MKL_NUM_THREADS"] = "2"
# create a logger
logger = create_logger(os.path.join(params.dump_path, logger_filename))
logger.info('============ Initialized logger ============')
logger.info('\n'.join('%s: %s' % (k, str(v))
for k, v in sorted(dict(vars(params)).items())))
logger.info('The experiment will be stored in %s\n' % params.dump_path)
logger.info('Running command: %s\n' % params.command)
return logger
def get_dump_path(params):
"""
Create a directory to store the experiment.
"""
assert len(params.exp_name) > 0
dump_path = './' if params.dump_path == '' else params.dump_path
subprocess.Popen("mkdir -p %s" % dump_path, shell=True).wait()
assert os.path.isdir(dump_path)
# create the sweep path if it does not exist
sweep_path = os.path.join(dump_path, params.exp_name)
if not os.path.exists(sweep_path):
subprocess.Popen("mkdir %s" % sweep_path, shell=True).wait()
# create an ID for the job if it is not given in the parameters.
# if we run on the cluster, the job ID is the one of Chronos.
# otherwise, it is randomly generated
if params.exp_id == '':
exp_id = os.environ.get('CHRONOS_JOB_ID')
if exp_id is None:
exp_id = os.environ.get('SLURM_JOB_ID')
if exp_id is None:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
while True:
exp_id = ''.join(random.choice(chars) for _ in range(10))
if not os.path.isdir(os.path.join(sweep_path, exp_id)):
break
else:
assert exp_id.isdigit()
params.exp_id = exp_id
else:
assert os.path.isdir(os.path.join(sweep_path, params.exp_id)) # reload an experiment
# create the dump folder / update parameters
params.dump_path = os.path.join(sweep_path, params.exp_id)
if not os.path.isdir(params.dump_path):
subprocess.Popen("mkdir %s" % params.dump_path, shell=True).wait()
def get_optimizer(parameters, s):
"""
Parse optimizer parameters.
Input should be of the form:
- "sgd,lr=0.01"
- "adagrad,lr=0.1,lr_decay=0.05"
"""
if "," in s:
method = s[:s.find(',')]
optim_params = {}
for x in s[s.find(',') + 1:].split(','):
split = x.split('=')
assert len(split) == 2
assert re.match("^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
optim_params[split[0]] = float(split[1])
else:
method = s
optim_params = {}
if method == 'adadelta':
optim_fn = optim.Adadelta
elif method == 'adagrad':
optim_fn = optim.Adagrad
elif method == 'adam':
optim_fn = optim.Adam
optim_params['betas'] = (optim_params.get('beta1', 0.5), optim_params.get('beta2', 0.999))
optim_params.pop('beta1', None)
optim_params.pop('beta2', None)
elif method == 'adamax':
optim_fn = optim.Adamax
elif method == 'asgd':
optim_fn = optim.ASGD
elif method == 'rmsprop':
optim_fn = optim.RMSprop
elif method == 'rprop':
optim_fn = optim.Rprop
elif method == 'sgd':
optim_fn = optim.SGD
assert 'lr' in optim_params
elif method == 'adam_inverse_sqrt':
optim_fn = AdamInverseSqrtWithWarmup
optim_params['betas'] = (optim_params.get('beta1', 0.9), optim_params.get('beta2', 0.98))
optim_params['warmup_updates'] = optim_params.get('warmup_updates', 4000)
optim_params.pop('beta1', None)
optim_params.pop('beta2', None)
else:
raise Exception('Unknown optimization method: "%s"' % method)
# check that we give good parameters to the optimizer
expected_args = inspect.getargspec(optim_fn.__init__)[0]
assert expected_args[:2] == ['self', 'params']
if not all(k in expected_args[2:] for k in optim_params.keys()):
raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
str(expected_args[2:]), str(optim_params.keys())))
return optim_fn(parameters, **optim_params)
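# Illustrative example (hypothetical values): get_optimizer(model.parameters(),
# "adam,lr=0.0003,beta1=0.8") parses to method="adam" and
# optim_params={"lr": 0.0003, "betas": (0.8, 0.999)}, and therefore returns
# optim.Adam(model.parameters(), lr=0.0003, betas=(0.8, 0.999)).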
def reload_parameters(old_params, new_params, attributes):
"""
Reload the parameters of a previous model.
"""
for k, v in old_params.__dict__.items():
if k in attributes and k not in new_params:
setattr(new_params, k, v)
def reload_model(model, to_reload, attributes):
"""
Reload a previously trained model.
"""
# check parameters sizes
model_params = set(model.state_dict().keys())
to_reload_params = set(to_reload.state_dict().keys())
assert model_params == to_reload_params, (model_params - to_reload_params,
to_reload_params - model_params)
# check attributes
warnings = []
errors = []
for k in attributes:
assert type(k) is tuple or type(k) is str
k, strict = k if type(k) is tuple else (k, True)
if getattr(model, k, None) is None:
errors.append('- Attribute "%s" not found in the current model' % k)
if getattr(to_reload, k, None) is None:
errors.append('- Attribute "%s" not found in the model to reload' % k)
if getattr(model, k, None) != getattr(to_reload, k, None):
message = ('- Attribute "%s" differs between the current model (%s) '
'and the one to reload (%s)'
% (k, str(getattr(model, k)), str(getattr(to_reload, k))))
(errors if strict else warnings).append(message)
if len(warnings) > 0:
logger.warning('Different parameters:\n%s' % '\n'.join(warnings))
if len(errors) > 0:
logger.error('Incompatible parameters:\n%s' % '\n'.join(errors))
exit()
# copy saved parameters
for k in model.state_dict().keys():
if model.state_dict()[k].size() != to_reload.state_dict()[k].size():
raise Exception("Expected tensor {} of size {}, but got {}".format(
k, model.state_dict()[k].size(),
to_reload.state_dict()[k].size()
))
model.state_dict()[k].copy_(to_reload.state_dict()[k])
def clip_parameters(model, clip):
"""
Clip model weights.
"""
if clip > 0:
for x in model.parameters():
x.data.clamp_(-clip, clip)
def get_grad_norm(model):
"""
Return the norm of the parameters gradients.
"""
norm = 0
for param in model.parameters():
norm += param.grad.data.norm(2) ** 2
return np.sqrt(norm)
def parse_lambda_config(params, name):
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 iterations, then will linearly increase to 1 until iteration 2000
"""
x = getattr(params, name)
split = x.split(',')
if len(split) == 1:
setattr(params, name, float(x))
setattr(params, name + '_config', None)
else:
split = [s.split(':') for s in split]
assert all(len(s) == 2 for s in split)
assert all(k.isdigit() for k, _ in split)
assert all(int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1))
setattr(params, name, float(split[0][1]))
setattr(params, name + '_config', [(int(k), float(v)) for k, v in split])
def update_lambda_value(config, n_iter):
"""
Update a lambda value according to its schedule configuration.
"""
ranges = [i for i in range(len(config) - 1) if config[i][0] <= n_iter < config[i + 1][0]]
if len(ranges) == 0:
assert n_iter >= config[-1][0]
return config[-1][1]
assert len(ranges) == 1
i = ranges[0]
x_a, y_a = config[i]
x_b, y_b = config[i + 1]
return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)
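# Worked example: with config=[(0, 0.0), (1000, 1.0)] and n_iter=250, the active
# segment is (0, 0.0) -> (1000, 1.0), so the returned value is
# 0.0 + 250 * (1.0 - 0.0) / 1000 = 0.25.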
def update_lambdas(params, n_total_iter):
"""
Update all lambda coefficients.
"""
if params.lambda_xe_mono_config is not None:
params.lambda_xe_mono = update_lambda_value(params.lambda_xe_mono_config, n_total_iter)
if params.lambda_xe_para_config is not None:
params.lambda_xe_para = update_lambda_value(params.lambda_xe_para_config, n_total_iter)
if params.lambda_xe_back_config is not None:
params.lambda_xe_back = update_lambda_value(params.lambda_xe_back_config, n_total_iter)
if params.lambda_xe_otfd_config is not None:
params.lambda_xe_otfd = update_lambda_value(params.lambda_xe_otfd_config, n_total_iter)
if params.lambda_xe_otfa_config is not None:
params.lambda_xe_otfa = update_lambda_value(params.lambda_xe_otfa_config, n_total_iter)
if params.lambda_dis_config is not None:
params.lambda_dis = update_lambda_value(params.lambda_dis_config, n_total_iter)
if params.lambda_lm_config is not None:
params.lambda_lm = update_lambda_value(params.lambda_lm_config, n_total_iter)
def get_mask(lengths, all_words, expand=None, ignore_first=False, batch_first=False, cuda=True):
"""
Create a mask of shape (slen, bs) or (bs, slen).
"""
bs, slen = lengths.size(0), lengths.max()
mask = torch.ByteTensor(slen, bs).zero_()
for i in range(bs):
if all_words:
mask[:lengths[i], i] = 1
else:
mask[lengths[i] - 1, i] = 1
if expand is not None:
assert type(expand) is int
mask = mask.unsqueeze(2).expand(slen, bs, expand)
if ignore_first:
mask[0].fill_(0)
if batch_first:
mask = mask.transpose(0, 1)
if cuda:
mask = mask.cuda()
return mask
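# Illustrative example: lengths=[3, 2] with all_words=True gives a (slen=3, bs=2)
# mask [[1, 1], [1, 1], [1, 0]]; with all_words=False only the last word of each
# sentence is set, i.e. [[0, 0], [0, 1], [1, 0]].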
def reverse_sentences(batch, lengths):
"""
Reverse sentences inside a batch.
"""
bs = lengths.size(0)
assert batch.size(1) == bs
new_batch = batch.clone()
inv_idx = torch.arange(lengths.max() - 1, -1, -1)
for i in range(bs):
new_batch[:lengths[i], i].copy_(new_batch[:, i][inv_idx[-lengths[i]:]])
return new_batch
def restore_segmentation(path):
"""
Take a file segmented with BPE and restore it to its original segmentation.
"""
assert os.path.isfile(path)
restore_cmd = "sed -i -r 's/(@@ )|(@@ ?$)//g' %s"
subprocess.Popen(restore_cmd % path, shell=True).wait()
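# Illustrative example: a BPE-segmented line such as "the un@@ believ@@ able cat"
# is restored in place to "the unbelievable cat".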
def create_word_masks(params, data):
"""
Create masks for allowed / forbidden output words.
"""
if not hasattr(params, 'vocab') or len(params.vocab) == 0:
return
params.vocab_mask_pos = []
params.vocab_mask_neg = []
for lang, n_words in zip(params.langs, params.n_words):
dico = data['dico'][lang]
vocab = data['vocab'][lang]
words = [EOS_WORD, UNK_WORD] + list(vocab)
mask_pos = set([dico.index(w) for w in words])
mask_neg = [i for i in range(n_words) if i not in mask_pos]
params.vocab_mask_pos.append(torch.LongTensor(sorted(mask_pos)))
params.vocab_mask_neg.append(torch.LongTensor(sorted(mask_neg)))
| []
| []
| [
"MKL_NUM_THREADS",
"OMP_NUM_THREADS",
"SLURM_JOB_ID",
"CHRONOS_JOB_ID"
]
| [] | ["MKL_NUM_THREADS", "OMP_NUM_THREADS", "SLURM_JOB_ID", "CHRONOS_JOB_ID"] | python | 4 | 0 | |
python_transport/wirepas_gateway/transport_service.py | # Copyright 2019 Wirepas Ltd licensed under Apache License, Version 2.0
#
# See file LICENSE for full license details.
#
import logging
import os
from time import time
from uuid import getnode
from threading import Thread
import wirepas_messaging
from wirepas_gateway.dbus.dbus_client import BusClient
from wirepas_gateway.protocol.topic_helper import TopicGenerator, TopicParser
from wirepas_gateway.protocol.mqtt_wrapper import MQTTWrapper
from wirepas_gateway.utils import ParserHelper
from wirepas_gateway.utils import LoggerHelper
from wirepas_messaging.gateway.api import (
GatewayResultCode,
GatewayState,
GatewayAPIParsingException,
)
from wirepas_gateway import __version__ as transport_version
from wirepas_gateway import __pkg_name__
# This constant is the actual API level implemented by this transport module (cf WP-RM-128)
IMPLEMENTED_API_VERSION = 1
class TransportService(BusClient):
"""
Implementation of gateway to backend protocol
Get all the events from DBUS and publish them with the right format
for gateways
"""
# Maximum hop limit to send a packet is limited to 15 by API (4 bits)
MAX_HOP_LIMIT = 15
def __init__(self, settings, logger=None, **kwargs):
self.logger = logger or logging.getLogger(__name__)
self.logger.info("Version is: %s", transport_version)
super(TransportService, self).__init__(
logger=logger,
c_extension=(settings.full_python is False),
ignored_ep_filter=settings.ignored_endpoints_filter,
**kwargs
)
self.gw_id = settings.gateway_id
self.gw_model = settings.gateway_model
self.gw_version = settings.gateway_version
self.whitened_ep_filter = settings.whitened_endpoints_filter
last_will_topic = TopicGenerator.make_status_topic(self.gw_id)
last_will_message = wirepas_messaging.gateway.api.StatusEvent(
self.gw_id, GatewayState.OFFLINE
).payload
self.mqtt_wrapper = MQTTWrapper(
settings,
self.logger,
self._on_mqtt_wrapper_termination_cb,
self._on_connect,
last_will_topic,
last_will_message,
)
self.mqtt_wrapper.start()
self.logger.info("Gateway started with id: %s", self.gw_id)
def _on_mqtt_wrapper_termination_cb(self):
"""
Callback used to be informed when the MQTT wrapper has exited
It is not a normal situation and it is better to exit the program
to have a chance to restart from a clean session
"""
self.logger.error("MQTT wrapper ends. Terminate the program")
self.stop_dbus_client()
def _set_status(self):
event_online = wirepas_messaging.gateway.api.StatusEvent(
self.gw_id, GatewayState.ONLINE
)
topic = TopicGenerator.make_status_topic(self.gw_id)
self.mqtt_wrapper.publish(topic, event_online.payload, qos=1, retain=True)
def _on_connect(self):
# Register for get gateway info
topic = TopicGenerator.make_get_gateway_info_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(topic, self._on_get_gateway_info_cmd_received)
# Register for get configs request
topic = TopicGenerator.make_get_configs_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(topic, self._on_get_configs_cmd_received)
# Register for set config request for any sink
topic = TopicGenerator.make_set_config_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(topic, self._on_set_config_cmd_received)
# Register for send data request for any sink on the gateway
topic = TopicGenerator.make_send_data_request_topic(self.gw_id)
self.logger.debug("Subscribing to: %s", topic)
# It is important to have a qos of 2 (also from the publisher) as qos 1 could generate
# duplicated packets, and we don't know the consequences on the end
# application
self.mqtt_wrapper.subscribe(topic, self._on_send_data_cmd_received, qos=2)
# Register for otap commands for any sink on the gateway
topic = TopicGenerator.make_otap_status_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(topic, self._on_otap_status_request_received)
topic = TopicGenerator.make_otap_load_scratchpad_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(
topic, self._on_otap_upload_scratchpad_request_received
)
topic = TopicGenerator.make_otap_process_scratchpad_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(
topic, self._on_otap_process_scratchpad_request_received
)
self._set_status()
self.logger.info("MQTT connected!")
def on_data_received(
self,
sink_id,
timestamp,
src,
dst,
src_ep,
dst_ep,
travel_time,
qos,
hop_count,
data,
):
if self.whitened_ep_filter is not None and dst_ep in self.whitened_ep_filter:
# Only publish payload size but not the payload
self.logger.debug("Filtering payload data")
            data_size = len(data)
data = None
else:
data_size = None
event = wirepas_messaging.gateway.api.ReceivedDataEvent(
gw_id=self.gw_id,
sink_id=sink_id,
rx_time_ms_epoch=timestamp,
src=src,
dst=dst,
src_ep=src_ep,
dst_ep=dst_ep,
travel_time_ms=travel_time,
qos=qos,
data=data,
data_size=data_size,
hop_count=hop_count,
)
sink = self.sink_manager.get_sink(sink_id)
if sink is None:
# It can happen at sink connection as messages can be received
# before sinks are identified
            self.logger.info(
                "Message received from a sink that is not yet identified: %s", sink_id
            )
return
network_address = sink.get_network_address()
topic = TopicGenerator.make_received_data_topic(
self.gw_id, sink_id, network_address, src_ep, dst_ep
)
self.logger.debug("Sending data to: %s", topic)
        # Set qos to 1 to avoid overloading the broker; the unique id in the event
        # header can be used for duplicate filtering in backends
self.mqtt_wrapper.publish(topic, event.payload, qos=1)
def on_stack_started(self, name):
sink = self.sink_manager.get_sink(name)
if sink is None:
self.logger.error("Sink started %s error: unknown sink", name)
return
# Generate a setconfig answer with req_id of 0
response = wirepas_messaging.gateway.api.SetConfigResponse(
0, self.gw_id, GatewayResultCode.GW_RES_OK, sink.sink_id, sink.read_config()
)
topic = TopicGenerator.make_set_config_response_topic(self.gw_id, sink.sink_id)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
def _send_asynchronous_get_configs_response(self):
# Create a list of different sink configs
configs = []
for sink in self.sink_manager.get_sinks():
config = sink.read_config()
if config is not None:
configs.append(config)
        # Generate a get configs answer with req_id of 0, as it does not come from
        # a real request
response = wirepas_messaging.gateway.api.GetConfigsResponse(
0, self.gw_id, GatewayResultCode.GW_RES_OK, configs
)
topic = TopicGenerator.make_get_configs_response_topic(self.gw_id)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
def deferred_thread(fn):
"""
        Decorator to handle a request on its own Thread
        to avoid blocking the calling Thread on I/O.
        It creates a new Thread, but it shouldn't impact performance
        as requests are not expected to be very frequent (a few per second)
"""
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
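    # Illustrative sketch of how the decorator below is meant to be used (the handler
    # name and body here are hypothetical):
    #
    #   @deferred_thread
    #   def _on_example_request_received(self, client, userdata, message):
    #       do_slow_io(message)  # runs on its own Thread, MQTT loop stays responsive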
def on_sink_connected(self, name):
self.logger.info("Sink connected, sending new configs")
self._send_asynchronous_get_configs_response()
def on_sink_disconnected(self, name):
self.logger.info("Sink disconnected, sending new configs")
self._send_asynchronous_get_configs_response()
@deferred_thread
def _on_send_data_cmd_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("Request to send data")
try:
request = wirepas_messaging.gateway.api.SendDataRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
# Get the sink-id from topic
_, sink_id = TopicParser.parse_send_data_topic(message.topic)
self.logger.debug("Request for sink %s", sink_id)
sink = self.sink_manager.get_sink(sink_id)
if sink is not None:
if request.hop_limit > self.MAX_HOP_LIMIT:
res = GatewayResultCode.GW_RES_INVALID_MAX_HOP_COUNT
else:
res = sink.send_data(
request.destination_address,
request.source_endpoint,
request.destination_endpoint,
request.qos,
request.initial_delay_ms,
request.data_payload,
request.is_unack_csma_ca,
request.hop_limit,
)
else:
self.logger.warning("No sink with id: %s", sink_id)
# No sink with this id
res = GatewayResultCode.GW_RES_INVALID_SINK_ID
# Answer to backend
response = wirepas_messaging.gateway.api.SendDataResponse(
request.req_id, self.gw_id, res, sink_id
)
topic = TopicGenerator.make_send_data_response_topic(self.gw_id, sink_id)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
@deferred_thread
def _on_get_configs_cmd_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("Config request received")
try:
request = wirepas_messaging.gateway.api.GetConfigsRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
# Create a list of different sink configs
configs = []
for sink in self.sink_manager.get_sinks():
config = sink.read_config()
if config is not None:
configs.append(config)
response = wirepas_messaging.gateway.api.GetConfigsResponse(
request.req_id, self.gw_id, GatewayResultCode.GW_RES_OK, configs
)
topic = TopicGenerator.make_get_configs_response_topic(self.gw_id)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
def _on_get_gateway_info_cmd_received(self, client, userdata, message):
# pylint: disable=unused-argument
"""
        This function doesn't need the @deferred_thread decorator as the request
        is handled without I/O
"""
self.logger.info("Gateway info request received")
try:
request = wirepas_messaging.gateway.api.GetGatewayInfoRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
response = wirepas_messaging.gateway.api.GetGatewayInfoResponse(
request.req_id,
self.gw_id,
GatewayResultCode.GW_RES_OK,
current_time_s_epoch=int(time()),
gateway_model=self.gw_model,
gateway_version=self.gw_version,
implemented_api_version=IMPLEMENTED_API_VERSION,
)
topic = TopicGenerator.make_get_gateway_info_response_topic(self.gw_id)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
@deferred_thread
def _on_set_config_cmd_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("Set config request received")
try:
request = wirepas_messaging.gateway.api.SetConfigRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
self.logger.debug("Set sink config: %s", request)
sink = self.sink_manager.get_sink(request.sink_id)
if sink is not None:
res = sink.write_config(request.new_config)
new_config = sink.read_config()
else:
res = GatewayResultCode.GW_RES_INVALID_SINK_ID
new_config = None
response = wirepas_messaging.gateway.api.SetConfigResponse(
request.req_id, self.gw_id, res, request.sink_id, new_config
)
topic = TopicGenerator.make_set_config_response_topic(
self.gw_id, request.sink_id
)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
@deferred_thread
def _on_otap_status_request_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("OTAP status request received")
try:
request = wirepas_messaging.gateway.api.GetScratchpadStatusRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
sink = self.sink_manager.get_sink(request.sink_id)
if sink is not None:
d = sink.get_scratchpad_status()
response = wirepas_messaging.gateway.api.GetScratchpadStatusResponse(
request.req_id,
self.gw_id,
GatewayResultCode.GW_RES_OK,
request.sink_id,
d["stored_scartchpad"],
d["stored_status"],
d["stored_type"],
d["processed_scartchpad"],
d["firmware_area_id"],
)
else:
response = wirepas_messaging.gateway.api.GetScratchpadStatusResponse(
request.req_id,
self.gw_id,
GatewayResultCode.GW_RES_INVALID_SINK_ID,
request.sink_id,
)
topic = TopicGenerator.make_otap_status_response_topic(
self.gw_id, request.sink_id
)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
@deferred_thread
def _on_otap_upload_scratchpad_request_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("OTAP upload request received")
try:
request = wirepas_messaging.gateway.api.UploadScratchpadRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
self.logger.info("OTAP upload request received for %s", request.sink_id)
sink = self.sink_manager.get_sink(request.sink_id)
if sink is not None:
res = sink.upload_scratchpad(request.seq, request.scratchpad)
else:
res = GatewayResultCode.GW_RES_INVALID_SINK_ID
response = wirepas_messaging.gateway.api.UploadScratchpadResponse(
request.req_id, self.gw_id, res, request.sink_id
)
topic = TopicGenerator.make_otap_upload_scratchpad_response_topic(
self.gw_id, request.sink_id
)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
@deferred_thread
def _on_otap_process_scratchpad_request_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("OTAP process request received")
try:
request = wirepas_messaging.gateway.api.ProcessScratchpadRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
sink = self.sink_manager.get_sink(request.sink_id)
if sink is not None:
res = sink.process_scratchpad()
else:
res = GatewayResultCode.GW_RES_INVALID_SINK_ID
response = wirepas_messaging.gateway.api.ProcessScratchpadResponse(
request.req_id, self.gw_id, res, request.sink_id
)
topic = TopicGenerator.make_otap_process_scratchpad_response_topic(
self.gw_id, request.sink_id
)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
def parse_setting_list(list_setting):
    """ This function parses the ep list specified in the settings file or on the cmd line
    The input list has the format [1, 5, 10-15], as a string or a list of strings,
    and is expanded into a single list [1, 5, 10, 11, 12, 13, 14, 15]
    Args:
        list_setting(str or list): the list from the settings file or cmd line.
    Returns: A single list of eps
    """
if isinstance(list_setting, str):
# List is a string from cmd line
list_setting = list_setting.replace("[", "")
list_setting = list_setting.replace("]", "")
list_setting = list_setting.split(",")
single_list = []
for ep in list_setting:
# Check if ep is directly an int
if isinstance(ep, int):
if ep < 0 or ep > 255:
raise SyntaxError("EP out of bound")
single_list.append(ep)
continue
# Check if ep is a single ep as string
try:
ep = int(ep)
if ep < 0 or ep > 255:
raise SyntaxError("EP out of bound")
single_list.append(ep)
continue
except ValueError:
# Probably a range
pass
# Check if ep is a range
try:
ep = ep.replace("'", "")
lower, upper = ep.split("-")
lower = int(lower)
upper = int(upper)
if lower > upper or lower < 0 or upper > 255:
raise SyntaxError("Wrong EP range value")
single_list += list(range(lower, upper + 1))
except (AttributeError, ValueError):
raise SyntaxError("Wrong EP range format")
return single_list
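# Illustrative expansion examples for parse_setting_list (values are arbitrary):
#
#   parse_setting_list("[1, 5, 10-15]")  ->  [1, 5, 10, 11, 12, 13, 14, 15]
#   parse_setting_list([1, "20-22"])     ->  [1, 20, 21, 22]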
def _check_duplicate(args, old_param, new_param, default, logger):
old_param_val = getattr(args, old_param, default)
new_param_val = getattr(args, new_param, default)
if new_param_val == old_param_val:
# Nothing to update
return
if old_param_val != default:
# Old param is set, check if new_param is also set
if new_param_val == default:
setattr(args, new_param, old_param_val)
logger.warning(
"Param %s is deprecated, please use %s instead", old_param, new_param
)
else:
logger.error(
"Param %s and %s cannot be set at the same time", old_param, new_param
)
exit()
def _update_parameters(settings, logger):
"""
    Function to handle backward compatibility with old parameter names
Args:
settings: Full parameters
Returns: None
"""
_check_duplicate(settings, "host", "mqtt_hostname", None, logger)
_check_duplicate(settings, "port", "mqtt_port", 8883, logger)
_check_duplicate(settings, "username", "mqtt_username", None, logger)
_check_duplicate(settings, "password", "mqtt_password", None, logger)
_check_duplicate(settings, "tlsfile", "mqtt_certfile", None, logger)
_check_duplicate(
settings, "unsecure_authentication", "mqtt_force_unsecure", False, logger
)
_check_duplicate(settings, "gwid", "gateway_id", None, logger)
if settings.gateway_id is None:
settings.gateway_id = str(getnode())
# Parse EP list that should not be published
if settings.ignored_endpoints_filter is not None:
try:
settings.ignored_endpoints_filter = parse_setting_list(
settings.ignored_endpoints_filter
)
logger.debug("Ignored endpoints are: %s", settings.ignored_endpoints_filter)
except SyntaxError as e:
logger.error("Wrong format for ignored_endpoints_filter EP list (%s)", e)
exit()
if settings.whitened_endpoints_filter is not None:
try:
settings.whitened_endpoints_filter = parse_setting_list(
settings.whitened_endpoints_filter
)
            logger.debug(
                "Whitened endpoints are: %s", settings.whitened_endpoints_filter
            )
except SyntaxError as e:
logger.error("Wrong format for whitened_endpoints_filter EP list (%s)", e)
exit()
def _check_parameters(settings, logger):
if settings.mqtt_force_unsecure and settings.mqtt_certfile:
# If tls cert file is provided, unsecure authentication cannot
# be set
logger.error("Cannot give certfile and disable secure authentication")
exit()
try:
if set(settings.ignored_endpoints_filter) & set(
settings.whitened_endpoints_filter
):
logger.error("Some endpoints are both ignored and whitened")
exit()
except TypeError:
        # One of the filter lists is None
pass
def main():
"""
Main service for transport module
"""
parse = ParserHelper(
description="Wirepas Gateway Transport service arguments",
version=transport_version,
)
parse.add_file_settings()
parse.add_mqtt()
parse.add_gateway_config()
parse.add_filtering_config()
parse.add_deprecated_args()
settings = parse.settings()
# Set default debug level
debug_level = "info"
try:
debug_level = os.environ["DEBUG_LEVEL"]
        print(
            "Deprecated environment variable DEBUG_LEVEL "
            "(it will be dropped from version 2.x onwards);"
            " please use WM_DEBUG_LEVEL instead."
        )
except KeyError:
pass
try:
debug_level = os.environ["WM_DEBUG_LEVEL"]
except KeyError:
pass
log = LoggerHelper(module_name=__pkg_name__, level=debug_level)
logger = log.setup()
_update_parameters(settings, logger)
    # after this stage, the deprecated mqtt arguments cannot be used
_check_parameters(settings, logger)
TransportService(settings=settings, logger=logger).run()
if __name__ == "__main__":
main()
| []
| []
| [
"WM_DEBUG_LEVEL",
"DEBUG_LEVEL"
]
| [] | ["WM_DEBUG_LEVEL", "DEBUG_LEVEL"] | python | 2 | 0 | |
db.go | package main
import (
"gopkg.in/mgo.v2"
"log"
"os"
)
func DbConnect() *mgo.Session {
session, err := mgo.Dial(os.Getenv("MONGOHQ_URL"))
if err != nil {
panic(err)
}
col := session.DB("goapi").C("tweets")
err = col.Insert(&Tweet{Body: "Hello World"})
if err != nil {
log.Fatal(err)
}
return session
}
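// Illustrative usage sketch (the URL value is hypothetical):
//
//	os.Setenv("MONGOHQ_URL", "mongodb://localhost:27017/goapi")
//	session := DbConnect()
//	defer session.Close()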
| [
"\"MONGOHQ_URL\""
]
| []
| [
"MONGOHQ_URL"
]
| [] | ["MONGOHQ_URL"] | go | 1 | 0 | |
cmd/ucloudcli/main.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"net/http"
"net/url"
"os"
"golang.org/x/net/http/httpproxy"
"yunion.io/x/structarg"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud/ucloud"
_ "yunion.io/x/onecloud/pkg/multicloud/ucloud/shell"
"yunion.io/x/onecloud/pkg/util/shellutils"
)
type BaseOptions struct {
Help bool `help:"Show help" default:"false"`
Debug bool `help:"Show debug" default:"false"`
AccessKey string `help:"Access key" default:"$UCLOUD_ACCESS_KEY" metavar:"UCLOUD_ACCESS_KEY"`
Secret string `help:"Secret" default:"$UCLOUD_SECRET" metavar:"UCLOUD_SECRET"`
RegionId string `help:"RegionId" default:"$UCLOUD_REGION" metavar:"UCLOUD_REGION"`
ProjectId string `help:"ProjectId" default:"$UCLOUD_PROJECT" metavar:"UCLOUD_PROJECT"`
SUBCOMMAND string `help:"ucloudcli subcommand" subcommand:"true"`
}
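// The "$UCLOUD_*" defaults above are intended to be resolved from the environment, so a
// typical invocation could look like this (values and subcommand are hypothetical):
//
//	export UCLOUD_ACCESS_KEY=AKxxxxxxxx
//	export UCLOUD_SECRET=xxxxxxxx
//	export UCLOUD_REGION=cn-bj2
//	export UCLOUD_PROJECT=org-xxxx
//	ucloudcli instance-list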
func getSubcommandParser() (*structarg.ArgumentParser, error) {
parse, e := structarg.NewArgumentParser(&BaseOptions{},
"ucloudcli",
"Command-line interface to ucloud API.",
`See "ucloudcli help COMMAND" for help on a specific command.`)
if e != nil {
return nil, e
}
subcmd := parse.GetSubcommand()
if subcmd == nil {
return nil, fmt.Errorf("No subcommand argument.")
}
type HelpOptions struct {
SUBCOMMAND string `help:"sub-command name"`
}
shellutils.R(&HelpOptions{}, "help", "Show help of a subcommand", func(args *HelpOptions) error {
helpstr, e := subcmd.SubHelpString(args.SUBCOMMAND)
if e != nil {
return e
} else {
fmt.Print(helpstr)
return nil
}
})
for _, v := range shellutils.CommandTable {
_, e := subcmd.AddSubParser(v.Options, v.Command, v.Desc, v.Callback)
if e != nil {
return nil, e
}
}
return parse, nil
}
func showErrorAndExit(e error) {
fmt.Fprintf(os.Stderr, "%s", e)
fmt.Fprintln(os.Stderr)
os.Exit(1)
}
func newClient(options *BaseOptions) (*ucloud.SRegion, error) {
if len(options.AccessKey) == 0 {
return nil, fmt.Errorf("Missing accessKey")
}
if len(options.Secret) == 0 {
return nil, fmt.Errorf("Missing secret")
}
cfg := &httpproxy.Config{
HTTPProxy: os.Getenv("HTTP_PROXY"),
HTTPSProxy: os.Getenv("HTTPS_PROXY"),
NoProxy: os.Getenv("NO_PROXY"),
}
cfgProxyFunc := cfg.ProxyFunc()
proxyFunc := func(req *http.Request) (*url.URL, error) {
return cfgProxyFunc(req.URL)
}
cli, err := ucloud.NewUcloudClient(
ucloud.NewUcloudClientConfig(
options.AccessKey,
options.Secret,
).ProjectId(options.ProjectId).Debug(options.Debug).
CloudproviderConfig(
cloudprovider.ProviderConfig{
ProxyFunc: proxyFunc,
},
),
)
if err != nil {
return nil, err
}
region := cli.GetRegion(options.RegionId)
if region == nil {
return nil, fmt.Errorf("No such region %s", options.RegionId)
}
return region, nil
}
func main() {
parser, e := getSubcommandParser()
if e != nil {
showErrorAndExit(e)
}
e = parser.ParseArgs(os.Args[1:], false)
options := parser.Options().(*BaseOptions)
if options.Help {
fmt.Print(parser.HelpString())
} else {
subcmd := parser.GetSubcommand()
subparser := subcmd.GetSubParser()
if e != nil {
if subparser != nil {
fmt.Print(subparser.Usage())
} else {
fmt.Print(parser.Usage())
}
showErrorAndExit(e)
} else {
suboptions := subparser.Options()
if options.SUBCOMMAND == "help" {
e = subcmd.Invoke(suboptions)
} else {
var region *ucloud.SRegion
region, e = newClient(options)
if e != nil {
showErrorAndExit(e)
}
e = subcmd.Invoke(region, suboptions)
}
if e != nil {
showErrorAndExit(e)
}
}
}
}
| [
"\"HTTP_PROXY\"",
"\"HTTPS_PROXY\"",
"\"NO_PROXY\""
]
| []
| [
"HTTP_PROXY",
"HTTPS_PROXY",
"NO_PROXY"
]
| [] | ["HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"] | go | 3 | 0 | |
out/python/IpfsPinningSDK/api_client.py | """
IPFS Pinning Service API
## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers - For use in client mode by IPFS nodes and GUI-based applications ### Document scope and intended audience The intended audience of this document is **IPFS developers** building pinning service clients or servers compatible with this OpenAPI spec. Your input and feedback are welcome and valuable as we develop this API spec. Please join the design discussion at [github.com/ipfs/pinning-services-api-spec](https://github.com/ipfs/pinning-services-api-spec). **IPFS users** should see the tutorial at [docs.ipfs.io/how-to/work-with-pinning-services](https://docs.ipfs.io/how-to/work-with-pinning-services/) instead. ### Related resources The latest version of this spec and additional resources can be found at: - Specification: https://github.com/ipfs/pinning-services-api-spec/raw/main/ipfs-pinning-service.yaml - Docs: https://ipfs.github.io/pinning-services-api-spec/ - Clients and services: https://github.com/ipfs/pinning-services-api-spec#adoption # Schemas This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. ### requestid Unique identifier of a pin request. When a pin is created, the service responds with unique `requestid` that can be later used for pin removal. When the same `cid` is pinned again, a different `requestid` is returned to differentiate between those pin requests. Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. ## Objects ### Pin object  The `Pin` object is a representation of a pin request. It includes the `cid` of data to be pinned, as well as optional metadata in `name`, `origins`, and `meta`. ### Pin status response  The `PinStatus` object is a representation of the current state of a pinning operation. It includes the original `pin` object, along with the current `status` and globally unique `requestid` of the entire pinning request, which can be used for future status checks and management. Addresses in the `delegates` array are peers delegated by the pinning service for facilitating direct file transfers (more details in the provider hints section). Any additional vendor-specific information is returned in optional `info`. # The pin lifecycle  ## Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: - `requestid` in `PinStatus` is the identifier of the pin operation, which can can be used for checking status, and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ## Checking status of in-progress pinning `status` (in `PinStatus`) may indicate a pending state (`queued` or `pinning`). This means the data behind `Pin.cid` was not found on the pinning service and is being fetched from the IPFS network at large, which may take time. In this case, the user can periodically check pinning progress via `GET /pins/{requestid}` until pinning is successful, or the user decides to remove the pending pin. 
## Replacing an existing pin object The user can replace an existing pin object via `POST /pins/{requestid}`. This is a shortcut for removing a pin object identified by `requestid` and creating a new one in a single API call that protects against undesired garbage collection of blocks common to both pins. Useful when updating a pin representing a huge dataset where most of blocks did not change. The new pin object `requestid` is returned in the `PinStatus` response. The old pin object is deleted automatically. ## Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. # Provider hints A pinning service will use the DHT and other discovery methods to locate pinned content; however, it is a good practice to provide additional provider hints to speed up the discovery phase and start the transfer immediately, especially if a client has the data in their own datastore or already knows of other providers. The most common scenario is a client putting its own IPFS node's multiaddrs in `Pin.origins`, and then attempt to connect to every multiaddr returned by a pinning service in `PinStatus.delegates` to initiate transfer. At the same time, a pinning service will try to connect to multiaddrs provided by the client in `Pin.origins`. This ensures data transfer starts immediately (without waiting for provider discovery over DHT), and mutual direct dial between a client and a service works around peer routing issues in restrictive network topologies, such as NATs, firewalls, etc. **NOTE:** Connections to multiaddrs in `origins` and `delegates` arrays should be attempted in best-effort fashion, and dial failure should not fail the pinning operation. When unable to act on explicit provider hints, DHT and other discovery methods should be used as a fallback by a pinning service. **NOTE:** All multiaddrs MUST end with `/p2p/{peerID}` and SHOULD be fully resolved and confirmed to be dialable from the public internet. Avoid sending addresses from local networks. # Custom metadata Pinning services are encouraged to add support for additional features by leveraging the optional `Pin.meta` and `PinStatus.info` fields. While these attributes can be application- or vendor-specific, we encourage the community at large to leverage these attributes as a sandbox to come up with conventions that could become part of future revisions of this API. ## Pin metadata String keys and values passed in `Pin.meta` are persisted with the pin object. This is an opt-in feature: It is OK for a client to omit or ignore these optional attributes, and doing so should not impact the basic pinning functionality. Potential uses: - `Pin.meta[app_id]`: Attaching a unique identifier to pins created by an app enables meta-filtering pins per app - `Pin.meta[vendor_policy]`: Vendor-specific policy (for example: which region to use, how many copies to keep) ### Filtering based on metadata The contents of `Pin.meta` can be used as an advanced search filter for situations where searching by `name` and `cid` is not enough. Metadata key matching rule is `AND`: - lookup returns pins that have `meta` with all key-value pairs matching the passed values - pin metadata may have more keys, but only ones passed in the query are used for filtering The wire format for the `meta` when used as a query parameter is a [URL-escaped](https://en.wikipedia.org/wiki/Percent-encoding) stringified JSON object. 
A lookup example for pins that have a `meta` key-value pair `{\"app_id\":\"UUID\"}` is: - `GET /pins?meta=%7B%22app_id%22%3A%22UUID%22%7D` ## Pin status info Additional `PinStatus.info` can be returned by pinning service. Potential uses: - `PinStatus.info[status_details]`: more info about the current status (queue position, percentage of transferred data, summary of where data is stored, etc); when `PinStatus.status=failed`, it could provide a reason why a pin operation failed (e.g. lack of funds, DAG too big, etc.) - `PinStatus.info[dag_size]`: the size of pinned data, along with DAG overhead - `PinStatus.info[raw_size]`: the size of data without DAG overhead (eg. unixfs) - `PinStatus.info[pinned_until]`: if vendor supports time-bound pins, this could indicate when the pin will expire # Pagination and filtering Pin objects can be listed by executing `GET /pins` with optional parameters: - When no filters are provided, the endpoint will return a small batch of the 10 most recently created items, from the latest to the oldest. - The number of returned items can be adjusted with the `limit` parameter (implicit default is 10). - If the value in `PinResults.count` is bigger than the length of `PinResults.results`, the client can infer there are more results that can be queried. - To read more items, pass the `before` filter with the timestamp from `PinStatus.created` found in the oldest item in the current batch of results. Repeat to read all results. - Returned results can be fine-tuned by applying optional `after`, `cid`, `name`, `status`, or `meta` filters. > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import json
import atexit
import mimetypes
from multiprocessing.pool import ThreadPool
import io
import os
import re
import typing
from urllib.parse import quote
from urllib3.fields import RequestField
from IpfsPinningSDK import rest
from IpfsPinningSDK.configuration import Configuration
from IpfsPinningSDK.exceptions import ApiTypeError, ApiValueError, ApiException
from IpfsPinningSDK.model_utils import (
ModelNormal,
ModelSimple,
ModelComposed,
check_allowed_values,
check_validations,
date,
datetime,
deserialize_file,
file_type,
model_to_dict,
none_type,
validate_and_convert_types
)
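# Illustrative usage sketch for this generated client; the host, token, and query
# parameters below are placeholders/assumptions, not values defined by this module:
#
#   import IpfsPinningSDK
#   from IpfsPinningSDK.api import pins_api
#
#   configuration = IpfsPinningSDK.Configuration(host="https://pinning.example.com")
#   configuration.access_token = "<bearer token>"
#   with IpfsPinningSDK.ApiClient(configuration) as api_client:
#       api_instance = pins_api.PinsApi(api_client)
#       pin_results = api_instance.pins_get(limit=10)  # ten most recent pins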
class ApiClient(object):
"""Generic API client for OpenAPI client library builds.
OpenAPI generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the OpenAPI
templates.
NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
_pool = None
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None, pool_threads=1):
if configuration is None:
configuration = Configuration.get_default_copy()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'OpenAPI-Generator/1.0.0/python'
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
if hasattr(atexit, 'unregister'):
atexit.unregister(self.close)
@property
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
atexit.register(self.close)
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self,
resource_path: str,
method: str,
path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
query_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
header_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
body: typing.Optional[typing.Any] = None,
post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None,
response_type: typing.Optional[typing.Tuple[typing.Any]] = None,
auth_settings: typing.Optional[typing.List[str]] = None,
_return_http_data_only: typing.Optional[bool] = None,
collection_formats: typing.Optional[typing.Dict[str, str]] = None,
_preload_content: bool = True,
_request_timeout: typing.Optional[typing.Union[int, float, typing.Tuple]] = None,
_host: typing.Optional[str] = None,
_check_type: typing.Optional[bool] = None
):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = post_params if post_params else []
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
post_params.extend(self.files_parameters(files))
if header_params['Content-Type'].startswith("multipart"):
post_params = self.parameters_to_multipart(post_params,
(dict) )
# body
if body:
body = self.sanitize_for_serialization(body)
# auth setting
self.update_params_for_auth(header_params, query_params,
auth_settings, resource_path, method, body)
# request url
if _host is None:
url = self.configuration.host + resource_path
else:
# use server/host defined in path or operation instead
url = _host + resource_path
try:
# perform request and return response
response_data = self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
except ApiException as e:
e.body = e.body.decode('utf-8')
raise e
self.last_response = response_data
return_data = response_data
if not _preload_content:
return (return_data)
return return_data
# deserialize response data
if response_type:
if response_type != (file_type,):
encoding = "utf-8"
content_type = response_data.getheader('content-type')
if content_type is not None:
match = re.search(r"charset=([a-zA-Z\-\d]+)[\s\;]?", content_type)
if match:
encoding = match.group(1)
response_data.data = response_data.data.decode(encoding)
return_data = self.deserialize(
response_data,
response_type,
_check_type
)
else:
return_data = None
if _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status,
response_data.getheaders())
def parameters_to_multipart(self, params, collection_types):
"""Get parameters as list of tuples, formatting as json if value is collection_types
:param params: Parameters as list of two-tuples
:param dict collection_types: Parameter collection types
:return: Parameters as list of tuple or urllib3.fields.RequestField
"""
new_params = []
if collection_types is None:
collection_types = (dict)
for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
if isinstance(v, collection_types): # v is instance of collection_type, formatting as application/json
v = json.dumps(v, ensure_ascii=False).encode("utf-8")
field = RequestField(k, v)
field.make_multipart(content_type="application/json; charset=utf-8")
new_params.append(field)
else:
new_params.append((k, v))
return new_params
@classmethod
def sanitize_for_serialization(cls, obj):
"""Prepares data for transmission before it is sent with the rest client
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
If obj is io.IOBase, return the bytes
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if isinstance(obj, (ModelNormal, ModelComposed)):
return {
key: cls.sanitize_for_serialization(val) for key, val in model_to_dict(obj, serialize=True).items()
}
elif isinstance(obj, io.IOBase):
return cls.get_file_data_and_close_file(obj)
elif isinstance(obj, (str, int, float, none_type, bool)):
return obj
elif isinstance(obj, (datetime, date)):
return obj.isoformat()
elif isinstance(obj, ModelSimple):
return cls.sanitize_for_serialization(obj.value)
elif isinstance(obj, (list, tuple)):
return [cls.sanitize_for_serialization(item) for item in obj]
if isinstance(obj, dict):
return {key: cls.sanitize_for_serialization(val) for key, val in obj.items()}
raise ApiValueError('Unable to prepare type {} for serialization'.format(obj.__class__.__name__))
def deserialize(self, response, response_type, _check_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: For the response, a tuple containing:
valid classes
a list containing valid classes (for list schemas)
a dict containing a tuple of valid classes as the value
Example values:
(str,)
(Pet,)
(float, none_type)
([int, none_type],)
({str: (bool, str, int, float, date, datetime, str, none_type)},)
:param _check_type: boolean, whether to check the types of the data
received from the server
:type _check_type: bool
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == (file_type,):
content_disposition = response.getheader("Content-Disposition")
return deserialize_file(response.data, self.configuration,
content_disposition=content_disposition)
# fetch data from response object
try:
received_data = json.loads(response.data)
except ValueError:
received_data = response.data
# store our data under the key of 'received_data' so users have some
# context if they are deserializing a string and the data type is wrong
deserialized_data = validate_and_convert_types(
received_data,
response_type,
['received_data'],
True,
_check_type,
configuration=self.configuration
)
return deserialized_data
def call_api(
self,
resource_path: str,
method: str,
path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
query_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
header_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
body: typing.Optional[typing.Any] = None,
post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None,
response_type: typing.Optional[typing.Tuple[typing.Any]] = None,
auth_settings: typing.Optional[typing.List[str]] = None,
async_req: typing.Optional[bool] = None,
_return_http_data_only: typing.Optional[bool] = None,
collection_formats: typing.Optional[typing.Dict[str, str]] = None,
_preload_content: bool = True,
_request_timeout: typing.Optional[typing.Union[int, float, typing.Tuple]] = None,
_host: typing.Optional[str] = None,
_check_type: typing.Optional[bool] = None
):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: For the response, a tuple containing:
valid classes
a list containing valid classes (for list schemas)
a dict containing a tuple of valid classes as the value
Example values:
(str,)
(Pet,)
(float, none_type)
([int, none_type],)
({str: (bool, str, int, float, date, datetime, str, none_type)},)
:param files: key -> field name, value -> a list of open file
objects for `multipart/form-data`.
:type files: dict
:param async_req bool: execute request asynchronously
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:type collection_formats: dict, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _check_type: boolean describing if the data back from the server
should have its type checked.
:type _check_type: bool, optional
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout, _host,
_check_type)
return self.pool.apply_async(self.__call_api, (resource_path,
method, path_params,
query_params,
header_params, body,
post_params, files,
response_type,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
_host, _check_type))
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ApiValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
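        # Example of the formatting above (illustrative values): with
        # collection_formats={"status": "csv"}, the input {"status": ["queued", "pinning"]}
        # becomes [("status", "queued,pinning")]; with the "multi" format it would
        # instead become [("status", "queued"), ("status", "pinning")].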
@staticmethod
def get_file_data_and_close_file(file_instance: io.IOBase) -> bytes:
file_data = file_instance.read()
file_instance.close()
return file_data
def files_parameters(self, files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None):
"""Builds form parameters.
:param files: None or a dict with key=param_name and
value is a list of open file objects
:return: List of tuples of form parameters with file data
"""
if files is None:
return []
params = []
for param_name, file_instances in files.items():
if file_instances is None:
# if the file field is nullable, skip None values
continue
for file_instance in file_instances:
if file_instance is None:
# if the file field is nullable, skip None values
continue
if file_instance.closed is True:
raise ApiValueError(
"Cannot read a closed file. The passed in file_type "
"for %s must be open." % param_name
)
filename = os.path.basename(file_instance.name)
filedata = self.get_file_data_and_close_file(file_instance)
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([param_name, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings,
resource_path, method, body):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
:param resource_path: A string representation of the HTTP request resource path.
:param method: A string representation of the HTTP request method.
:param body: A object representing the body of the HTTP request.
The object type is the return value of _encoder.default().
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if auth_setting['in'] == 'cookie':
headers['Cookie'] = auth_setting['value']
elif auth_setting['in'] == 'header':
if auth_setting['type'] != 'http-signature':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ApiValueError(
'Authentication token must be in `query` or `header`'
)
class Endpoint(object):
def __init__(self, settings=None, params_map=None, root_map=None,
headers_map=None, api_client=None, callable=None):
"""Creates an endpoint
Args:
settings (dict): see below key value pairs
'response_type' (tuple/None): response type
'auth' (list): a list of auth type keys
'endpoint_path' (str): the endpoint path
'operation_id' (str): endpoint string identifier
'http_method' (str): POST/PUT/PATCH/GET etc
'servers' (list): list of str servers that this endpoint is at
params_map (dict): see below key value pairs
'all' (list): list of str endpoint parameter names
'required' (list): list of required parameter names
'nullable' (list): list of nullable parameter names
'enum' (list): list of parameters with enum values
'validation' (list): list of parameters with validations
root_map
'validations' (dict): the dict mapping endpoint parameter tuple
paths to their validation dictionaries
'allowed_values' (dict): the dict mapping endpoint parameter
tuple paths to their allowed_values (enum) dictionaries
'openapi_types' (dict): param_name to openapi type
'attribute_map' (dict): param_name to camelCase name
'location_map' (dict): param_name to 'body', 'file', 'form',
'header', 'path', 'query'
collection_format_map (dict): param_name to `csv` etc.
headers_map (dict): see below key value pairs
'accept' (list): list of Accept header strings
'content_type' (list): list of Content-Type header strings
api_client (ApiClient) api client instance
callable (function): the function which is invoked when the
Endpoint is called
"""
self.settings = settings
self.params_map = params_map
self.params_map['all'].extend([
'async_req',
'_host_index',
'_preload_content',
'_request_timeout',
'_return_http_data_only',
'_check_input_type',
'_check_return_type'
])
self.params_map['nullable'].extend(['_request_timeout'])
self.validations = root_map['validations']
self.allowed_values = root_map['allowed_values']
self.openapi_types = root_map['openapi_types']
extra_types = {
'async_req': (bool,),
'_host_index': (none_type, int),
'_preload_content': (bool,),
'_request_timeout': (none_type, float, (float,), [float], int, (int,), [int]),
'_return_http_data_only': (bool,),
'_check_input_type': (bool,),
'_check_return_type': (bool,)
}
self.openapi_types.update(extra_types)
self.attribute_map = root_map['attribute_map']
self.location_map = root_map['location_map']
self.collection_format_map = root_map['collection_format_map']
self.headers_map = headers_map
self.api_client = api_client
self.callable = callable
def __validate_inputs(self, kwargs):
for param in self.params_map['enum']:
if param in kwargs:
check_allowed_values(
self.allowed_values,
(param,),
kwargs[param]
)
for param in self.params_map['validation']:
if param in kwargs:
check_validations(
self.validations,
(param,),
kwargs[param],
configuration=self.api_client.configuration
)
if kwargs['_check_input_type'] is False:
return
for key, value in kwargs.items():
fixed_val = validate_and_convert_types(
value,
self.openapi_types[key],
[key],
False,
kwargs['_check_input_type'],
configuration=self.api_client.configuration
)
kwargs[key] = fixed_val
def __gather_params(self, kwargs):
params = {
'body': None,
'collection_format': {},
'file': {},
'form': [],
'header': {},
'path': {},
'query': []
}
for param_name, param_value in kwargs.items():
param_location = self.location_map.get(param_name)
if param_location is None:
continue
if param_location:
if param_location == 'body':
params['body'] = param_value
continue
base_name = self.attribute_map[param_name]
if (param_location == 'form' and
self.openapi_types[param_name] == (file_type,)):
params['file'][param_name] = [param_value]
elif (param_location == 'form' and
self.openapi_types[param_name] == ([file_type],)):
# param_value is already a list
params['file'][param_name] = param_value
elif param_location in {'form', 'query'}:
param_value_full = (base_name, param_value)
params[param_location].append(param_value_full)
if param_location not in {'form', 'query'}:
params[param_location][base_name] = param_value
collection_format = self.collection_format_map.get(param_name)
if collection_format:
params['collection_format'][base_name] = collection_format
return params
def __call__(self, *args, **kwargs):
""" This method is invoked when endpoints are called
Example:
api_instance = PinsApi()
api_instance.pins_get # this is an instance of the class Endpoint
api_instance.pins_get() # this invokes api_instance.pins_get.__call__()
which then invokes the callable functions stored in that endpoint at
api_instance.pins_get.callable or self.callable in this class
"""
return self.callable(self, *args, **kwargs)
def call_with_http_info(self, **kwargs):
try:
index = self.api_client.configuration.server_operation_index.get(
self.settings['operation_id'], self.api_client.configuration.server_index
) if kwargs['_host_index'] is None else kwargs['_host_index']
server_variables = self.api_client.configuration.server_operation_variables.get(
self.settings['operation_id'], self.api_client.configuration.server_variables
)
_host = self.api_client.configuration.get_host_from_settings(
index, variables=server_variables, servers=self.settings['servers']
)
except IndexError:
if self.settings['servers']:
raise ApiValueError(
"Invalid host index. Must be 0 <= index < %s" %
len(self.settings['servers'])
)
_host = None
for key, value in kwargs.items():
if key not in self.params_map['all']:
raise ApiTypeError(
"Got an unexpected parameter '%s'"
" to method `%s`" %
(key, self.settings['operation_id'])
)
# only throw this nullable ApiValueError if _check_input_type
# is False, if _check_input_type==True we catch this case
# in self.__validate_inputs
if (key not in self.params_map['nullable'] and value is None
and kwargs['_check_input_type'] is False):
raise ApiValueError(
"Value may not be None for non-nullable parameter `%s`"
" when calling `%s`" %
(key, self.settings['operation_id'])
)
for key in self.params_map['required']:
if key not in kwargs.keys():
raise ApiValueError(
"Missing the required parameter `%s` when calling "
"`%s`" % (key, self.settings['operation_id'])
)
self.__validate_inputs(kwargs)
params = self.__gather_params(kwargs)
accept_headers_list = self.headers_map['accept']
if accept_headers_list:
params['header']['Accept'] = self.api_client.select_header_accept(
accept_headers_list)
content_type_headers_list = self.headers_map['content_type']
if content_type_headers_list:
header_list = self.api_client.select_header_content_type(
content_type_headers_list)
params['header']['Content-Type'] = header_list
return self.api_client.call_api(
self.settings['endpoint_path'], self.settings['http_method'],
params['path'],
params['query'],
params['header'],
body=params['body'],
post_params=params['form'],
files=params['file'],
response_type=self.settings['response_type'],
auth_settings=self.settings['auth'],
async_req=kwargs['async_req'],
_check_type=kwargs['_check_return_type'],
_return_http_data_only=kwargs['_return_http_data_only'],
_preload_content=kwargs['_preload_content'],
_request_timeout=kwargs['_request_timeout'],
_host=_host,
collection_formats=params['collection_format'])
| []
| []
| []
| [] | [] | python | null | null | null |
pantheon-bundle/src/main/java/com/redhat/pantheon/sling/PantheonRepositoryInitializer.java | package com.redhat.pantheon.sling;
import org.apache.sling.api.resource.ModifiableValueMap;
import org.apache.sling.api.resource.PersistenceException;
import org.apache.sling.api.resource.ResourceResolver;
import org.apache.sling.jcr.api.SlingRepository;
import org.apache.sling.jcr.api.SlingRepositoryInitializer;
import org.osgi.service.component.annotations.Activate;
import org.osgi.service.component.annotations.Component;
import org.osgi.service.component.annotations.Reference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.jcr.RepositoryException;
/**
* Created by ben on 3/7/19.
*/
@Component(service = SlingRepositoryInitializer.class)
public class PantheonRepositoryInitializer implements SlingRepositoryInitializer {
private static final Logger log = LoggerFactory.getLogger(PantheonRepositoryInitializer.class);
private ServiceResourceResolverProvider serviceResourceResolverProvider;
@Activate
public PantheonRepositoryInitializer(@Reference ServiceResourceResolverProvider serviceResourceResolverProvider) {
this.serviceResourceResolverProvider = serviceResourceResolverProvider;
}
@Override
public void processRepository(SlingRepository slingRepository) throws Exception {
setSyncServiceUrl();
setPortalUrl();
setFrontEndRedirect();
}
private void setSyncServiceUrl() throws RepositoryException, PersistenceException {
try (ResourceResolver resourceResolver = serviceResourceResolverProvider.getServiceResourceResolver()) {
String syncServiceUrl = getSyncServiceUrl();
if (syncServiceUrl != null) {
resourceResolver.getResource("/conf/pantheon")
.adaptTo(ModifiableValueMap.class)
.put("pant:syncServiceUrl", syncServiceUrl);
resourceResolver.commit();
log.info("Synchronization service URL: " + syncServiceUrl);
} else {
log.info("Environment Variable SYNC_SERVICE_URL is not set.");
}
}
}
private void setFrontEndRedirect() throws RepositoryException, PersistenceException {
try (ResourceResolver resourceResolver = serviceResourceResolverProvider.getServiceResourceResolver()) {
resourceResolver.getResource("/content")
.adaptTo(ModifiableValueMap.class)
.put("sling:resourceType", "sling:redirect");
resourceResolver.getResource("/content")
.adaptTo(ModifiableValueMap.class)
.put("sling:target", "/pantheon");
resourceResolver.commit();
log.info("Setting /pantheon redirect on /content");
}
}
private void setPortalUrl() throws RepositoryException, PersistenceException {
try (ResourceResolver resourceResolver = serviceResourceResolverProvider.getServiceResourceResolver()) {
String portalUrl = getPortalUrl();
if (portalUrl != null) {
resourceResolver.getResource("/conf/pantheon")
.adaptTo(ModifiableValueMap.class)
.put("pant:portalUrl", portalUrl);
resourceResolver.commit();
log.info("Portal URL: " + portalUrl);
} else {
log.info("Environment Variable PORTAL_URL is not set.");
}
}
}
/**
* Retrieves the environment variable value for the sync service url
*/
String getSyncServiceUrl() {
return System.getenv("SYNC_SERVICE_URL");
}
/**
* Retrieves the environment variable value for the portal url
*/
String getPortalUrl() {
return System.getenv("PORTAL_URL");
}
}
| [
"\"SYNC_SERVICE_URL\"",
"\"PORTAL_URL\""
]
| []
| [
"PORTAL_URL",
"SYNC_SERVICE_URL"
]
| [] | ["PORTAL_URL", "SYNC_SERVICE_URL"] | java | 2 | 0 | |
vendor/github.com/influxdata/influxdb/services/snapshotter/client.go | package snapshotter
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"github.com/influxdata/influxdb/services/meta"
"github.com/influxdata/influxdb/tcp"
)
// Client provides an API for the snapshotter service.
type Client struct {
host string
}
// NewClient returns a new *Client.
func NewClient(host string) *Client {
return &Client{host: host}
}
// MetastoreBackup returns a snapshot of the meta store.
func (c *Client) MetastoreBackup() (*meta.Data, error) {
req := &Request{
Type: RequestMetastoreBackup,
}
b, err := c.doRequest(req)
if err != nil {
return nil, err
}
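	// The layout below is inferred from this parsing code (not an official,
	// documented wire format): an 8-byte big-endian magic value, an 8-byte
	// big-endian length, then <length> bytes of marshalled meta.Data.
	// A sketch of the same parse using a reader, for illustration only:
	//   var magic, n uint64
	//   r := bytes.NewReader(b)
	//   binary.Read(r, binary.BigEndian, &magic)
	//   binary.Read(r, binary.BigEndian, &n)
	//   metaBytes := make([]byte, n)
	//   io.ReadFull(r, metaBytes)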
// Check the magic.
magic := binary.BigEndian.Uint64(b[:8])
if magic != BackupMagicHeader {
return nil, errors.New("invalid metadata received")
}
i := 8
// Size of the meta store bytes.
length := int(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
metaBytes := b[i : i+length]
i += int(length)
// Unpack meta data.
var data meta.Data
if err := data.UnmarshalBinary(metaBytes); err != nil {
return nil, fmt.Errorf("unmarshal: %s", err)
}
return &data, nil
}
// doRequest sends a request to the snapshotter service and returns the result.
func (c *Client) doRequest(req *Request) ([]byte, error) {
// Connect to snapshotter service.
conn, err := tcp.Dial("tcp", c.host, MuxHeader)
if err != nil {
return nil, err
}
defer conn.Close()
// Write the request
if err := json.NewEncoder(conn).Encode(req); err != nil {
return nil, fmt.Errorf("encode snapshot request: %s", err)
}
// Read snapshot from the connection
var buf bytes.Buffer
_, err = io.Copy(&buf, conn)
return buf.Bytes(), err
}
| []
| []
| []
| [] | [] | go | null | null | null |
threebody.py | import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
from random import *
import numpy as np
import time
import sys
# initiate pygame and clock
pygame.init()
clock = pygame.time.Clock()
game_font = pygame.font.SysFont('ubuntu', 15)
# dimensions
WIDTH = 1540
HEIGHT = 865
# gravitational constant
g = 4.4
#Color Section
BLACK = (0,0,0)
GREY = (128,128,128) #mercury
YELLOWISH = (165,124,27) #venus
BLUE = (0,0,225) #for earth
RED = (198, 123, 92) #mars
BROWN = (144, 97, 77) #jupiter
CARMEL = (195, 161, 113) #saturn
URANUS_BLUE = (79, 208, 231) #uranus
NEPTUNE = (62, 84, 232) #neptune
WHITE = (255, 255, 255) #for text
YELLOW = (255, 255, 0) #for sun
DARK_GREY = (80,78,81) #orbit
# set up surface plane
surface = pygame.display.set_mode((WIDTH, HEIGHT)) # ((width, height))
pygame.display.set_caption('3 body')
surface.fill(BLACK)
# trails
global trails_active
trails_active = True
# trails button
trails_button = pygame.Rect(0, 0, 100, 50)
trails_button_surface = game_font.render("TRAILS", True, (0, 0, 0))
pygame.draw.rect(surface, WHITE, trails_button)
surface.blit(trails_button_surface, (50, 10))
# exit button
exit_button = pygame.Rect(WIDTH-100, 0, 100, 50)
exit_button_surface = game_font.render("EXIT", True, (0, 0, 0))
pygame.draw.rect(surface, WHITE, exit_button)
surface.blit(exit_button_surface, (WIDTH-90, 10))
# reset button
reset_button = pygame.Rect(WIDTH/2 - 50, 0, 100, 50)
reset_button_surface = game_font.render("RESET", True, (0, 0, 0))
pygame.draw.rect(surface, WHITE, reset_button)
surface.blit(reset_button_surface, (WIDTH/2 - 30, 10))
### body object
class Body(object):
def __init__(self, m, x, y, c):
"""
        Mass m is passed (randomized at the call site). Position x, y is
        passed and fixed at the call site. Initial velocity is not passed; it
        is set to a small random vector. Initial acceleration is not passed;
        it is set to 0. Colour is passed. Radius is derived from the mass
        (density is 1), so it is not passed.
"""
self.mass = m
self.position = np.array([x, y])
self.last_position = np.array([x, y])
self.velocity = np.array([randint(-1,1), randint(-1,1)])
self.accel = np.array([0, 0])
self.color = c
self.radius = m * 1 # density is 1
def applyForce(self, force):
# apply forces to a body
f = force / self.mass
self.accel = np.add(self.accel, f)
def update(self):
# update position based on velocity and reset accel
self.velocity = np.add(self.velocity, self.accel)
self.last_position = self.position
self.position = np.add(self.position, self.velocity)
self.accel = 0
if(self.position[0] > WIDTH) or (self.position[0] < 0) or (self.position[1] > HEIGHT) or (self.position[1] < 0):
self.randomize_position()
print("object left screen")
def display(self):
# draw over old object location
pygame.draw.circle(surface, BLACK, (int(self.last_position[0]), int(self.last_position[1])), self.radius) # (drawLayer, color, (coordinates), radius)
# draw trail (Thickness set to 5, color white)
if trails_active == True:
pygame.draw.line(surface, WHITE, (int(self.last_position[0]), int(self.last_position[1])), (int(self.position[0]), int(self.position[1])), 5)
# draw new object location
pygame.draw.circle(surface, self.color, (int(self.position[0]), int(self.position[1])), self.radius)
def attract(self, m, g):
# gravitational code rewritten from Daniel Shiffman's "Nature of Code"
force = self.position - m.position
distance = np.linalg.norm(force)
distance = constrain(distance, 5.0, 25.0)
force = normalize(force)
strength = (g * self.mass * m.mass) / float(distance * distance)
force = force * strength
return force
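        # Worked example of the formula above (illustrative numbers, not from
        # the original source): with g = 4.4, masses 5 and 8, and a clamped
        # distance of 10, strength = 4.4 * 5 * 8 / (10 * 10) = 1.76, applied
        # along the normalized direction vector between the two bodies.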
def randomize_position(self):
self.position[0] = randrange(1000)
self.position[1] = randrange(600)
self.velocity = np.array([0, 0])
return
############################## set up and draw
def setup():
# 3bodies
    body1 = Body(randint(1, 10), 700, 200, BLUE)   # mass >= 1 so applyForce never divides by zero
    body2 = Body(randint(1, 10), 600, 200, RED)
    body3 = Body(randint(1, 10), 500, 286, YELLOW)
# list of all bodies
global bodies
bodies = [body1, body2, body3]
return
def draw():
# for each body: apply forces, update position, and draw
for body in bodies:
for other_body in bodies:
if (body != other_body):
global g
force = other_body.attract(body, g)
body.applyForce(force)
body.update()
body.display()
# Re-draw buttons
pygame.draw.rect(surface, WHITE, trails_button)
surface.blit(trails_button_surface, (10, 10))
pygame.draw.rect(surface, WHITE, exit_button)
surface.blit(exit_button_surface, (WIDTH-90, 10))
pygame.draw.rect(surface, WHITE, reset_button)
surface.blit(reset_button_surface, (WIDTH/2 - 30, 10))
return
############################## mathematical functions
def constrain(val, min_val, max_val):
return min(max_val, max(min_val, val))
def normalize(force):
normal = np.linalg.norm(force, ord=1)
if normal == 0:
normal = np.finfo(force.dtype).eps
return force / normal
############################## main loop
if __name__ == "__main__":
# initial set up
setup()
while True:
# draw bodies to screen
draw()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
mouse_pos = event.pos
# trails button
if trails_button.collidepoint(mouse_pos):
#print("trails button pushed")
if trails_active == True:
trails_active = False
surface.fill(BLACK)
else:
trails_active = True
if exit_button.collidepoint(mouse_pos):
pygame.quit()
sys.exit()
if reset_button.collidepoint(mouse_pos):
for body in bodies:
body.randomize_position()
surface.fill(BLACK)
pygame.display.update()
time.sleep(0.05) | []
| []
| [
"PYGAME_HIDE_SUPPORT_PROMPT"
]
| [] | ["PYGAME_HIDE_SUPPORT_PROMPT"] | python | 1 | 0 | |
cmd/gardenlet/app/gardenlet.go | // Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"bufio"
"context"
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
cmdutils "github.com/gardener/gardener/cmd/utils"
gardencore "github.com/gardener/gardener/pkg/apis/core"
gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
gardencoreinformers "github.com/gardener/gardener/pkg/client/core/informers/externalversions"
"github.com/gardener/gardener/pkg/client/kubernetes"
"github.com/gardener/gardener/pkg/client/kubernetes/clientmap"
clientmapbuilder "github.com/gardener/gardener/pkg/client/kubernetes/clientmap/builder"
"github.com/gardener/gardener/pkg/client/kubernetes/clientmap/keys"
"github.com/gardener/gardener/pkg/features"
"github.com/gardener/gardener/pkg/gardenlet/apis/config"
configv1alpha1 "github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1"
configvalidation "github.com/gardener/gardener/pkg/gardenlet/apis/config/validation"
"github.com/gardener/gardener/pkg/gardenlet/bootstrap"
"github.com/gardener/gardener/pkg/gardenlet/bootstrap/certificate"
"github.com/gardener/gardener/pkg/gardenlet/controller"
seedcontroller "github.com/gardener/gardener/pkg/gardenlet/controller/seed"
gardenletfeatures "github.com/gardener/gardener/pkg/gardenlet/features"
"github.com/gardener/gardener/pkg/healthz"
"github.com/gardener/gardener/pkg/logger"
"github.com/gardener/gardener/pkg/server"
"github.com/gardener/gardener/pkg/utils"
kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
"github.com/gardener/gardener/pkg/utils/secrets"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
certificatesv1 "k8s.io/api/certificates/v1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
eventsv1 "k8s.io/api/events/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
kubernetesclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/record"
"k8s.io/component-base/version"
"k8s.io/component-base/version/verflag"
)
// Options has all the context and parameters needed to run a Gardenlet.
type Options struct {
// ConfigFile is the location of the Gardenlet's configuration file.
ConfigFile string
config *config.GardenletConfiguration
scheme *runtime.Scheme
codecs serializer.CodecFactory
}
// AddFlags adds flags for a specific Gardenlet to the specified FlagSet.
func (o *Options) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "The path to the configuration file.")
}
// NewOptions returns a new Options object.
func NewOptions() (*Options, error) {
o := &Options{
config: new(config.GardenletConfiguration),
}
o.scheme = runtime.NewScheme()
o.codecs = serializer.NewCodecFactory(o.scheme)
if err := config.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := configv1alpha1.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := gardencore.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := gardencorev1beta1.AddToScheme(o.scheme); err != nil {
return nil, err
}
return o, nil
}
// loadConfigFromFile loads the content of file and decodes it as a
// GardenletConfiguration object.
func (o *Options) loadConfigFromFile(file string) (*config.GardenletConfiguration, error) {
data, err := os.ReadFile(file)
if err != nil {
return nil, err
}
return o.decodeConfig(data)
}
// decodeConfig decodes data as a GardenletConfiguration object.
func (o *Options) decodeConfig(data []byte) (*config.GardenletConfiguration, error) {
gardenletConfig := &config.GardenletConfiguration{}
if _, _, err := o.codecs.UniversalDecoder().Decode(data, nil, gardenletConfig); err != nil {
return nil, err
}
return gardenletConfig, nil
}
func (o *Options) configFileSpecified() error {
if len(o.ConfigFile) == 0 {
return fmt.Errorf("missing Gardenlet config file")
}
return nil
}
// Validate validates all the required options.
func (o *Options) validate(args []string) error {
if len(args) != 0 {
return errors.New("arguments are not supported")
}
return nil
}
func run(ctx context.Context, o *Options) error {
if len(o.ConfigFile) > 0 {
c, err := o.loadConfigFromFile(o.ConfigFile)
if err != nil {
return fmt.Errorf("unable to read the configuration file: %v", err)
}
if errs := configvalidation.ValidateGardenletConfiguration(c, nil, false); len(errs) > 0 {
return fmt.Errorf("errors validating the configuration: %+v", errs)
}
o.config = c
}
// Add feature flags
if err := gardenletfeatures.FeatureGate.SetFromMap(o.config.FeatureGates); err != nil {
return err
}
kubernetes.UseCachedRuntimeClients = gardenletfeatures.FeatureGate.Enabled(features.CachedRuntimeClients)
if gardenletfeatures.FeatureGate.Enabled(features.ReversedVPN) &&
(!gardenletfeatures.FeatureGate.Enabled(features.APIServerSNI) ||
gardenletfeatures.FeatureGate.Enabled(features.KonnectivityTunnel)) {
return fmt.Errorf("inconsistent feature gate: APIServerSNI is required for ReversedVPN (APIServerSNI: %t, ReversedVPN: %t) and ReversedVPN is not compatible with KonnectivityTunnel (KonnectivityTunnel: %t)",
gardenletfeatures.FeatureGate.Enabled(features.APIServerSNI), gardenletfeatures.FeatureGate.Enabled(features.ReversedVPN), gardenletfeatures.FeatureGate.Enabled(features.KonnectivityTunnel))
}
gardenlet, err := NewGardenlet(ctx, o.config)
if err != nil {
return err
}
return gardenlet.Run(ctx)
}
// NewCommandStartGardenlet creates a *cobra.Command object with default parameters
func NewCommandStartGardenlet() *cobra.Command {
opts, err := NewOptions()
if err != nil {
panic(err)
}
cmd := &cobra.Command{
Use: "gardenlet",
Short: "Launch the Gardenlet",
Long: `In essence, the Gardener is an extension API server along with a bundle
of Kubernetes controllers which introduce new API objects in an existing Kubernetes
cluster (which is called Garden cluster) in order to use them for the management of
further Kubernetes clusters (which are called Shoot clusters).
To do that reliably and to offer a certain quality of service, it needs to control
the main components of a Kubernetes cluster (etcd, API server, controller manager, scheduler).
These so-called control plane components are hosted in Kubernetes clusters themselves
(which are called Seed clusters).`,
RunE: func(cmd *cobra.Command, args []string) error {
verflag.PrintAndExitIfRequested()
if err := opts.configFileSpecified(); err != nil {
return err
}
if err := opts.validate(args); err != nil {
return err
}
return run(cmd.Context(), opts)
},
}
flags := cmd.Flags()
verflag.AddFlags(flags)
opts.AddFlags(flags)
return cmd
}
// Gardenlet represents all the parameters required to start the
// Gardenlet.
type Gardenlet struct {
Config *config.GardenletConfiguration
Identity *gardencorev1beta1.Gardener
GardenClusterIdentity string
ClientMap clientmap.ClientMap
K8sGardenCoreInformers gardencoreinformers.SharedInformerFactory
Logger *logrus.Logger
Recorder record.EventRecorder
LeaderElection *leaderelection.LeaderElectionConfig
HealthManager healthz.Manager
CertificateManager *certificate.Manager
}
// NewGardenlet is the main entry point of instantiating a new Gardenlet.
func NewGardenlet(ctx context.Context, cfg *config.GardenletConfiguration) (*Gardenlet, error) {
if cfg == nil {
return nil, errors.New("config is required")
}
// Initialize logger
logger := logger.NewLogger(*cfg.LogLevel)
logger.Info("Starting Gardenlet...")
logger.Infof("Feature Gates: %s", gardenletfeatures.FeatureGate.String())
if flag := flag.Lookup("v"); flag != nil {
if err := flag.Value.Set(fmt.Sprintf("%d", cfg.KubernetesLogLevel)); err != nil {
return nil, err
}
}
// Prepare a Kubernetes client object for the Garden cluster which contains all the Clientsets
// that can be used to access the Kubernetes API.
if kubeconfig := os.Getenv("GARDEN_KUBECONFIG"); kubeconfig != "" {
cfg.GardenClientConnection.Kubeconfig = kubeconfig
}
if kubeconfig := os.Getenv("KUBECONFIG"); kubeconfig != "" {
cfg.SeedClientConnection.Kubeconfig = kubeconfig
}
var (
kubeconfigFromBootstrap []byte
csrName string
seedName string
err error
)
// constructs a seed client for `SeedClientConnection.kubeconfig` or if not set,
// creates a seed client based on the service account token mounted into the gardenlet container running in Kubernetes
// when running outside of Kubernetes, `SeedClientConnection.kubeconfig` has to be set either directly or via the environment variable "KUBECONFIG"
seedClient, err := kubernetes.NewClientFromFile(
"",
cfg.SeedClientConnection.ClientConnectionConfiguration.Kubeconfig,
kubernetes.WithClientConnectionOptions(cfg.SeedClientConnection.ClientConnectionConfiguration),
kubernetes.WithDisabledCachedClient(),
)
if err != nil {
return nil, err
}
if cfg.GardenClientConnection.KubeconfigSecret != nil {
kubeconfigFromBootstrap, csrName, seedName, err = bootstrapKubeconfig(ctx, logger, seedClient.Client(), cfg)
if err != nil {
return nil, err
}
} else {
logger.Info("No kubeconfig secret given in the configuration under `.gardenClientConnection.kubeconfigSecret`. Skipping the kubeconfig bootstrap process and certificate rotation.")
}
if kubeconfigFromBootstrap == nil {
logger.Info("Falling back to the kubeconfig specified in the configuration under `.gardenClientConnection.kubeconfig`")
if len(cfg.GardenClientConnection.Kubeconfig) == 0 {
return nil, fmt.Errorf("the configuration file needs to either specify a Garden API Server kubeconfig under `.gardenClientConnection.kubeconfig` or provide bootstrapping information. " +
"To configure the Gardenlet for bootstrapping, provide the secret containing the bootstrap kubeconfig under `.gardenClientConnection.kubeconfigSecret` and also the secret name where the created kubeconfig should be stored for further use via`.gardenClientConnection.kubeconfigSecret`")
}
}
restCfg, err := kubernetes.RESTConfigFromClientConnectionConfiguration(&cfg.GardenClientConnection.ClientConnectionConfiguration, kubeconfigFromBootstrap)
if err != nil {
return nil, err
}
gardenClientMapBuilder := clientmapbuilder.NewGardenClientMapBuilder().
WithRESTConfig(restCfg).
// gardenlet does not have the required RBAC permissions for listing/watching the following resources, so let's prevent any
// attempts to cache them
WithUncached(
&gardencorev1alpha1.ExposureClass{},
&gardencorev1alpha1.ShootState{},
&gardencorev1beta1.CloudProfile{},
&gardencorev1beta1.ControllerDeployment{},
&gardencorev1beta1.Project{},
&gardencorev1beta1.SecretBinding{},
&certificatesv1.CertificateSigningRequest{},
&certificatesv1beta1.CertificateSigningRequest{},
&coordinationv1.Lease{},
&corev1.Namespace{},
&corev1.ConfigMap{},
&corev1.Event{},
&eventsv1.Event{},
)
if seedConfig := cfg.SeedConfig; seedConfig != nil {
gardenClientMapBuilder = gardenClientMapBuilder.ForSeed(seedConfig.Name)
}
seedClientMapBuilder := clientmapbuilder.NewSeedClientMapBuilder().
WithInCluster(cfg.SeedSelector == nil).
WithClientConnectionConfig(&cfg.SeedClientConnection.ClientConnectionConfiguration)
shootClientMapBuilder := clientmapbuilder.NewShootClientMapBuilder().
WithClientConnectionConfig(&cfg.ShootClientConnection.ClientConnectionConfiguration)
clientMap, err := clientmapbuilder.NewDelegatingClientMapBuilder().
WithGardenClientMapBuilder(gardenClientMapBuilder).
WithSeedClientMapBuilder(seedClientMapBuilder).
WithShootClientMapBuilder(shootClientMapBuilder).
WithLogger(logger).
Build()
if err != nil {
return nil, fmt.Errorf("failed to build ClientMap: %w", err)
}
k8sGardenClient, err := clientMap.GetClient(ctx, keys.ForGarden())
if err != nil {
return nil, fmt.Errorf("failed to get garden client: %w", err)
}
// Delete bootstrap auth data if certificate was newly acquired
if len(csrName) > 0 && len(seedName) > 0 {
logger.Infof("Deleting bootstrap authentication data used to request a certificate")
if err := bootstrap.DeleteBootstrapAuth(ctx, k8sGardenClient.Client(), k8sGardenClient.Client(), csrName, seedName); err != nil {
return nil, err
}
}
// Set up leader election if enabled and prepare event recorder.
var (
leaderElectionConfig *leaderelection.LeaderElectionConfig
recorder = cmdutils.CreateRecorder(k8sGardenClient.Kubernetes(), "gardenlet")
)
if cfg.LeaderElection.LeaderElect {
seedRestCfg, err := kubernetes.RESTConfigFromClientConnectionConfiguration(&cfg.SeedClientConnection.ClientConnectionConfiguration, nil)
if err != nil {
return nil, err
}
k8sSeedClientLeaderElection, err := kubernetesclientset.NewForConfig(seedRestCfg)
if err != nil {
return nil, fmt.Errorf("failed to create client for leader election: %w", err)
}
leaderElectionConfig, err = cmdutils.MakeLeaderElectionConfig(
cfg.LeaderElection.LeaderElectionConfiguration,
*cfg.LeaderElection.LockObjectNamespace,
*cfg.LeaderElection.LockObjectName,
k8sSeedClientLeaderElection,
cmdutils.CreateRecorder(k8sSeedClientLeaderElection, "gardenlet"),
)
if err != nil {
return nil, err
}
}
identity, err := determineGardenletIdentity()
if err != nil {
return nil, err
}
gardenClusterIdentity := &corev1.ConfigMap{}
if err := k8sGardenClient.Client().Get(ctx, kutil.Key(metav1.NamespaceSystem, v1beta1constants.ClusterIdentity), gardenClusterIdentity); err != nil {
return nil, fmt.Errorf("unable to get Gardener`s cluster-identity ConfigMap: %v", err)
}
clusterIdentity, ok := gardenClusterIdentity.Data[v1beta1constants.ClusterIdentity]
if !ok {
return nil, errors.New("unable to extract Gardener`s cluster identity from cluster-identity ConfigMap")
}
// create the certificate manager to schedule certificate rotations
var certificateManager *certificate.Manager
if cfg.GardenClientConnection.KubeconfigSecret != nil {
certificateManager = certificate.NewCertificateManager(clientMap, seedClient.Client(), cfg)
}
return &Gardenlet{
Identity: identity,
GardenClusterIdentity: clusterIdentity,
Config: cfg,
Logger: logger,
Recorder: recorder,
ClientMap: clientMap,
K8sGardenCoreInformers: gardencoreinformers.NewSharedInformerFactory(k8sGardenClient.GardenCore(), 0),
LeaderElection: leaderElectionConfig,
CertificateManager: certificateManager,
}, nil
}
// Run runs the Gardenlet. This should never exit.
func (g *Gardenlet) Run(ctx context.Context) error {
controllerCtx, controllerCancel := context.WithCancel(ctx)
defer controllerCancel()
// Initialize /healthz manager.
g.HealthManager = healthz.NewPeriodicHealthz(seedcontroller.LeaseResyncGracePeriodSeconds * time.Second)
if g.CertificateManager != nil {
g.CertificateManager.ScheduleCertificateRotation(controllerCtx, controllerCancel, g.Recorder)
}
// Start HTTPS server.
if g.Config.Server.HTTPS.TLS == nil {
g.Logger.Info("No TLS server certificates provided... self-generating them now...")
_, _, tempDir, err := secrets.SelfGenerateTLSServerCertificate("gardenlet", []string{
"gardenlet",
fmt.Sprintf("gardenlet.%s", v1beta1constants.GardenNamespace),
fmt.Sprintf("gardenlet.%s.svc", v1beta1constants.GardenNamespace),
}, nil)
if err != nil {
return err
}
g.Config.Server.HTTPS.TLS = &config.TLSServer{
ServerCertPath: filepath.Join(tempDir, secrets.DataKeyCertificate),
ServerKeyPath: filepath.Join(tempDir, secrets.DataKeyPrivateKey),
}
g.Logger.Info("TLS server certificates successfully self-generated.")
}
go server.
NewBuilder().
WithBindAddress(g.Config.Server.HTTPS.BindAddress).
WithPort(g.Config.Server.HTTPS.Port).
WithTLS(g.Config.Server.HTTPS.TLS.ServerCertPath, g.Config.Server.HTTPS.TLS.ServerKeyPath).
WithHandler("/metrics", promhttp.Handler()).
WithHandlerFunc("/healthz", healthz.HandlerFunc(g.HealthManager)).
Build().
Start(ctx)
// Prepare a reusable run function.
run := func(ctx context.Context) error {
g.HealthManager.Start()
return g.startControllers(ctx)
}
leaderElectionCtx, leaderElectionCancel := context.WithCancel(context.Background())
// If leader election is enabled, run via LeaderElector until done and exit.
if g.LeaderElection != nil {
g.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
g.Logger.Info("Acquired leadership, starting controllers.")
if err := run(controllerCtx); err != nil {
g.Logger.Errorf("failed to run gardenlet controllers: %v", err)
}
leaderElectionCancel()
},
OnStoppedLeading: func() {
g.Logger.Info("Lost leadership, terminating.")
controllerCancel()
},
}
leaderElector, err := leaderelection.NewLeaderElector(*g.LeaderElection)
if err != nil {
return fmt.Errorf("couldn't create leader elector: %v", err)
}
leaderElector.Run(leaderElectionCtx)
return nil
}
// Leader election is disabled, thus run directly until done.
leaderElectionCancel()
err := run(controllerCtx)
if err != nil {
g.Logger.Errorf("failed to run gardenlet controllers: %v", err)
}
return err
}
func (g *Gardenlet) startControllers(ctx context.Context) error {
return controller.NewGardenletControllerFactory(
g.ClientMap,
g.K8sGardenCoreInformers,
g.Config,
g.Identity,
g.GardenClusterIdentity,
g.Recorder,
g.HealthManager,
).Run(ctx)
}
// We want to determine the Docker container id of the currently running Gardenlet because
// we need to identify for still ongoing operations whether another Gardenlet instance is
// still operating the respective Shoots. When running locally, we generate a random string because
// there is no container id.
func determineGardenletIdentity() (*gardencorev1beta1.Gardener, error) {
var (
validID = regexp.MustCompile(`([0-9a-f]{64})`)
gardenletID string
gardenletName string
err error
)
gardenletName, err = os.Hostname()
if err != nil {
return nil, fmt.Errorf("unable to get hostname: %v", err)
}
// If running inside a Kubernetes cluster (as container) we can read the container id from the proc file system.
// Otherwise generate a random string for the gardenletID
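	// For illustration (the concrete paths are made up, only the shape matters):
	//   cgroup v1 line: "1:name=systemd:/kubepods/burstable/pod<uid>/docker-<64-hex-chars>.scope"
	//   cgroup v2 line: "0::/system.slice/docker-<64-hex-chars>.scope"
	// extractID below keeps the last "/"-separated segment and strips the
	// "docker-" prefix and ".scope" suffix, and validID then checks for a
	// 64-character hex id.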
if cGroupFile, err := os.Open("/proc/self/cgroup"); err == nil {
defer cGroupFile.Close()
reader := bufio.NewReader(cGroupFile)
var cgroupV1 string
for {
line, err := reader.ReadString('\n')
if err != nil {
break
}
// Store cgroup-v1 result for fall back
if strings.HasPrefix(line, "1:name=systemd") {
cgroupV1 = line
}
// Always prefer cgroup-v2
if strings.HasPrefix(line, "0::") {
if id := extractID(line); validID.MatchString(id) {
gardenletID = id
break
}
}
}
// Fall-back to cgroup-v1 if possible
if len(gardenletID) == 0 && len(cgroupV1) > 0 {
gardenletID = extractID(cgroupV1)
}
}
if gardenletID == "" {
gardenletID, err = utils.GenerateRandomString(64)
if err != nil {
return nil, fmt.Errorf("unable to generate gardenletID: %v", err)
}
}
return &gardencorev1beta1.Gardener{
ID: gardenletID,
Name: gardenletName,
Version: version.Get().GitVersion,
}, nil
}
func extractID(line string) string {
var (
id string
splitBySlash = strings.Split(line, "/")
)
if len(splitBySlash) == 0 {
return ""
}
id = strings.TrimSpace(splitBySlash[len(splitBySlash)-1])
id = strings.TrimSuffix(id, ".scope")
id = strings.TrimPrefix(id, "docker-")
return id
}
| [
"\"GARDEN_KUBECONFIG\"",
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG",
"GARDEN_KUBECONFIG"
]
| [] | ["KUBECONFIG", "GARDEN_KUBECONFIG"] | go | 2 | 0 | |
cmd/openshift-tests/openshift-tests.go | package main
import (
"flag"
goflag "flag"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
utilflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
"k8s.io/kubectl/pkg/util/templates"
"github.com/openshift/library-go/pkg/serviceability"
"github.com/openshift/origin/pkg/monitor"
"github.com/openshift/origin/pkg/monitor/resourcewatch/cmd"
testginkgo "github.com/openshift/origin/pkg/test/ginkgo"
"github.com/openshift/origin/pkg/version"
exutil "github.com/openshift/origin/test/extended/util"
)
func main() {
logs.InitLogs()
defer logs.FlushLogs()
rand.Seed(time.Now().UTC().UnixNano())
pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
root := &cobra.Command{
Long: templates.LongDesc(`
OpenShift Tests
This command verifies behavior of an OpenShift cluster by running remote tests against
the cluster API that exercise functionality. In general these tests may be disruptive
or require elevated privileges - see the descriptions of each test suite.
`),
}
root.AddCommand(
newRunCommand(),
newRunUpgradeCommand(),
newRunTestCommand(),
newRunMonitorCommand(),
cmd.NewRunResourceWatchCommand(),
)
pflag.CommandLine = pflag.NewFlagSet("empty", pflag.ExitOnError)
flag.CommandLine = flag.NewFlagSet("empty", flag.ExitOnError)
exutil.InitStandardFlags()
if err := func() error {
defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop()
return root.Execute()
}(); err != nil {
if ex, ok := err.(testginkgo.ExitError); ok {
os.Exit(ex.Code)
}
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
}
func newRunMonitorCommand() *cobra.Command {
monitorOpt := &monitor.Options{
Out: os.Stdout,
ErrOut: os.Stderr,
}
cmd := &cobra.Command{
Use: "run-monitor",
Short: "Continuously verify the cluster is functional",
Long: templates.LongDesc(`
Run a continuous verification process
`),
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
return monitorOpt.Run()
},
}
return cmd
}
func newRunCommand() *cobra.Command {
opt := &testginkgo.Options{
Suites: staticSuites,
}
cmd := &cobra.Command{
Use: "run SUITE",
Short: "Run a test suite",
Long: templates.LongDesc(`
Run a test suite against an OpenShift server
This command will run one of the following suites against a cluster identified by the current
KUBECONFIG file. See the suite description for more on what actions the suite will take.
If you specify the --dry-run argument, the names of each individual test that is part of the
suite will be printed, one per line. You may filter this list and pass it back to the run
command with the --file argument. You may also pipe a list of test names, one per line, on
standard input by passing "-f -".
`) + testginkgo.SuitesString(opt.Suites, "\n\nAvailable test suites:\n\n"),
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
return mirrorToFile(opt, func() error {
if !opt.DryRun {
fmt.Fprintf(os.Stderr, "%s version: %s\n", filepath.Base(os.Args[0]), version.Get().String())
}
config, err := decodeProvider(opt.Provider, opt.DryRun, true)
if err != nil {
return err
}
opt.Provider = config.ToJSONString()
matchFn, err := initializeTestFramework(exutil.TestContext, config, opt.DryRun)
if err != nil {
return err
}
opt.MatchFn = matchFn
err = opt.Run(args)
if !opt.DryRun && len(args) > 0 && strings.HasPrefix(args[0], "openshift/csi") {
printStorageCapabilities(opt.Out)
}
return err
})
},
}
bindOptions(opt, cmd.Flags())
return cmd
}
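// For illustration only (the suite name and filter below are placeholders),
// the dry-run / filter / re-run flow described in the command's help text can
// be chained like:
//   openshift-tests run openshift/conformance --dry-run | grep '\[sig-network\]' | openshift-tests run -f -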
func newRunUpgradeCommand() *cobra.Command {
opt := &testginkgo.Options{Suites: upgradeSuites}
upgradeOpt := &UpgradeOptions{}
cmd := &cobra.Command{
Use: "run-upgrade SUITE",
Short: "Run an upgrade suite",
Long: templates.LongDesc(`
Run an upgrade test suite against an OpenShift server
This command will run one of the following suites against a cluster identified by the current
KUBECONFIG file. See the suite description for more on what actions the suite will take.
If you specify the --dry-run argument, the actions the suite will take will be printed to the
output.
Supported options:
* abort-at=NUMBER - Set to a number between 0 and 100 to control the percent of operators
at which to stop the current upgrade and roll back to the current version.
* disrupt-reboot=POLICY - During upgrades, periodically reboot master nodes. If set to 'graceful'
the reboot will allow the node to shut down services in an orderly fashion. If set to 'force' the
machine will terminate immediately without clean shutdown.
`) + testginkgo.SuitesString(opt.Suites, "\n\nAvailable upgrade suites:\n\n"),
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
return mirrorToFile(opt, func() error {
if len(upgradeOpt.ToImage) == 0 {
return fmt.Errorf("--to-image must be specified to run an upgrade test")
}
if len(args) > 0 {
for _, suite := range opt.Suites {
if suite.Name == args[0] {
upgradeOpt.Suite = suite.Name
upgradeOpt.JUnitDir = opt.JUnitDir
value := upgradeOpt.ToEnv()
if _, err := initUpgrade(value); err != nil {
return err
}
opt.SuiteOptions = value
break
}
}
}
config, err := decodeProvider(opt.Provider, opt.DryRun, true)
if err != nil {
return err
}
opt.Provider = config.ToJSONString()
matchFn, err := initializeTestFramework(exutil.TestContext, config, opt.DryRun)
if err != nil {
return err
}
opt.MatchFn = matchFn
return opt.Run(args)
})
},
}
bindOptions(opt, cmd.Flags())
bindUpgradeOptions(upgradeOpt, cmd.Flags())
return cmd
}
func newRunTestCommand() *cobra.Command {
testOpt := &testginkgo.TestOptions{
Out: os.Stdout,
ErrOut: os.Stderr,
}
cmd := &cobra.Command{
Use: "run-test NAME",
Short: "Run a single test by name",
Long: templates.LongDesc(`
Execute a single test
This executes a single test by name. It is used by the run command during suite execution but may also
be used to test in isolation while developing new tests.
`),
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
upgradeOpts, err := initUpgrade(os.Getenv("TEST_SUITE_OPTIONS"))
if err != nil {
return err
}
config, err := decodeProvider(os.Getenv("TEST_PROVIDER"), testOpt.DryRun, false)
if err != nil {
return err
}
if _, err := initializeTestFramework(exutil.TestContext, config, testOpt.DryRun); err != nil {
return err
}
exutil.TestContext.ReportDir = upgradeOpts.JUnitDir
exutil.WithCleanup(func() { err = testOpt.Run(args) })
return err
},
}
cmd.Flags().BoolVar(&testOpt.DryRun, "dry-run", testOpt.DryRun, "Print the test to run without executing them.")
return cmd
}
// mirrorToFile ensures a copy of all output goes to the provided OutFile, including
// any error returned from fn. The function returns fn() or any error encountered while
// attempting to open the file.
func mirrorToFile(opt *testginkgo.Options, fn func() error) error {
if opt.Out == nil {
opt.Out = os.Stdout
}
if opt.ErrOut == nil {
opt.ErrOut = os.Stderr
}
if len(opt.OutFile) == 0 {
return fn()
}
f, err := os.OpenFile(opt.OutFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640)
if err != nil {
return err
}
opt.Out = io.MultiWriter(opt.Out, f)
opt.ErrOut = io.MultiWriter(opt.ErrOut, f)
exitErr := fn()
if exitErr != nil {
fmt.Fprintf(f, "error: %s", exitErr)
}
if err := f.Close(); err != nil {
fmt.Fprintf(opt.ErrOut, "error: Unable to close output file\n")
}
return exitErr
}
func bindOptions(opt *testginkgo.Options, flags *pflag.FlagSet) {
flags.BoolVar(&opt.DryRun, "dry-run", opt.DryRun, "Print the tests to run without executing them.")
flags.BoolVar(&opt.PrintCommands, "print-commands", opt.PrintCommands, "Print the sub-commands that would be executed instead.")
flags.StringVar(&opt.JUnitDir, "junit-dir", opt.JUnitDir, "The directory to write test reports to.")
flags.StringVar(&opt.Provider, "provider", opt.Provider, "The cluster infrastructure provider. Will automatically default to the correct value.")
flags.StringVarP(&opt.TestFile, "file", "f", opt.TestFile, "Create a suite from the newline-delimited test names in this file.")
flags.StringVar(&opt.Regex, "run", opt.Regex, "Regular expression of tests to run.")
flags.StringVarP(&opt.OutFile, "output-file", "o", opt.OutFile, "Write all test output to this file.")
flags.IntVar(&opt.Count, "count", opt.Count, "Run each test a specified number of times. Defaults to 1 or the suite's preferred value.")
flags.DurationVar(&opt.Timeout, "timeout", opt.Timeout, "Set the maximum time a test can run before being aborted. This is read from the suite by default, but will be 10 minutes otherwise.")
flags.BoolVar(&opt.IncludeSuccessOutput, "include-success", opt.IncludeSuccessOutput, "Print output from successful tests.")
flags.IntVar(&opt.Parallelism, "max-parallel-tests", opt.Parallelism, "Maximum number of tests running in parallel. 0 defaults to test suite recommended value, which is different in each suite.")
}
| [
"\"OPENSHIFT_PROFILE\"",
"\"TEST_SUITE_OPTIONS\"",
"\"TEST_PROVIDER\""
]
| []
| [
"OPENSHIFT_PROFILE",
"TEST_SUITE_OPTIONS",
"TEST_PROVIDER"
]
| [] | ["OPENSHIFT_PROFILE", "TEST_SUITE_OPTIONS", "TEST_PROVIDER"] | go | 3 | 0 | |
main.go | package main
import (
"flag"
"fmt"
"os"
"github.com/razzkumar/PR-Automation/aws-client"
"github.com/razzkumar/PR-Automation/cloudfront"
"github.com/razzkumar/PR-Automation/logger"
"github.com/razzkumar/PR-Automation/s3"
"github.com/razzkumar/PR-Automation/utils"
)
func main() {
// Setting env variable
awsRegion := os.Getenv("AWS_REGION")
if awsRegion == "" {
err := os.Setenv("AWS_REGION", "us-east-2")
if err != nil {
logger.FailOnError(err, "Fail to set AWS_REGION")
}
}
var action string
	// Getting the action: whether to create, delete, or deploy
var repo utils.ProjectInfo
flag.StringVar(&action, "action", "", "It's create or delete s3 bucket")
flag.Parse()
if action == "" {
logger.FailOnNoFlag("Please provide action what to do [deploy,delete,create]")
}
if os.Getenv("GITHUB_EVENT_NAME") == "pull_request" && (action == "create" || action == "delete") {
repo = utils.GetPRInfo(repo)
} else {
repo = utils.GetInfo(repo, action)
}
// Getting session of aws
sess := awsclient.GetSession()
switch action {
case "deploy":
		err := s3.Deploy(repo, sess)
		logger.FailOnError(err, "Error on Deployment")
		invErr := cloudfront.Invalidation(repo.CloudfrontId, sess)
		logger.FailOnError(invErr, "Failed to invalidate CloudFront cache")
case "create":
err := s3.DeployAndComment(repo, sess)
logger.FailOnError(err, "Error on Deployment and commit")
case "delete":
err := s3.Delete(repo.Bucket, sess)
logger.FailOnError(err, "Error while Delete")
default:
err := fmt.Errorf("Nothing to do")
logger.FailOnError(err, "Default case")
}
}
| [
"\"AWS_REGION\"",
"\"GITHUB_EVENT_NAME\""
]
| []
| [
"GITHUB_EVENT_NAME",
"AWS_REGION"
]
| [] | ["GITHUB_EVENT_NAME", "AWS_REGION"] | go | 2 | 0 | |
tests/test_speech.py | import os, json, array, pytest, mock
import auroraapi
from tests.mocks import *
from auroraapi.globals import _config
from auroraapi.api import APIException
from auroraapi.audio import *
from auroraapi.text import Text
from auroraapi.speech import *
class TestSpeech(object):
def setup(self):
try:
_config.app_id = os.environ["APP_ID"]
_config.app_token = os.environ["APP_TOKEN"]
_config.device_id = os.environ["DEVICE_ID"]
except:
pass
def teardown(self):
_config.app_id = None
_config.app_token = None
_config.device_id = None
def test_create_no_argument(self):
with pytest.raises(TypeError):
Speech()
def test_create_none(self):
with pytest.raises(TypeError):
Speech(None)
def test_create_wrong_type(self):
with pytest.raises(TypeError):
Speech("string")
def test_create(self):
with open("tests/assets/hw.wav", "rb") as f:
Speech(AudioFile(f.read()))
def test_text(self):
with open("tests/assets/hw.wav", "rb") as f:
s = Speech(AudioFile(f.read()))
t = s.text()
assert isinstance(t, Text)
assert t.text.lower().strip() == "hello world"
class TestSpeechNoCreds(object):
def test_text(self):
with pytest.raises(APIException):
with open("tests/assets/hw.wav", "rb") as f:
Speech(AudioFile(f.read())).text()
class TestListen(object):
def setup(self):
with open("tests/assets/hw.wav", "rb") as f:
self.audio_file = AudioFile(f.read())
try:
_config.app_id = os.environ["APP_ID"]
_config.app_token = os.environ["APP_TOKEN"]
_config.device_id = os.environ["DEVICE_ID"]
except:
pass
def teardown(self):
_config.app_id = None
_config.app_token = None
_config.device_id = None
def test_listen(self):
with mock.patch('auroraapi.audio._pyaudio_record', new=mock_pyaudio_record):
s = listen()
assert isinstance(s, Speech)
assert isinstance(s.audio, AudioFile)
assert len(self.audio_file.audio) == len(s.audio.audio)
def test_continuously_listen(self):
with mock.patch('auroraapi.audio._pyaudio_record', new=mock_pyaudio_record):
for s in continuously_listen():
assert isinstance(s, Speech)
assert isinstance(s.audio, AudioFile)
assert len(self.audio_file.audio) == len(s.audio.audio)
break
def test_listen_and_transcribe(self):
with mock.patch('auroraapi.audio._pyaudio_record', new=mock_pyaudio_record):
t = listen_and_transcribe()
assert isinstance(t, Text)
assert t.text.lower().strip() == "hello world"
def test_continuously_listen_and_transcribe(self):
with mock.patch('auroraapi.audio._pyaudio_record', new=mock_pyaudio_record):
for t in continuously_listen_and_transcribe():
assert isinstance(t, Text)
assert t.text.lower().strip() == "hello world"
break
| []
| []
| [
"DEVICE_ID",
"APP_ID",
"APP_TOKEN"
]
| [] | ["DEVICE_ID", "APP_ID", "APP_TOKEN"] | python | 3 | 0 | |
nipype/pipeline/plugins/slurmgraph.py | # -*- coding: utf-8 -*-
"""Parallel workflow execution via SLURM
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import open
import os
import sys
from ...interfaces.base import CommandLine
from .base import (GraphPluginBase, logger)
def node_completed_status(checknode):
"""
A function to determine if a node has previously completed it's work
:param checknode: The node to check the run status
:return: boolean value True indicates that the node does not need to be run.
"""
""" TODO: place this in the base.py file and refactor """
node_state_does_not_require_overwrite = (checknode.overwrite is False or
(checknode.overwrite is None and not
checknode._interface.always_run)
)
hash_exists = False
try:
hash_exists, _, _, _ = checknode.hash_exists()
except Exception:
hash_exists = False
return (hash_exists and node_state_does_not_require_overwrite)
class SLURMGraphPlugin(GraphPluginBase):
"""Execute using SLURM
    The plugin_args input to run can be used to control the SLURM execution.
    Currently supported options are:
    - template : template to use for batch job submission
    - sbatch_args : arguments to be prepended to the job execution script in the
      sbatch call
"""
_template = "#!/bin/bash"
def __init__(self, **kwargs):
if 'plugin_args' in kwargs and kwargs['plugin_args']:
if 'retry_timeout' in kwargs['plugin_args']:
self._retry_timeout = kwargs['plugin_args']['retry_timeout']
if 'max_tries' in kwargs['plugin_args']:
self._max_tries = kwargs['plugin_args']['max_tries']
if 'template' in kwargs['plugin_args']:
self._template = kwargs['plugin_args']['template']
if os.path.isfile(self._template):
self._template = open(self._template).read()
if 'sbatch_args' in kwargs['plugin_args']:
self._sbatch_args = kwargs['plugin_args']['sbatch_args']
if 'dont_resubmit_completed_jobs' in kwargs['plugin_args']:
self._dont_resubmit_completed_jobs = kwargs['plugin_args']['dont_resubmit_completed_jobs']
else:
self._dont_resubmit_completed_jobs = False
super(SLURMGraphPlugin, self).__init__(**kwargs)
def _submit_graph(self, pyfiles, dependencies, nodes):
def make_job_name(jobnumber, nodeslist):
"""
- jobnumber: The index number of the job to create
            - nodeslist: The list of nodes being processed
- return: A string representing this job to be displayed by SLURM
"""
job_name = 'j{0}_{1}'.format(jobnumber, nodeslist[jobnumber]._id)
# Condition job_name to be a valid bash identifier (i.e. - is invalid)
job_name = job_name.replace('-', '_').replace('.', '_').replace(':', '_')
return job_name
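        # For illustration (hypothetical node id): make_job_name(3, nodes) for a
        # node whose _id is "preproc.smooth:0" yields "j3_preproc_smooth_0",
        # since dashes, dots, and colons all become underscores.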
batch_dir, _ = os.path.split(pyfiles[0])
submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh')
cache_doneness_per_node = dict()
if self._dont_resubmit_completed_jobs: # A future parameter for controlling this behavior could be added here
for idx, pyscript in enumerate(pyfiles):
node = nodes[idx]
node_status_done = node_completed_status(node)
# if the node itself claims done, then check to ensure all
                # dependencies are also done
if node_status_done and idx in dependencies:
for child_idx in dependencies[idx]:
if child_idx in cache_doneness_per_node:
child_status_done = cache_doneness_per_node[child_idx]
else:
child_status_done = node_completed_status(nodes[child_idx])
node_status_done = node_status_done and child_status_done
cache_doneness_per_node[idx] = node_status_done
with open(submitjobsfile, 'wt') as fp:
fp.writelines('#!/usr/bin/env bash\n')
fp.writelines('# Condense format attempted\n')
for idx, pyscript in enumerate(pyfiles):
node = nodes[idx]
if cache_doneness_per_node.get(idx, False):
continue
else:
template, sbatch_args = self._get_args(
node, ["template", "sbatch_args"])
batch_dir, name = os.path.split(pyscript)
name = '.'.join(name.split('.')[:-1])
batchscript = '\n'.join((template,
'%s %s' % (sys.executable, pyscript)))
batchscriptfile = os.path.join(batch_dir,
'batchscript_%s.sh' % name)
batchscriptoutfile = batchscriptfile + '.o'
batchscripterrfile = batchscriptfile + '.e'
with open(batchscriptfile, 'wt') as batchfp:
batchfp.writelines(batchscript)
batchfp.close()
deps = ''
if idx in dependencies:
values = ''
for jobid in dependencies[idx]:
                            # Avoid dependencies on jobs that are already done
if not self._dont_resubmit_completed_jobs or cache_doneness_per_node[jobid] == False:
values += "${{{0}}}:".format(make_job_name(jobid, nodes))
if values != '': # i.e. if some jobs were added to dependency list
values = values.rstrip(':')
deps = '--dependency=afterok:%s' % values
jobname = make_job_name(idx, nodes)
# Do not use default output locations if they are set in self._sbatch_args
stderrFile = ''
if self._sbatch_args.count('-e ') == 0:
stderrFile = '-e {errFile}'.format(
errFile=batchscripterrfile)
stdoutFile = ''
if self._sbatch_args.count('-o ') == 0:
stdoutFile = '-o {outFile}'.format(
outFile=batchscriptoutfile)
full_line = '{jobNm}=$(sbatch {outFileOption} {errFileOption} {extraSBatchArgs} {dependantIndex} -J {jobNm} {batchscript} | awk \'/^Submitted/ {{print $4}}\')\n'.format(
jobNm=jobname,
outFileOption=stdoutFile,
errFileOption=stderrFile,
extraSBatchArgs=sbatch_args,
dependantIndex=deps,
batchscript=batchscriptfile)
fp.writelines(full_line)
cmd = CommandLine('bash', environ=dict(os.environ),
terminal_output='allatonce')
cmd.inputs.args = '%s' % submitjobsfile
cmd.run()
logger.info('submitted all jobs to queue')
| []
| []
| []
| [] | [] | python | 0 | 0 | |
config/config.go | package config
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"bitbucket.org/enroute-mobi/ara/logger"
yaml "gopkg.in/yaml.v2"
)
type DatabaseConfig struct {
Name string
User string
Password string
Host string
Port uint
}
type config struct {
DB DatabaseConfig
ApiKey string
Debug bool
LogStash string
BigQueryProjectID string
BigQueryDatasetPrefix string
BigQueryTest string
Sentry string
Syslog bool
ColorizeLog bool
LoadMaxInsert int
FakeUUIDRealFormat bool
}
var Config = config{}
func LoadConfig(path string) error {
// Default values
Config.LoadMaxInsert = 100000
env := Environment()
logger.Log.Debugf("Loading %s configuration", env)
configPath, err := getConfigDirectory(path)
if err != nil {
return err
}
// general config
files := []string{"config.yml", fmt.Sprintf("%s.yml", env)}
for _, file := range files {
data, err := getConfigFileContent(configPath, file)
if err != nil {
return err
}
yaml.Unmarshal(data, &Config)
}
// database config
LoadDatabaseConfig(configPath)
// Test env variables
bigQueryTestEnv := os.Getenv("ARA_BIGQUERY_TEST")
if bigQueryTestEnv != "" {
Config.BigQueryTest = bigQueryTestEnv
}
bigQueryPrefixEnv := os.Getenv("ARA_BIGQUERY_PREFIX")
if bigQueryPrefixEnv != "" {
Config.BigQueryDatasetPrefix = bigQueryPrefixEnv
}
fakeUUIDReal := os.Getenv("ARA_FAKEUUID_REAL")
if strings.ToLower(fakeUUIDReal) == "true" {
Config.FakeUUIDRealFormat = true
}
logger.Log.Syslog = Config.Syslog
logger.Log.Debug = Config.Debug
logger.Log.Color = Config.ColorizeLog
return nil
}
func (c *config) ValidBQConfig() bool {
return c.BigQueryTestMode() || (c.BigQueryProjectID != "" && c.BigQueryDatasetPrefix != "")
}
func (c *config) BigQueryTestMode() bool {
return c.BigQueryTest != ""
}
var environment string
func SetEnvironment(env string) {
environment = env
}
func Environment() string {
if environment == "" {
env := os.Getenv("ARA_ENV")
if env == "" {
logger.Log.Debugf("ARA_ENV not set, default environment is development")
env = "development"
}
environment = env
}
return environment
}
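// LoadDatabaseConfig expects a database.yml keyed by environment name. A
// minimal sketch of the shape it parses (field names follow the code below,
// values are placeholders):
//
//	development:
//	  name: ara
//	  user: ara
//	  password: secret
//	  host: localhost
//	  port: 5432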
func LoadDatabaseConfig(configPath string) error {
data, err := getConfigFileContent(configPath, "database.yml")
if err != nil {
return err
}
rawYaml := make(map[interface{}]interface{})
err = yaml.Unmarshal(data, &rawYaml)
if err != nil {
return err
}
databaseYaml := rawYaml[Environment()].(map[interface{}]interface{})
Config.DB.Name = databaseYaml["name"].(string)
Config.DB.User = databaseYaml["user"].(string)
Config.DB.Password = databaseYaml["password"].(string)
Config.DB.Port = uint(databaseYaml["port"].(int))
if databaseYaml["host"] != nil {
Config.DB.Host = databaseYaml["host"].(string)
}
return nil
}
func getConfigDirectory(path string) (string, error) {
paths := [3]string{
path,
os.Getenv("ARA_CONFIG"),
fmt.Sprintf("%s/src/bitbucket.org/enroute-mobi/ara/config", os.Getenv("GOPATH")),
}
for _, directoryPath := range paths {
if found := checkDirectory("config", directoryPath); found {
return directoryPath, nil
}
}
return "", errors.New("can't find config directory")
}
func GetTemplateDirectory() (string, error) {
paths := [2]string{
os.Getenv("ARA_ROOT"),
fmt.Sprintf("%s/src/bitbucket.org/enroute-mobi/ara", os.Getenv("GOPATH")),
}
for _, directoryPath := range paths {
templatePath := filepath.Join(directoryPath, "/siri/templates")
if found := checkDirectory("template", templatePath); found {
return templatePath, nil
}
}
return "", errors.New("can't find template directory")
}
func checkDirectory(kind, path string) bool {
if path == "" {
return false
}
if _, err := os.Stat(path); err == nil {
logger.Log.Debugf("Found %v directory at %s", kind, path)
return true
}
logger.Log.Debugf("Can't find %v directory at %s", kind, path)
return false
}
func getConfigFileContent(path, file string) ([]byte, error) {
// Check file at location
filePath := strings.Join([]string{path, file}, "/")
if _, err := os.Stat(filePath); err == nil {
data, err := ioutil.ReadFile(filePath)
if err != nil {
return nil, err
}
return data, nil
}
logger.Log.Debugf("Can't find %s config file in %s", file, path)
return nil, fmt.Errorf("can't find %s configuration file", file)
}
| [
"\"ARA_BIGQUERY_TEST\"",
"\"ARA_BIGQUERY_PREFIX\"",
"\"ARA_FAKEUUID_REAL\"",
"\"ARA_ENV\"",
"\"ARA_CONFIG\"",
"\"GOPATH\"",
"\"ARA_ROOT\"",
"\"GOPATH\""
]
| []
| [
"ARA_BIGQUERY_PREFIX",
"ARA_FAKEUUID_REAL",
"ARA_ROOT",
"ARA_CONFIG",
"ARA_ENV",
"ARA_BIGQUERY_TEST",
"GOPATH"
]
| [] | ["ARA_BIGQUERY_PREFIX", "ARA_FAKEUUID_REAL", "ARA_ROOT", "ARA_CONFIG", "ARA_ENV", "ARA_BIGQUERY_TEST", "GOPATH"] | go | 7 | 0 | |
models/config/client_common.go | // Copyright 2016 fatedier, [email protected]
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"os"
"strconv"
"strings"
ini "github.com/vaughan0/go-ini"
)
var ClientCommonCfg *ClientCommonConf
// client common config
type ClientCommonConf struct {
ConfigFile string
ServerAddr string
ServerPort int64
HttpProxy string
LogFile string
LogWay string
LogLevel string
LogMaxDays int64
PrivilegeToken string
PoolCount int
TcpMux bool
User string
LoginFailExit bool
Start map[string]struct{}
HeartBeatInterval int64
HeartBeatTimeout int64
// added by liudf
UseEncryption bool
UseCompressed bool
}
func GetDeaultClientCommonConf() *ClientCommonConf {
return &ClientCommonConf{
ConfigFile: "./frpc.ini",
ServerAddr: "0.0.0.0",
ServerPort: 7000,
HttpProxy: "",
LogFile: "console",
LogWay: "console",
LogLevel: "info",
LogMaxDays: 3,
PrivilegeToken: "",
PoolCount: 1,
TcpMux: true,
User: "",
LoginFailExit: true,
Start: make(map[string]struct{}),
HeartBeatInterval: 30,
HeartBeatTimeout: 90,
UseEncryption: false,
UseCompressed: false,
}
}
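// LoadClientCommonConf reads the [common] section of frpc.ini. A minimal
// sketch of such a section (keys taken from the parsing below, values are
// placeholders):
//
//	[common]
//	server_addr = 203.0.113.10
//	server_port = 7000
//	log_file = ./frpc.log
//	log_level = info
//	privilege_token = 12345678
//	heartbeat_interval = 30
//	heartbeat_timeout = 90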
func LoadClientCommonConf(conf ini.File) (cfg *ClientCommonConf, err error) {
var (
tmpStr string
ok bool
v int64
)
cfg = GetDeaultClientCommonConf()
tmpStr, ok = conf.Get("common", "server_addr")
if ok {
cfg.ServerAddr = tmpStr
}
tmpStr, ok = conf.Get("common", "server_port")
if ok {
cfg.ServerPort, _ = strconv.ParseInt(tmpStr, 10, 64)
}
tmpStr, ok = conf.Get("common", "http_proxy")
if ok {
cfg.HttpProxy = tmpStr
} else {
// get http_proxy from env
cfg.HttpProxy = os.Getenv("http_proxy")
}
tmpStr, ok = conf.Get("common", "log_file")
if ok {
cfg.LogFile = tmpStr
if cfg.LogFile == "console" {
cfg.LogWay = "console"
} else {
cfg.LogWay = "file"
}
}
tmpStr, ok = conf.Get("common", "log_level")
if ok {
cfg.LogLevel = tmpStr
}
tmpStr, ok = conf.Get("common", "log_max_days")
if ok {
cfg.LogMaxDays, _ = strconv.ParseInt(tmpStr, 10, 64)
}
tmpStr, ok = conf.Get("common", "privilege_token")
if ok {
cfg.PrivilegeToken = tmpStr
}
tmpStr, ok = conf.Get("common", "pool_count")
if ok {
v, err = strconv.ParseInt(tmpStr, 10, 64)
if err != nil {
cfg.PoolCount = 1
} else {
cfg.PoolCount = int(v)
}
}
tmpStr, ok = conf.Get("common", "tcp_mux")
if ok && tmpStr == "false" {
cfg.TcpMux = false
} else {
cfg.TcpMux = true
}
tmpStr, ok = conf.Get("common", "user")
if ok {
cfg.User = tmpStr
}
tmpStr, ok = conf.Get("common", "start")
if ok {
proxyNames := strings.Split(tmpStr, ",")
for _, name := range proxyNames {
cfg.Start[name] = struct{}{}
}
}
tmpStr, ok = conf.Get("common", "login_fail_exit")
if ok && tmpStr == "false" {
cfg.LoginFailExit = false
} else {
cfg.LoginFailExit = true
}
tmpStr, ok = conf.Get("common", "heartbeat_timeout")
if ok {
v, err = strconv.ParseInt(tmpStr, 10, 64)
if err != nil {
err = fmt.Errorf("Parse conf error: heartbeat_timeout is incorrect")
return
} else {
cfg.HeartBeatTimeout = v
}
}
tmpStr, ok = conf.Get("common", "heartbeat_interval")
if ok {
v, err = strconv.ParseInt(tmpStr, 10, 64)
if err != nil {
err = fmt.Errorf("Parse conf error: heartbeat_interval is incorrect")
return
} else {
cfg.HeartBeatInterval = v
}
}
if cfg.HeartBeatInterval <= 0 {
err = fmt.Errorf("Parse conf error: heartbeat_interval is incorrect")
return
}
if cfg.HeartBeatTimeout < cfg.HeartBeatInterval {
err = fmt.Errorf("Parse conf error: heartbeat_timeout is incorrect, heartbeat_timeout is less than heartbeat_interval")
return
}
return
}
| [
"\"http_proxy\""
]
| []
| [
"http_proxy"
]
| [] | ["http_proxy"] | go | 1 | 0 | |
title_classification/title_cnn.py | import os
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from keras.layers import *
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras import backend as K
#os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
K.set_session(session)
LR_BASE = 0.01
EPOCHS = 100
data_directory = os.path.join(os.path.split(os.getcwd())[0], 'data')
train = pd.read_csv(os.path.join(data_directory, 'train_split.csv'))
valid = pd.read_csv(os.path.join(data_directory, 'valid_split.csv'))
train = train.loc[(train['Category'] >= 0) & (train['Category'] <= 16)]
valid = valid.loc[(valid['Category'] >= 0) & (valid['Category'] <= 16)]
train_x, train_y = train['title'].values, train['Category'].values
valid_x, valid_y = valid['title'].values, valid['Category'].values
y_train = keras.utils.np_utils.to_categorical(train_y)
y_valid = keras.utils.np_utils.to_categorical(valid_y)
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(train_x)
X_train = tokenizer.texts_to_sequences(train_x)
X_valid = tokenizer.texts_to_sequences(valid_x)
vocab_size = len(tokenizer.word_index) + 1
maxlen = 10
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_valid = pad_sequences(X_valid, padding='post', maxlen=maxlen)
k_regularizer = keras.regularizers.l2(0.0015)
def ConvolutionalBlock(input_shape, num_filters):
model = keras.models.Sequential()
#1st conv layer
model.add(Conv1D(filters=num_filters, kernel_size=3, kernel_regularizer=k_regularizer,
kernel_initializer='he_normal',
strides=1, padding="same", input_shape=input_shape))
model.add(BatchNormalization())
model.add(Activation("relu"))
#2nd conv layer
model.add(Conv1D(filters=num_filters, kernel_regularizer=k_regularizer,
kernel_initializer='he_normal',
kernel_size=3, strides=1, padding="same"))
model.add(BatchNormalization())
model.add(Activation("relu"))
return model
def conv_shape(conv):
return conv.get_shape().as_list()[1:]
def vdcnn_model(num_filters, num_classes, sequence_max_length, vocab_size, embedding_dim, learning_rate=LR_BASE):
inputs = Input(shape=(sequence_max_length, ), name='input')
embedded_seq = Embedding(vocab_size, embedding_dim, embeddings_initializer='he_normal', embeddings_regularizer=k_regularizer,
input_length=sequence_max_length)(inputs)
embedded_seq = BatchNormalization()(embedded_seq)
#1st Layer
conv = Conv1D(filters=64, kernel_size=3, strides=2, kernel_regularizer=k_regularizer,
kernel_initializer='he_normal',
padding="same")(embedded_seq)
#ConvBlocks
for i in range(len(num_filters)):
conv = ConvolutionalBlock(conv_shape(conv), num_filters[i])(conv)
conv = MaxPooling1D(pool_size=3, strides=2, padding="same")(conv)
"""def _top_k(x):
x = tf.transpose(x, [0, 2, 1])
k_max = tf.nn.top_k(x, k=top_k)
return tf.reshape(k_max[0], (-1, num_filters[-1] * top_k))
k_max = Lambda(_top_k, output_shape=(num_filters[-1] * top_k,))(conv)"""
#fully connected layers
# in the original paper they didn't use dropout
flat = Flatten()(conv)
flat = Dropout(0.3)(flat)
fc1 = Dense(512, activation='relu', kernel_initializer='he_normal',kernel_regularizer=k_regularizer)(flat)
fc1 = Dropout(0.3)(fc1)
fc2 = Dense(512, activation='relu', kernel_initializer='he_normal',
kernel_regularizer=k_regularizer)(fc1)
fc2 = Dropout(0.3)(fc2)
out = Dense(num_classes, activation='softmax',
kernel_regularizer=k_regularizer)(fc2)
#optimizer
sgd = keras.optimizers.SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
model = keras.models.Model(inputs=inputs, outputs=out)
model.compile(optimizer=sgd,
loss='categorical_crossentropy', metrics=['accuracy'])
return model
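# Build the network: four convolutional stages (64 -> 128 -> 256 -> 512 filters),
# each followed by max pooling, then two 512-unit dense layers and a 17-way softmax.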
num_filters = [64, 128, 256, 512]
model = vdcnn_model(num_filters=num_filters, num_classes=17, vocab_size=vocab_size,
sequence_max_length=maxlen, embedding_dim=50)
model.summary()
"""
def create_embedding_matrix(filepath, word_index, embedding_dim):
# Adding again 1 because of reserved 0 index
vocab_size = len(word_index) + 1
embedding_matrix = np.zeros((vocab_size, embedding_dim))
with open(filepath) as f:
for line in f:
word, *vector = line.split()
if word in word_index:
idx = word_index[word]
embedding_matrix[idx] = np.array(
vector, dtype=np.float32)[:embedding_dim]
return embedding_matrix
tokenizer = Tokenizer(num_words=10000)
tokenizer.fit_on_texts(train_x)
X_train = tokenizer.texts_to_sequences(train_x)
X_valid = tokenizer.texts_to_sequences(valid_x)
vocab_size = len(tokenizer.word_index) + 1
maxlen = 10
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_valid = pad_sequences(X_valid, padding='post', maxlen=maxlen)
embedding_dim = 100
embedding_matrix = create_embedding_matrix(
'data/glove_word_embeddings/glove.6B.50d.txt',
tokenizer.word_index, embedding_dim)
model = keras.models.Sequential()
model.add(layers.Embedding(vocab_size, embedding_dim,
weights=[embedding_matrix],
input_length=maxlen,
trainable=True))
model.add(layers.GlobalMaxPool1D())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
model = keras.models.Sequential()
model.add(layers.Embedding(input_dim=vocab_size,
output_dim=embedding_dim,
input_length=maxlen))
model.add(layers.Conv1D(64, 5, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(500, activation='relu'))
model.add(layers.Dense(58, activation='softmax'))
decay = LR_BASE/(EPOCHS)
sgd = keras.optimizers.SGD(lr=LR_BASE, decay=decay,
momentum=0.9, nesterov=True)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()"""
print(X_train.shape)
print(y_train.shape)
history = model.fit(X_train, y_train,
epochs=1000,
verbose=True,
validation_data=(X_valid, y_valid),
batch_size=4096)
loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model.evaluate(X_valid, y_valid, verbose=False)
print("Testing Accuracy: {:.4f}".format(accuracy))
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES",
"KMP_DUPLICATE_LIB_OK"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES", "KMP_DUPLICATE_LIB_OK"] | python | 3 | 0 | |
pkg/file/home_windows.go | // Copyright 2018 Adam Shannon
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build windows
package file
import (
"os"
)
func homeDir() string {
return os.Getenv("USERPROFILE")
}
| [
"\"USERPROFILE\""
]
| []
| [
"USERPROFILE"
]
| [] | ["USERPROFILE"] | go | 1 | 0 | |
pkg/registry/digest/digest_test.go | package digest_test
import (
"fmt"
"github.com/containrrr/watchtower/internal/actions/mocks"
"github.com/containrrr/watchtower/pkg/registry/digest"
wtTypes "github.com/containrrr/watchtower/pkg/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/ghttp"
"net/http"
"os"
"testing"
"time"
)
func TestDigest(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(GinkgoT(), "Digest Suite")
}
var (
DockerHubCredentials = &wtTypes.RegistryCredentials{
Username: os.Getenv("CI_INTEGRATION_TEST_REGISTRY_DH_USERNAME"),
Password: os.Getenv("CI_INTEGRATION_TEST_REGISTRY_DH_PASSWORD"),
}
GHCRCredentials = &wtTypes.RegistryCredentials{
Username: os.Getenv("CI_INTEGRATION_TEST_REGISTRY_GH_USERNAME"),
Password: os.Getenv("CI_INTEGRATION_TEST_REGISTRY_GH_PASSWORD"),
}
)
func SkipIfCredentialsEmpty(credentials *wtTypes.RegistryCredentials, fn func()) func() {
if credentials.Username == "" {
return func() {
Skip("Username missing. Skipping integration test")
}
} else if credentials.Password == "" {
return func() {
Skip("Password missing. Skipping integration test")
}
} else {
return fn
}
}
var _ = Describe("Digests", func() {
mockId := "mock-id"
mockName := "mock-container"
mockImage := "ghcr.io/k6io/operator:latest"
mockCreated := time.Now()
mockDigest := "ghcr.io/k6io/operator@sha256:d68e1e532088964195ad3a0a71526bc2f11a78de0def85629beb75e2265f0547"
mockContainer := mocks.CreateMockContainerWithDigest(
mockId,
mockName,
mockImage,
mockCreated,
mockDigest)
When("a digest comparison is done", func() {
It("should return true if digests match",
SkipIfCredentialsEmpty(GHCRCredentials, func() {
creds := fmt.Sprintf("%s:%s", GHCRCredentials.Username, GHCRCredentials.Password)
matches, err := digest.CompareDigest(mockContainer, creds)
Expect(err).NotTo(HaveOccurred())
Expect(matches).To(Equal(true))
}),
)
It("should return false if digests differ", func() {
})
It("should return an error if the registry isn't available", func() {
})
})
When("using different registries", func() {
It("should work with DockerHub",
SkipIfCredentialsEmpty(DockerHubCredentials, func() {
fmt.Println(DockerHubCredentials != nil) // to avoid crying linters
}),
)
It("should work with GitHub Container Registry",
SkipIfCredentialsEmpty(GHCRCredentials, func() {
fmt.Println(GHCRCredentials != nil) // to avoid crying linters
}),
)
})
When("sending a HEAD request", func() {
var server *ghttp.Server
BeforeEach(func() {
server = ghttp.NewServer()
})
AfterEach(func() {
server.Close()
})
It("should use a custom user-agent", func() {
server.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyHeader(http.Header{
"User-Agent": []string{"Watchtower/v0.0.0-unknown"},
}),
ghttp.RespondWith(http.StatusOK, "", http.Header{
digest.ContentDigestHeader: []string{
mockDigest,
},
}),
),
)
dig, err := digest.GetDigest(server.URL(), "token")
println(dig)
Expect(server.ReceivedRequests()).Should(HaveLen(1))
Expect(err).NotTo(HaveOccurred())
Expect(dig).To(Equal(mockDigest))
})
})
})
| [
"\"CI_INTEGRATION_TEST_REGISTRY_DH_USERNAME\"",
"\"CI_INTEGRATION_TEST_REGISTRY_DH_PASSWORD\"",
"\"CI_INTEGRATION_TEST_REGISTRY_GH_USERNAME\"",
"\"CI_INTEGRATION_TEST_REGISTRY_GH_PASSWORD\""
]
| []
| [
"CI_INTEGRATION_TEST_REGISTRY_DH_PASSWORD",
"CI_INTEGRATION_TEST_REGISTRY_GH_USERNAME",
"CI_INTEGRATION_TEST_REGISTRY_GH_PASSWORD",
"CI_INTEGRATION_TEST_REGISTRY_DH_USERNAME"
]
| [] | ["CI_INTEGRATION_TEST_REGISTRY_DH_PASSWORD", "CI_INTEGRATION_TEST_REGISTRY_GH_USERNAME", "CI_INTEGRATION_TEST_REGISTRY_GH_PASSWORD", "CI_INTEGRATION_TEST_REGISTRY_DH_USERNAME"] | go | 4 | 0 | |
crossfit/crossfit/asgi.py | """
ASGI config for crossfit project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crossfit.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go | // +build acceptance compute servers
package v2
import (
"os"
"testing"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/acceptance/tools"
"github.com/rackspace/gophercloud/openstack"
"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
"github.com/rackspace/gophercloud/pagination"
th "github.com/rackspace/gophercloud/testhelper"
)
func TestListServers(t *testing.T) {
client, err := newClient()
if err != nil {
t.Fatalf("Unable to create a compute client: %v", err)
}
t.Logf("ID\tRegion\tName\tStatus\tIPv4\tIPv6")
pager := servers.List(client, servers.ListOpts{})
count, pages := 0, 0
pager.EachPage(func(page pagination.Page) (bool, error) {
pages++
t.Logf("---")
servers, err := servers.ExtractServers(page)
if err != nil {
return false, err
}
for _, s := range servers {
t.Logf("%s\t%s\t%s\t%s\t%s\t\n", s.ID, s.Name, s.Status, s.AccessIPv4, s.AccessIPv6)
count++
}
return true, nil
})
t.Logf("--------\n%d servers listed on %d pages.\n", count, pages)
}
func networkingClient() (*gophercloud.ServiceClient, error) {
opts, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
provider, err := openstack.AuthenticatedClient(opts)
if err != nil {
return nil, err
}
return openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
Name: "neutron",
Region: os.Getenv("OS_REGION_NAME"),
})
}
func createServer(t *testing.T, client *gophercloud.ServiceClient, choices *ComputeChoices) (*servers.Server, error) {
if testing.Short() {
t.Skip("Skipping test that requires server creation in short mode.")
}
var network networks.Network
networkingClient, err := networkingClient()
if err != nil {
t.Fatalf("Unable to create a networking client: %v", err)
}
pager := networks.List(networkingClient, networks.ListOpts{Name: "public", Limit: 1})
pager.EachPage(func(page pagination.Page) (bool, error) {
networks, err := networks.ExtractNetworks(page)
if err != nil {
t.Errorf("Failed to extract networks: %v", err)
return false, err
}
if len(networks) == 0 {
t.Fatalf("No networks to attach to server")
return false, err
}
network = networks[0]
return false, nil
})
name := tools.RandomString("ACPTTEST", 16)
t.Logf("Attempting to create server: %s\n", name)
pwd := tools.MakeNewPassword("")
server, err := servers.Create(client, servers.CreateOpts{
Name: name,
FlavorRef: choices.FlavorID,
ImageRef: choices.ImageID,
Networks: []servers.Network{
servers.Network{UUID: network.ID},
},
AdminPass: pwd,
}).Extract()
if err != nil {
t.Fatalf("Unable to create server: %v", err)
}
th.AssertEquals(t, pwd, server.AdminPass)
return server, err
}
func TestCreateDestroyServer(t *testing.T) {
choices, err := ComputeChoicesFromEnv()
if err != nil {
t.Fatal(err)
}
client, err := newClient()
if err != nil {
t.Fatalf("Unable to create a compute client: %v", err)
}
server, err := createServer(t, client, choices)
if err != nil {
t.Fatalf("Unable to create server: %v", err)
}
defer func() {
servers.Delete(client, server.ID)
t.Logf("Server deleted.")
}()
if err = waitForStatus(client, server, "ACTIVE"); err != nil {
t.Fatalf("Unable to wait for server: %v", err)
}
}
func TestUpdateServer(t *testing.T) {
client, err := newClient()
if err != nil {
t.Fatalf("Unable to create a compute client: %v", err)
}
choices, err := ComputeChoicesFromEnv()
if err != nil {
t.Fatal(err)
}
server, err := createServer(t, client, choices)
if err != nil {
t.Fatal(err)
}
defer servers.Delete(client, server.ID)
if err = waitForStatus(client, server, "ACTIVE"); err != nil {
t.Fatal(err)
}
alternateName := tools.RandomString("ACPTTEST", 16)
for alternateName == server.Name {
alternateName = tools.RandomString("ACPTTEST", 16)
}
t.Logf("Attempting to rename the server to %s.", alternateName)
updated, err := servers.Update(client, server.ID, servers.UpdateOpts{Name: alternateName}).Extract()
if err != nil {
t.Fatalf("Unable to rename server: %v", err)
}
if updated.ID != server.ID {
t.Errorf("Updated server ID [%s] didn't match original server ID [%s]!", updated.ID, server.ID)
}
err = tools.WaitFor(func() (bool, error) {
latest, err := servers.Get(client, updated.ID).Extract()
if err != nil {
return false, err
}
return latest.Name == alternateName, nil
})
}
func TestActionChangeAdminPassword(t *testing.T) {
t.Parallel()
client, err := newClient()
if err != nil {
t.Fatalf("Unable to create a compute client: %v", err)
}
choices, err := ComputeChoicesFromEnv()
if err != nil {
t.Fatal(err)
}
server, err := createServer(t, client, choices)
if err != nil {
t.Fatal(err)
}
defer servers.Delete(client, server.ID)
if err = waitForStatus(client, server, "ACTIVE"); err != nil {
t.Fatal(err)
}
randomPassword := tools.MakeNewPassword(server.AdminPass)
res := servers.ChangeAdminPassword(client, server.ID, randomPassword)
if res.Err != nil {
t.Fatal(err)
}
if err = waitForStatus(client, server, "PASSWORD"); err != nil {
t.Fatal(err)
}
if err = waitForStatus(client, server, "ACTIVE"); err != nil {
t.Fatal(err)
}
}
func TestActionReboot(t *testing.T) {
t.Parallel()
client, err := newClient()
if err != nil {
t.Fatalf("Unable to create a compute client: %v", err)
}
choices, err := ComputeChoicesFromEnv()
if err != nil {
t.Fatal(err)
}
server, err := createServer(t, client, choices)
if err != nil {
t.Fatal(err)
}
defer servers.Delete(client, server.ID)
if err = waitForStatus(client, server, "ACTIVE"); err != nil {
t.Fatal(err)
}
res := servers.Reboot(client, server.ID, "aldhjflaskhjf")
if res.Err == nil {
t.Fatal("Expected the SDK to provide an ArgumentError here")
}
t.Logf("Attempting reboot of server %s", server.ID)
res = servers.Reboot(client, server.ID, servers.OSReboot)
if res.Err != nil {
t.Fatalf("Unable to reboot server: %v", err)
}
if err = waitForStatus(client, server, "REBOOT"); err != nil {
t.Fatal(err)
}
if err = waitForStatus(client, server, "ACTIVE"); err != nil {
t.Fatal(err)
}
}
func TestActionRebuild(t *testing.T) {
t.Parallel()
client, err := newClient()
if err != nil {
t.Fatalf("Unable to create a compute client: %v", err)
}
choices, err := ComputeChoicesFromEnv()
if err != nil {
t.Fatal(err)
}
server, err := createServer(t, client, choices)
if err != nil {
t.Fatal(err)
}
defer servers.Delete(client, server.ID)
if err = waitForStatus(client, server, "ACTIVE"); err != nil {
t.Fatal(err)
}
t.Logf("Attempting to rebuild server %s", server.ID)
rebuildOpts := servers.RebuildOpts{
Name: tools.RandomString("ACPTTEST", 16),
AdminPass: tools.MakeNewPassword(server.AdminPass),
ImageID: choices.ImageID,
}
rebuilt, err := servers.Rebuild(client, server.ID, rebuildOpts).Extract()
if err != nil {
t.Fatal(err)
}
if rebuilt.ID != server.ID {
t.Errorf("Expected rebuilt server ID of [%s]; got [%s]", server.ID, rebuilt.ID)
}
if err = waitForStatus(client, rebuilt, "REBUILD"); err != nil {
t.Fatal(err)
}
if err = waitForStatus(client, rebuilt, "ACTIVE"); err != nil {
t.Fatal(err)
}
}
func resizeServer(t *testing.T, client *gophercloud.ServiceClient, server *servers.Server, choices *ComputeChoices) {
if err := waitForStatus(client, server, "ACTIVE"); err != nil {
t.Fatal(err)
}
t.Logf("Attempting to resize server [%s]", server.ID)
opts := &servers.ResizeOpts{
FlavorRef: choices.FlavorIDResize,
}
if res := servers.Resize(client, server.ID, opts); res.Err != nil {
t.Fatal(res.Err)
}
if err := waitForStatus(client, server, "VERIFY_RESIZE"); err != nil {
t.Fatal(err)
}
}
func TestActionResizeConfirm(t *testing.T) {
t.Parallel()
choices, err := ComputeChoicesFromEnv()
if err != nil {
t.Fatal(err)
}
client, err := newClient()
if err != nil {
t.Fatalf("Unable to create a compute client: %v", err)
}
server, err := createServer(t, client, choices)
if err != nil {
t.Fatal(err)
}
defer servers.Delete(client, server.ID)
resizeServer(t, client, server, choices)
t.Logf("Attempting to confirm resize for server %s", server.ID)
if res := servers.ConfirmResize(client, server.ID); res.Err != nil {
t.Fatal(err)
}
if err = waitForStatus(client, server, "ACTIVE"); err != nil {
t.Fatal(err)
}
}
func TestActionResizeRevert(t *testing.T) {
t.Parallel()
choices, err := ComputeChoicesFromEnv()
if err != nil {
t.Fatal(err)
}
client, err := newClient()
if err != nil {
t.Fatalf("Unable to create a compute client: %v", err)
}
server, err := createServer(t, client, choices)
if err != nil {
t.Fatal(err)
}
defer servers.Delete(client, server.ID)
resizeServer(t, client, server, choices)
t.Logf("Attempting to revert resize for server %s", server.ID)
if res := servers.RevertResize(client, server.ID); res.Err != nil {
t.Fatal(err)
}
if err = waitForStatus(client, server, "ACTIVE"); err != nil {
t.Fatal(err)
}
}
func TestServerMetadata(t *testing.T) {
t.Parallel()
choices, err := ComputeChoicesFromEnv()
th.AssertNoErr(t, err)
client, err := newClient()
if err != nil {
t.Fatalf("Unable to create a compute client: %v", err)
}
server, err := createServer(t, client, choices)
if err != nil {
t.Fatal(err)
}
defer servers.Delete(client, server.ID)
if err = waitForStatus(client, server, "ACTIVE"); err != nil {
t.Fatal(err)
}
metadata, err := servers.UpdateMetadata(client, server.ID, servers.MetadataOpts{
"foo": "bar",
"this": "that",
}).Extract()
th.AssertNoErr(t, err)
t.Logf("UpdateMetadata result: %+v\n", metadata)
err = servers.DeleteMetadatum(client, server.ID, "foo").ExtractErr()
th.AssertNoErr(t, err)
metadata, err = servers.CreateMetadatum(client, server.ID, servers.MetadatumOpts{
"foo": "baz",
}).Extract()
th.AssertNoErr(t, err)
t.Logf("CreateMetadatum result: %+v\n", metadata)
metadata, err = servers.Metadatum(client, server.ID, "foo").Extract()
th.AssertNoErr(t, err)
t.Logf("Metadatum result: %+v\n", metadata)
th.AssertEquals(t, "baz", metadata["foo"])
metadata, err = servers.Metadata(client, server.ID).Extract()
th.AssertNoErr(t, err)
t.Logf("Metadata result: %+v\n", metadata)
metadata, err = servers.ResetMetadata(client, server.ID, servers.MetadataOpts{}).Extract()
th.AssertNoErr(t, err)
t.Logf("ResetMetadata result: %+v\n", metadata)
th.AssertDeepEquals(t, map[string]string{}, metadata)
}
| [
"\"OS_REGION_NAME\""
]
| []
| [
"OS_REGION_NAME"
]
| [] | ["OS_REGION_NAME"] | go | 1 | 0 | |
utils/comsumer_group.go | package utils
import (
"context"
"crypto/tls"
"log"
"os"
"strconv"
"strings"
"time"
"github.com/Shopify/sarama"
)
func GetConsumer(groupId string, topic string, handler sarama.ConsumerGroupHandler) {
brokerList := strings.Split(os.Getenv("KAFKA_BROKERS"), ",")
config := sarama.NewConfig()
config.Version = sarama.V2_3_0_0
config.Consumer.Return.Errors, _ = strconv.ParseBool(os.Getenv("CONSUMER_RETRY_RETURN_SUCCESSES"))
kafkaSecurity, err := strconv.ParseBool(os.Getenv("KAFKA_SECURITY_ENABLED"))
if err != nil {
log.Fatal(err)
}
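// When KAFKA_SECURITY_ENABLED is true, authenticate via SASL/SCRAM-SHA-256 over
// TLS; note that certificate verification is skipped (InsecureSkipVerify).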
if kafkaSecurity {
config.ClientID = "push-service"
config.Net.KeepAlive = 1 * time.Hour
config.Net.TLS.Enable = true
config.Net.SASL.Enable = true
config.Net.SASL.Handshake = true
config.Net.SASL.User = os.Getenv("KAFKA_USERNAME")
config.Net.SASL.Password = os.Getenv("KAFKA_PASSWORD")
config.Net.TLS.Config = &tls.Config{InsecureSkipVerify: true}
config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256)
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} }
}
client, err := sarama.NewConsumerGroup(brokerList, groupId, config)
if err != nil {
log.Fatal(err)
}
defer func() { _ = client.Close() }()
// Track errors
go TrackGroupErrors(client)
ctx := context.Background()
for {
err = client.Consume(ctx, []string{topic}, handler)
if err != nil {
panic(err)
}
}
}
| [
"\"KAFKA_BROKERS\"",
"\"CONSUMER_RETRY_RETURN_SUCCESSES\"",
"\"KAFKA_SECURITY_ENABLED\"",
"\"KAFKA_USERNAME\"",
"\"KAFKA_PASSWORD\""
]
| []
| [
"CONSUMER_RETRY_RETURN_SUCCESSES",
"KAFKA_PASSWORD",
"KAFKA_SECURITY_ENABLED",
"KAFKA_BROKERS",
"KAFKA_USERNAME"
]
| [] | ["CONSUMER_RETRY_RETURN_SUCCESSES", "KAFKA_PASSWORD", "KAFKA_SECURITY_ENABLED", "KAFKA_BROKERS", "KAFKA_USERNAME"] | go | 5 | 0 | |
main.go | package main
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"regexp"
"strings"
"golang.org/x/crypto/ssh/terminal"
)
func main() {
if err := Run(os.Args[1:]); err == flag.ErrHelp {
os.Exit(1)
} else if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func Run(args []string) error {
// Parse command line flags.
fs := flag.NewFlagSet("bed", flag.ContinueOnError)
dryRun := fs.Bool("dry-run", false, "")
verbose := fs.Bool("v", false, "")
fs.Usage = usage
if err := fs.Parse(args); err != nil {
return err
} else if fs.NArg() == 0 {
fs.Usage()
return flag.ErrHelp
}
// Ensure either STDIN or args specify paths.
if terminal.IsTerminal(int(os.Stdin.Fd())) && fs.NArg() == 1 {
return errors.New("path required")
}
// Set logging.
log.SetFlags(0)
if !*verbose {
log.SetOutput(ioutil.Discard)
}
// Ensure BED_EDITOR or EDITOR is set.
editor := os.Getenv("BED_EDITOR")
if editor == "" {
editor = os.Getenv("EDITOR")
}
if editor == "" && !*dryRun {
return errors.New("EDITOR must be set")
}
// Extract arguments.
pattern, paths := fs.Arg(0), fs.Args()[1:]
// Read paths from stdin as well.
if !terminal.IsTerminal(int(os.Stdin.Fd())) {
buf, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
paths = append(paths, strings.Split(strings.TrimSpace(string(buf)), "\n")...)
}
// Parse regex.
re, err := regexp.Compile(pattern)
if err != nil {
return err
}
// Find all matches.
matches, err := FindAllIndexPaths(re, paths)
if err != nil {
return err
}
// If a dry run, simply print out matches to STDOUT.
if *dryRun {
for _, m := range matches {
fmt.Printf("%s: %s\n", m.Path, string(m.Data))
}
return nil
}
// Write matches to temporary file.
tmpPath, err := writeTempMatchFile(matches)
if err != nil {
return err
}
defer os.Remove(tmpPath)
// Invoke editor.
cmd, args := parseEditor(editor)
if err := exec.Command(cmd, append(args, tmpPath)...).Run(); err != nil {
return fmt.Errorf("There was a problem with editor %q", editor)
}
// Parse matches from file.
var newMatches []*Match
if buf, err := ioutil.ReadFile(tmpPath); err != nil {
return err
} else if newMatches, err = ParseMatches(buf); err != nil {
return err
}
// Apply changes.
if err := ApplyMatches(newMatches); err != nil {
return err
}
return nil
}
func parseEditor(s string) (cmd string, args []string) {
a := strings.Split(s, " ")
return a[0], a[1:]
}
func writeTempMatchFile(matches []*Match) (string, error) {
f, err := ioutil.TempFile("", "bed-")
if err != nil {
return "", err
}
defer f.Close()
for _, m := range matches {
if buf, err := m.MarshalText(); err != nil {
return "", err
} else if _, err := f.Write(buf); err != nil {
return "", err
} else if _, err := f.Write([]byte("\n")); err != nil {
return "", err
}
}
return f.Name(), nil
}
// FindAllIndexPaths finds the start/end position & data of re in all paths.
func FindAllIndexPaths(re *regexp.Regexp, paths []string) ([]*Match, error) {
var matches []*Match
for _, path := range paths {
m, err := FindAllIndexPath(re, path)
if err != nil {
return nil, err
}
matches = append(matches, m...)
}
return matches, nil
}
// FindAllIndexPath finds the start/end position & data of re in path.
func FindAllIndexPath(re *regexp.Regexp, path string) ([]*Match, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
a := re.FindAllIndex(data, -1)
b := re.FindAll(data, -1)
var matches []*Match
for i := range a {
matches = append(matches, &Match{
Path: path,
Pos: a[i][0],
Len: a[i][1] - a[i][0],
Data: b[i],
})
}
return matches, nil
}
// Match contains the source & position of a match.
type Match struct {
Path string
Pos int
Len int
Data []byte
}
type matchJSON struct {
Path string `json:"path"`
Pos int `json:"pos"`
Len int `json:"len"`
}
func (m *Match) MarshalText() ([]byte, error) {
hdr, err := json.Marshal(matchJSON{Path: m.Path, Pos: m.Pos, Len: m.Len})
if err != nil {
return nil, err
}
var buf bytes.Buffer
fmt.Fprintf(&buf, "#bed:begin %s\n", hdr)
fmt.Fprintln(&buf, string(m.Data))
fmt.Fprintln(&buf, "#bed:end")
return buf.Bytes(), nil
}
func (m *Match) UnmarshalText(data []byte) error {
a := matchTextRegex.FindSubmatch(data)
if len(a) == 0 {
return errors.New("missing #bed:begin or #bed:end tags")
}
var hdr matchJSON
if err := json.Unmarshal(a[1], &hdr); err != nil {
return err
}
m.Path, m.Pos, m.Len = hdr.Path, hdr.Pos, hdr.Len
m.Data = a[2]
return nil
}
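// A serialized match block, as written by MarshalText, looks like this
// (illustrative values):
//
// #bed:begin {"path":"example.txt","pos":10,"len":3}
// old text
// #bed:end
//
// matchTextRegex captures the JSON header and the text between the markers.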
var matchTextRegex = regexp.MustCompile(`(?s)#bed:begin ([^\n]+)\n(.*?)\n#bed:end`)
// ParseMatches finds and parses all matches.
// An error is returned if match header data is not a valid header.
func ParseMatches(data []byte) ([]*Match, error) {
var matches []*Match
for _, buf := range matchTextRegex.FindAll(data, -1) {
var m Match
if err := m.UnmarshalText(buf); err != nil {
return nil, err
}
matches = append(matches, &m)
}
return matches, nil
}
// ApplyMatches writes each match's data to the specified path & position.
func ApplyMatches(matches []*Match) error {
paths, pathMatches := groupMatchesByPath(matches)
for i := range paths {
if err := applyPathMatches(paths[i], pathMatches[i]); err != nil {
return err
}
}
return nil
}
func applyPathMatches(path string, matches []*Match) error {
// Read current file data.
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
// Apply matches in order.
for i, m := range matches {
start, end := m.Pos, m.Pos+m.Len
prefix := data[:start:start]
mid := m.Data[:len(m.Data):len(m.Data)]
suffix := data[end:]
data = append(prefix, append(mid, suffix...)...)
// Apply difference in data size to later matches.
for j := i + 1; j < len(matches); j++ {
if matches[j].Pos >= m.Pos {
matches[j].Pos += len(m.Data) - m.Len
}
}
}
// Write new data back to file.
if fi, err := os.Stat(path); err != nil {
return err
} else if err := ioutil.WriteFile(path, data, fi.Mode()); err != nil {
return err
}
return nil
}
// groupMatchesByPath returns a list of paths and a list of their associated matches.
func groupMatchesByPath(matches []*Match) ([]string, [][]*Match) {
m := make(map[string][]*Match)
for i := range matches {
m[matches[i].Path] = append(m[matches[i].Path], matches[i])
}
paths, pathMatches := make([]string, 0, len(m)), make([][]*Match, 0, len(m))
for path := range m {
paths = append(paths, path)
pathMatches = append(pathMatches, m[path])
}
return paths, pathMatches
}
func usage() {
fmt.Fprintln(os.Stderr, `
bed is a bulk command line text editor.
Usage:
bed [arguments] pattern path [paths]
The command will match pattern against all provided paths and output
a series of files which contain matches. This list of matches can be
passed to an interactive editor such as vi for edits. If the editor
is closed with a 0 exit code then all changes to the matches are
applied to the original files.
Available arguments:
-dry-run
Only show matches without outputting to files.
`)
}
| [
"\"BED_EDITOR\"",
"\"EDITOR\""
]
| []
| [
"BED_EDITOR",
"EDITOR"
]
| [] | ["BED_EDITOR", "EDITOR"] | go | 2 | 0 | |
src/cmd/go/internal/modindex/read.go | // Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"go/build"
"go/build/constraint"
"go/token"
"internal/goroot"
"internal/unsafeheader"
"io/fs"
"math"
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"sort"
"strings"
"sync"
"unsafe"
"cmd/go/internal/base"
"cmd/go/internal/cache"
"cmd/go/internal/cfg"
"cmd/go/internal/fsys"
"cmd/go/internal/imports"
"cmd/go/internal/par"
"cmd/go/internal/str"
)
// enabled is used to flag off the behavior of the module index on tip.
// It will be removed before the release.
// TODO(matloob): Remove enabled once we have more confidence on the
// module index.
var enabled = func() bool {
debug := strings.Split(os.Getenv("GODEBUG"), ",")
for _, f := range debug {
if f == "goindex=0" {
return false
}
}
return true
}()
// ModuleIndex represents and encoded module index file. It is used to
// do the equivalent of build.Import of packages in the module and answer other
// questions based on the index file's data.
type ModuleIndex struct {
modroot string
od offsetDecoder
packages map[string]int // offsets of each package
packagePaths []string // paths to package directories relative to modroot; these are the keys of packages
}
var fcache par.Cache
func moduleHash(modroot string, ismodcache bool) (cache.ActionID, error) {
// We expect modules stored within the module cache to be checksummed and
// immutable, and we expect released Go modules to change only infrequently
// (when the Go version changes).
if !ismodcache || !str.HasFilePathPrefix(modroot, cfg.GOROOT) {
return cache.ActionID{}, ErrNotIndexed
}
h := cache.NewHash("moduleIndex")
fmt.Fprintf(h, "module index %s %s %v\n", runtime.Version(), indexVersion, modroot)
if strings.HasPrefix(runtime.Version(), "devel ") {
// This copy of the standard library is a development version, not a
// release. It could be based on a Git commit (like "devel go1.19-2a78e8afc0
// Wed Jun 15 00:06:24 2022 +0000") with or without changes on top of that
// commit, or it could be completely artificial due to lacking a `git` binary
// (like "devel gomote.XXXXX", as synthesized by "gomote push" as of
// 2022-06-15). Compute an inexpensive hash of its files using mtimes so
// that during development we can continue to exercise the logic for cached
// GOROOT indexes.
//
// mtimes may be granular, imprecise, and loosely updated (see
// https://apenwarr.ca/log/20181113), but we don't expect Go contributors to
// be mucking around with the import graphs in GOROOT often enough for mtime
// collisions to matter essentially ever.
//
// Note that fsys.Walk walks paths in deterministic order, so this hash
// should be completely deterministic if the files are unchanged.
err := fsys.Walk(modroot, func(path string, info fs.FileInfo, err error) error {
if err := moduleWalkErr(modroot, path, info, err); err != nil {
return err
}
if info.IsDir() {
return nil
}
fmt.Fprintf(h, "file %v %v\n", info.Name(), info.ModTime())
if info.Mode()&fs.ModeSymlink != 0 {
targ, err := fsys.Stat(path)
if err != nil {
return err
}
fmt.Fprintf(h, "target %v %v\n", targ.Name(), targ.ModTime())
}
return nil
})
if err != nil {
return cache.ActionID{}, err
}
}
return h.Sum(), nil
}
var modrootCache par.Cache
var ErrNotIndexed = errors.New("not in module index")
// Get returns the ModuleIndex for the module rooted at modroot.
// It will return ErrNotIndexed if the directory should be read without
// using the index, for instance because the index is disabled, or the package
// is not in a module.
func Get(modroot string) (*ModuleIndex, error) {
if !enabled || cache.DefaultDir() == "off" || cfg.BuildMod == "vendor" {
return nil, ErrNotIndexed
}
if modroot == "" {
panic("modindex.Get called with empty modroot")
}
modroot = filepath.Clean(modroot)
isModCache := str.HasFilePathPrefix(modroot, cfg.GOMODCACHE)
return openIndex(modroot, isModCache)
}
// openIndex returns the module index for modPath.
// It will return ErrNotIndexed if the module can not be read
// using the index because it contains symlinks.
func openIndex(modroot string, ismodcache bool) (*ModuleIndex, error) {
type result struct {
mi *ModuleIndex
err error
}
r := fcache.Do(modroot, func() any {
id, err := moduleHash(modroot, ismodcache)
if err != nil {
return result{nil, err}
}
data, _, err := cache.Default().GetMmap(id)
if err != nil {
// Couldn't read from modindex. Assume we couldn't read from
// the index because the module hasn't been indexed yet.
data, err = indexModule(modroot)
if err != nil {
return result{nil, err}
}
if err = cache.Default().PutBytes(id, data); err != nil {
return result{nil, err}
}
}
mi, err := fromBytes(modroot, data)
if err != nil {
return result{nil, err}
}
return result{mi, nil}
}).(result)
return r.mi, r.err
}
// fromBytes returns a *ModuleIndex given the encoded representation.
func fromBytes(moddir string, data []byte) (mi *ModuleIndex, err error) {
if !enabled {
panic("use of index")
}
// SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
// that all its errors satisfy this interface, we'll only check for these errors so that
// we don't suppress panics that could have been produced from other sources.
type addrer interface {
Addr() uintptr
}
// set PanicOnFault to true so that we can catch errors on the initial reads of the slice,
// in case it's mmapped (the common case).
old := debug.SetPanicOnFault(true)
defer func() {
debug.SetPanicOnFault(old)
if e := recover(); e != nil {
if _, ok := e.(addrer); ok {
// This panic was almost certainly caused by SetPanicOnFault.
err = fmt.Errorf("error reading module index: %v", e)
return
}
// The panic was likely not caused by SetPanicOnFault.
panic(e)
}
}()
gotVersion, unread, _ := bytes.Cut(data, []byte{'\n'})
if string(gotVersion) != indexVersion {
return nil, fmt.Errorf("bad index version string: %q", gotVersion)
}
stringTableOffset, unread := binary.LittleEndian.Uint32(unread[:4]), unread[4:]
st := newStringTable(data[stringTableOffset:])
d := decoder{unread, st}
numPackages := d.int()
packagePaths := make([]string, numPackages)
for i := range packagePaths {
packagePaths[i] = d.string()
}
packageOffsets := make([]int, numPackages)
for i := range packageOffsets {
packageOffsets[i] = d.int()
}
packages := make(map[string]int, numPackages)
for i := range packagePaths {
packages[packagePaths[i]] = packageOffsets[i]
}
return &ModuleIndex{
moddir,
offsetDecoder{data, st},
packages,
packagePaths,
}, nil
}
// Returns a list of directory paths, relative to the modroot, for
// packages contained in the module index.
func (mi *ModuleIndex) Packages() []string {
return mi.packagePaths
}
// RelPath returns the path relative to the module's root.
func (mi *ModuleIndex) RelPath(path string) string {
return str.TrimFilePathPrefix(filepath.Clean(path), mi.modroot) // mi.modroot is already clean
}
// ImportPackage is the equivalent of build.Import given the information in ModuleIndex.
func (mi *ModuleIndex) Import(bctxt build.Context, relpath string, mode build.ImportMode) (p *build.Package, err error) {
rp := mi.indexPackage(relpath)
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("error reading module index: %v", e)
}
}()
ctxt := (*Context)(&bctxt)
p = &build.Package{}
p.ImportPath = "."
p.Dir = filepath.Join(mi.modroot, rp.dir)
var pkgerr error
switch ctxt.Compiler {
case "gccgo", "gc":
default:
// Save error for end of function.
pkgerr = fmt.Errorf("import %q: unknown compiler %q", p.Dir, ctxt.Compiler)
}
if p.Dir == "" {
return p, fmt.Errorf("import %q: import of unknown directory", p.Dir)
}
// goroot and gopath
inTestdata := func(sub string) bool {
return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || str.HasPathPrefix(sub, "testdata")
}
if !inTestdata(relpath) {
// In build.go, p.Root should only be set in the non-local-import case, or in
// GOROOT or GOPATH. Since module mode only calls Import with path set to "."
// and the module index doesn't apply outside modules, the GOROOT case is
// the only case where GOROOT needs to be set.
// But: p.Root is actually set in the local-import case outside GOROOT, if
// the directory is contained in GOPATH/src
// TODO(#37015): fix that behavior in go/build and remove the gopath case
// below.
if ctxt.GOROOT != "" && str.HasFilePathPrefix(p.Dir, cfg.GOROOTsrc) && p.Dir != cfg.GOROOTsrc {
p.Root = ctxt.GOROOT
p.Goroot = true
modprefix := str.TrimFilePathPrefix(mi.modroot, cfg.GOROOTsrc)
p.ImportPath = relpath
if modprefix != "" {
p.ImportPath = filepath.Join(modprefix, p.ImportPath)
}
}
for _, root := range ctxt.gopath() {
// TODO(matloob): do we need to reimplement the conflictdir logic?
// TODO(matloob): ctxt.hasSubdir evaluates symlinks, so it
// can be slower than we'd like. Find out if we can drop this
// logic before the release.
if sub, ok := ctxt.hasSubdir(filepath.Join(root, "src"), p.Dir); ok {
p.ImportPath = sub
p.Root = root
}
}
}
if p.Root != "" {
// Set GOROOT-specific fields (sometimes for modules in a GOPATH directory).
// The fields set below (SrcRoot, PkgRoot, BinDir, PkgTargetRoot, and PkgObj)
// are only set in build.Import if p.Root != "". As noted in the comment
// on setting p.Root above, p.Root should only be set in the GOROOT case for the
// set of packages we care about, but is also set for modules in a GOPATH src
// directory.
var pkgtargetroot string
var pkga string
suffix := ""
if ctxt.InstallSuffix != "" {
suffix = "_" + ctxt.InstallSuffix
}
switch ctxt.Compiler {
case "gccgo":
pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
dir, elem := path.Split(p.ImportPath)
pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a"
case "gc":
pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
pkga = pkgtargetroot + "/" + p.ImportPath + ".a"
}
p.SrcRoot = ctxt.joinPath(p.Root, "src")
p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
p.BinDir = ctxt.joinPath(p.Root, "bin")
if pkga != "" {
p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot)
p.PkgObj = ctxt.joinPath(p.Root, pkga)
}
}
if rp.error != nil {
if errors.Is(rp.error, errCannotFindPackage) && ctxt.Compiler == "gccgo" && p.Goroot {
return p, nil
}
return p, rp.error
}
if mode&build.FindOnly != 0 {
return p, pkgerr
}
// We need to do a second round of bad file processing.
var badGoError error
badFiles := make(map[string]bool)
badFile := func(name string, err error) {
if badGoError == nil {
badGoError = err
}
if !badFiles[name] {
p.InvalidGoFiles = append(p.InvalidGoFiles, name)
badFiles[name] = true
}
}
var Sfiles []string // files with ".S"(capital S)/.sx(capital s equivalent for case insensitive filesystems)
var firstFile string
embedPos := make(map[string][]token.Position)
testEmbedPos := make(map[string][]token.Position)
xTestEmbedPos := make(map[string][]token.Position)
importPos := make(map[string][]token.Position)
testImportPos := make(map[string][]token.Position)
xTestImportPos := make(map[string][]token.Position)
allTags := make(map[string]bool)
for _, tf := range rp.sourceFiles {
name := tf.name()
if error := tf.error(); error != "" {
badFile(name, errors.New(tf.error()))
continue
} else if parseError := tf.parseError(); parseError != "" {
badFile(name, parseErrorFromString(tf.parseError()))
// Fall through: we still want to list files with parse errors.
}
var shouldBuild = true
if !ctxt.goodOSArchFile(name, allTags) && !ctxt.UseAllFiles {
shouldBuild = false
} else if goBuildConstraint := tf.goBuildConstraint(); goBuildConstraint != "" {
x, err := constraint.Parse(goBuildConstraint)
if err != nil {
return p, fmt.Errorf("%s: parsing //go:build line: %v", name, err)
}
shouldBuild = ctxt.eval(x, allTags)
} else if plusBuildConstraints := tf.plusBuildConstraints(); len(plusBuildConstraints) > 0 {
for _, text := range plusBuildConstraints {
if x, err := constraint.Parse(text); err == nil {
if !ctxt.eval(x, allTags) {
shouldBuild = false
}
}
}
}
ext := nameExt(name)
if !shouldBuild || tf.ignoreFile() {
if ext == ".go" {
p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
} else if fileListForExt((*Package)(p), ext) != nil {
p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, name)
}
continue
}
// Going to save the file. For non-Go files, can stop here.
switch ext {
case ".go":
// keep going
case ".S", ".sx":
// special case for cgo, handled at end
Sfiles = append(Sfiles, name)
continue
default:
if list := fileListForExt((*Package)(p), ext); list != nil {
*list = append(*list, name)
}
continue
}
pkg := tf.pkgName()
if pkg == "documentation" {
p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
continue
}
isTest := strings.HasSuffix(name, "_test.go")
isXTest := false
if isTest && strings.HasSuffix(tf.pkgName(), "_test") && p.Name != tf.pkgName() {
isXTest = true
pkg = pkg[:len(pkg)-len("_test")]
}
if !isTest && tf.binaryOnly() {
p.BinaryOnly = true
}
if p.Name == "" {
p.Name = pkg
firstFile = name
} else if pkg != p.Name {
// TODO(#45999): The choice of p.Name is arbitrary based on file iteration
// order. Instead of resolving p.Name arbitrarily, we should clear out the
// existing Name and mark the existing files as also invalid.
badFile(name, &MultiplePackageError{
Dir: p.Dir,
Packages: []string{p.Name, pkg},
Files: []string{firstFile, name},
})
}
// Grab the first package comment as docs, provided it is not from a test file.
if p.Doc == "" && !isTest && !isXTest {
if synopsis := tf.synopsis(); synopsis != "" {
p.Doc = synopsis
}
}
// Record Imports and information about cgo.
isCgo := false
imports := tf.imports()
for _, imp := range imports {
if imp.path == "C" {
if isTest {
badFile(name, fmt.Errorf("use of cgo in test %s not supported", name))
continue
}
isCgo = true
}
}
if directives := tf.cgoDirectives(); directives != "" {
if err := ctxt.saveCgo(name, (*Package)(p), directives); err != nil {
badFile(name, err)
}
}
var fileList *[]string
var importMap, embedMap map[string][]token.Position
switch {
case isCgo:
allTags["cgo"] = true
if ctxt.CgoEnabled {
fileList = &p.CgoFiles
importMap = importPos
embedMap = embedPos
} else {
// Ignore Imports and Embeds from cgo files if cgo is disabled.
fileList = &p.IgnoredGoFiles
}
case isXTest:
fileList = &p.XTestGoFiles
importMap = xTestImportPos
embedMap = xTestEmbedPos
case isTest:
fileList = &p.TestGoFiles
importMap = testImportPos
embedMap = testEmbedPos
default:
fileList = &p.GoFiles
importMap = importPos
embedMap = embedPos
}
*fileList = append(*fileList, name)
if importMap != nil {
for _, imp := range imports {
importMap[imp.path] = append(importMap[imp.path], imp.position)
}
}
if embedMap != nil {
for _, e := range tf.embeds() {
embedMap[e.pattern] = append(embedMap[e.pattern], e.position)
}
}
}
p.EmbedPatterns, p.EmbedPatternPos = cleanDecls(embedPos)
p.TestEmbedPatterns, p.TestEmbedPatternPos = cleanDecls(testEmbedPos)
p.XTestEmbedPatterns, p.XTestEmbedPatternPos = cleanDecls(xTestEmbedPos)
p.Imports, p.ImportPos = cleanDecls(importPos)
p.TestImports, p.TestImportPos = cleanDecls(testImportPos)
p.XTestImports, p.XTestImportPos = cleanDecls(xTestImportPos)
for tag := range allTags {
p.AllTags = append(p.AllTags, tag)
}
sort.Strings(p.AllTags)
if len(p.CgoFiles) > 0 {
p.SFiles = append(p.SFiles, Sfiles...)
sort.Strings(p.SFiles)
} else {
p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, Sfiles...)
sort.Strings(p.IgnoredOtherFiles)
}
if badGoError != nil {
return p, badGoError
}
if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
return p, &build.NoGoError{Dir: p.Dir}
}
return p, pkgerr
}
// IsStandardPackage reports whether path is a standard package
// for the goroot and compiler using the module index if possible,
// and otherwise falling back to internal/goroot.IsStandardPackage
func IsStandardPackage(goroot_, compiler, path string) bool {
if !enabled || compiler != "gc" {
return goroot.IsStandardPackage(goroot_, compiler, path)
}
reldir := filepath.FromSlash(path) // relative dir path in module index for package
modroot := filepath.Join(goroot_, "src")
if str.HasFilePathPrefix(reldir, "cmd") {
reldir = str.TrimFilePathPrefix(reldir, "cmd")
modroot = filepath.Join(modroot, "cmd")
}
mod, err := Get(modroot)
if err != nil {
return goroot.IsStandardPackage(goroot_, compiler, path)
}
pkgs := mod.Packages()
i := sort.SearchStrings(pkgs, reldir)
return i != len(pkgs) && pkgs[i] == reldir
}
// IsDirWithGoFiles is the equivalent of fsys.IsDirWithGoFiles using the information in the index.
func (mi *ModuleIndex) IsDirWithGoFiles(relpath string) (_ bool, err error) {
rp := mi.indexPackage(relpath)
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("error reading module index: %v", e)
}
}()
for _, sf := range rp.sourceFiles {
if strings.HasSuffix(sf.name(), ".go") {
return true, nil
}
}
return false, nil
}
// ScanDir implements imports.ScanDir using the information in the index.
func (mi *ModuleIndex) ScanDir(path string, tags map[string]bool) (sortedImports []string, sortedTestImports []string, err error) {
rp := mi.indexPackage(path)
// TODO(matloob) dir should eventually be relative to indexed directory
// TODO(matloob): skip reading raw package and jump straight to data we need?
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("error reading module index: %v", e)
}
}()
imports_ := make(map[string]bool)
testImports := make(map[string]bool)
numFiles := 0
Files:
for _, sf := range rp.sourceFiles {
name := sf.name()
if strings.HasPrefix(name, "_") || strings.HasPrefix(name, ".") || !strings.HasSuffix(name, ".go") || !imports.MatchFile(name, tags) {
continue
}
// The following section exists for backwards compatibility reasons:
// scanDir ignores files with import "C" when collecting the list
// of imports unless the "cgo" tag is provided. The following comment
// is copied from the original.
//
// import "C" is implicit requirement of cgo tag.
// When listing files on the command line (explicitFiles=true)
// we do not apply build tag filtering but we still do apply
// cgo filtering, so no explicitFiles check here.
// Why? Because we always have, and it's not worth breaking
// that behavior now.
imps := sf.imports() // TODO(matloob): directly read import paths to avoid the extra strings?
for _, imp := range imps {
if imp.path == "C" && !tags["cgo"] && !tags["*"] {
continue Files
}
}
if !shouldBuild(sf, tags) {
continue
}
numFiles++
m := imports_
if strings.HasSuffix(name, "_test.go") {
m = testImports
}
for _, p := range imps {
m[p.path] = true
}
}
if numFiles == 0 {
return nil, nil, imports.ErrNoGo
}
return keys(imports_), keys(testImports), nil
}
func keys(m map[string]bool) []string {
list := make([]string, 0, len(m))
for k := range m {
list = append(list, k)
}
sort.Strings(list)
return list
}
// implements imports.ShouldBuild in terms of an index sourcefile.
func shouldBuild(sf *sourceFile, tags map[string]bool) bool {
if goBuildConstraint := sf.goBuildConstraint(); goBuildConstraint != "" {
x, err := constraint.Parse(goBuildConstraint)
if err != nil {
return false
}
return imports.Eval(x, tags, true)
}
plusBuildConstraints := sf.plusBuildConstraints()
for _, text := range plusBuildConstraints {
if x, err := constraint.Parse(text); err == nil {
if imports.Eval(x, tags, true) == false {
return false
}
}
}
return true
}
// index package holds the information needed to access information in the
// index about a package.
type indexPackage struct {
error error
dir string // directory of the package relative to the modroot
// Source files
sourceFiles []*sourceFile
}
var errCannotFindPackage = errors.New("cannot find package")
// indexPackage returns an indexPackage constructed using the information in the ModuleIndex.
func (mi *ModuleIndex) indexPackage(path string) *indexPackage {
defer func() {
if e := recover(); e != nil {
base.Fatalf("error reading module index: %v", e)
}
}()
offset, ok := mi.packages[path]
if !ok {
return &indexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(mi.modroot, path))}
}
// TODO(matloob): do we want to lock on the module index?
d := mi.od.decoderAt(offset)
rp := new(indexPackage)
if errstr := d.string(); errstr != "" {
rp.error = errors.New(errstr)
}
rp.dir = d.string()
numSourceFiles := d.uint32()
rp.sourceFiles = make([]*sourceFile, numSourceFiles)
for i := uint32(0); i < numSourceFiles; i++ {
offset := d.uint32()
rp.sourceFiles[i] = &sourceFile{
od: mi.od.offsetDecoderAt(offset),
}
}
return rp
}
// sourceFile represents the information of a given source file in the module index.
type sourceFile struct {
od offsetDecoder // od interprets all offsets relative to the start of the source file's data
onceReadImports sync.Once
savedImports []rawImport // saved imports so that they're only read once
}
// Offsets for fields in the sourceFile.
const (
sourceFileError = 4 * iota
sourceFileParseError
sourceFileSynopsis
sourceFileName
sourceFilePkgName
sourceFileIgnoreFile
sourceFileBinaryOnly
sourceFileCgoDirectives
sourceFileGoBuildConstraint
sourceFileNumPlusBuildConstraints
)
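// Each constant above is the byte offset of a little-endian uint32 within a
// sourceFile's encoded data; string-valued fields hold an offset into the
// index's string table (resolved by offsetDecoder.stringAt).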
func (sf *sourceFile) error() string {
return sf.od.stringAt(sourceFileError)
}
func (sf *sourceFile) parseError() string {
return sf.od.stringAt(sourceFileParseError)
}
func (sf *sourceFile) name() string {
return sf.od.stringAt(sourceFileName)
}
func (sf *sourceFile) synopsis() string {
return sf.od.stringAt(sourceFileSynopsis)
}
func (sf *sourceFile) pkgName() string {
return sf.od.stringAt(sourceFilePkgName)
}
func (sf *sourceFile) ignoreFile() bool {
return sf.od.boolAt(sourceFileIgnoreFile)
}
func (sf *sourceFile) binaryOnly() bool {
return sf.od.boolAt(sourceFileBinaryOnly)
}
func (sf *sourceFile) cgoDirectives() string {
return sf.od.stringAt(sourceFileCgoDirectives)
}
func (sf *sourceFile) goBuildConstraint() string {
return sf.od.stringAt(sourceFileGoBuildConstraint)
}
func (sf *sourceFile) plusBuildConstraints() []string {
d := sf.od.decoderAt(sourceFileNumPlusBuildConstraints)
n := d.int()
ret := make([]string, n)
for i := 0; i < n; i++ {
ret[i] = d.string()
}
return ret
}
func importsOffset(numPlusBuildConstraints int) int {
// 4 bytes per uint32, add one to advance past numPlusBuildConstraints itself
return sourceFileNumPlusBuildConstraints + 4*(numPlusBuildConstraints+1)
}
func (sf *sourceFile) importsOffset() int {
numPlusBuildConstraints := sf.od.intAt(sourceFileNumPlusBuildConstraints)
return importsOffset(numPlusBuildConstraints)
}
func embedsOffset(importsOffset, numImports int) int {
// 4 bytes per uint32; 1 to advance past numImports itself, and 5 uint32s per import
return importsOffset + 4*(1+(5*numImports))
}
func (sf *sourceFile) embedsOffset() int {
importsOffset := sf.importsOffset()
numImports := sf.od.intAt(importsOffset)
return embedsOffset(importsOffset, numImports)
}
func (sf *sourceFile) imports() []rawImport {
sf.onceReadImports.Do(func() {
importsOffset := sf.importsOffset()
d := sf.od.decoderAt(importsOffset)
numImports := d.int()
ret := make([]rawImport, numImports)
for i := 0; i < numImports; i++ {
ret[i].path = d.string()
ret[i].position = d.tokpos()
}
sf.savedImports = ret
})
return sf.savedImports
}
func (sf *sourceFile) embeds() []embed {
embedsOffset := sf.embedsOffset()
d := sf.od.decoderAt(embedsOffset)
numEmbeds := d.int()
ret := make([]embed, numEmbeds)
for i := range ret {
pattern := d.string()
pos := d.tokpos()
ret[i] = embed{pattern, pos}
}
return ret
}
// A decoder reads from the current position of the file and advances its position as it
// reads.
type decoder struct {
b []byte
st *stringTable
}
func (d *decoder) uint32() uint32 {
n := binary.LittleEndian.Uint32(d.b[:4])
d.b = d.b[4:]
return n
}
func (d *decoder) int() int {
n := d.uint32()
if int64(n) > math.MaxInt {
base.Fatalf("go: attempting to read a uint32 from the index that overflows int")
}
return int(n)
}
func (d *decoder) tokpos() token.Position {
file := d.string()
offset := d.int()
line := d.int()
column := d.int()
return token.Position{
Filename: file,
Offset: offset,
Line: line,
Column: column,
}
}
func (d *decoder) string() string {
return d.st.string(d.int())
}
// An offset decoder reads information at an offset from its position in the file.
// It's either offset from the beginning of the index, or the beginning of a sourceFile's data.
type offsetDecoder struct {
b []byte
st *stringTable
}
func (od *offsetDecoder) uint32At(offset int) uint32 {
if offset > len(od.b) {
base.Fatalf("go: trying to read from index file at offset higher than file length. This indicates a corrupt offset file in the cache.")
}
return binary.LittleEndian.Uint32(od.b[offset:])
}
func (od *offsetDecoder) intAt(offset int) int {
n := od.uint32At(offset)
if int64(n) > math.MaxInt {
base.Fatalf("go: attempting to read a uint32 from the index that overflows int")
}
return int(n)
}
func (od *offsetDecoder) boolAt(offset int) bool {
switch v := od.uint32At(offset); v {
case 0:
return false
case 1:
return true
default:
base.Fatalf("go: invalid bool value in index file encoding: %v", v)
}
panic("unreachable")
}
func (od *offsetDecoder) stringAt(offset int) string {
return od.st.string(od.intAt(offset))
}
func (od *offsetDecoder) decoderAt(offset int) *decoder {
return &decoder{od.b[offset:], od.st}
}
func (od *offsetDecoder) offsetDecoderAt(offset uint32) offsetDecoder {
return offsetDecoder{od.b[offset:], od.st}
}
type stringTable struct {
b []byte
}
func newStringTable(b []byte) *stringTable {
return &stringTable{b: b}
}
func (st *stringTable) string(pos int) string {
if pos == 0 {
return ""
}
bb := st.b[pos:]
i := bytes.IndexByte(bb, 0)
if i == -1 {
panic("reached end of string table trying to read string")
}
s := asString(bb[:i])
return s
}
func asString(b []byte) string {
p := (*unsafeheader.Slice)(unsafe.Pointer(&b)).Data
var s string
hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
hdr.Data = p
hdr.Len = len(b)
return s
}
| [
"\"GODEBUG\""
]
| []
| [
"GODEBUG"
]
| [] | ["GODEBUG"] | go | 1 | 0 | |
libra/preprocessing/data_reader.py | import os
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.python.client import device_lib
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class DataReader():
def __init__(
self,
filepath,
trim=False,
trim_format='random',
trim_ratio=0.20):
'''
Constructor for the DataReader class.
The DataReader class creates a Pandas DataFrame object
based off of the datset's file extension, the format of trimming that needs to be applied to the
dataset (i.e. random sampling), and how much of the dataset needs to be trimmed.
:param filepath: The file path to the dataset (str)
:param trim: Whether the dataset should be trimmed or not (bool)
:param trim_format: The format/type of trimming (str)
:param trim_ratio: The proportion of the dataset that needs to be trimmed (float)
'''
self.filepath = filepath
self.trim = trim
self.trim_format = trim_format
self.trim_ratio = trim_ratio
def retrieve_file_size(self):
'''
Returns the size of the dataset in megabytes
:return: the size of the dataset in megabytes (float)
'''
file_stats = os.stat(self.filepath)
return (file_stats.st_size / (1024 ** 2))
def retrieve_extension(self):
'''
Returns the dataset's file extension (i.e. .csv, .json, .xlsx, etc.)
:return: the dataset's file extension (str)
'''
ext_index = self.filepath.rindex('.')
return self.filepath[ext_index:]
def data_generator(self):
'''
Creates a Pandas DataFrame object based off of the dataset's file extension, whether a GPU is available or not,
whether the user wants the dataset to be trimmed or not, and the format of trimming specified by the user.
NOTE: This function currently only supports .csv (comma-separated values file),
.xlsx (Microsoft Excel Open XML Spreadsheet), and .json (JavaScript Object Notation) files
If the user's device contains a GPU, the dataset won't be trimmed unless the user specifies so. If the user's
device doesn't contain a GPU, the dataset will automatically be trimmed regardless of whether the user specified
for the dataset to be trimmed or not in order to ensure efficient processing by the CPU.
If the user doesn't specify a specific form of trimming they want to apply to the dataset, random sampling will
be applied by default.
If the user doesn't specify a proportion/ratio of how much of the dataset needs to be trimmed, 20% of the
dataset will be trimmed by default.
:return: The dataset after being trimmed/pre-processed (Pandas DataFrame)
'''
if self.retrieve_extension() == '.csv':
df = pd.read_csv(self.filepath)
elif self.retrieve_extension() == '.xlsx':
df = pd.read_excel(self.filepath)
for data in df:
if df[data].dtype.name == 'int64':
df[data] = df[data].astype(float)
elif self.retrieve_extension() == '.json':
df = pd.read_json(self.filepath)
if not self.is_gpu_available():
self.trim = True
if self.trim:
if self.trim_format == 'random':
df = df.sample(frac=(1.0 - self.trim_ratio))
# elif self.trim_format == 'stratify':
# # ADD STRATIFYING TECHNIQUE HERE
# y = df[self.strat_col]
# del df[self.strat_col]
# print(df.shape)
# x_train, x_test, y_train, y_test = train_test_split(df, y, stratify=y, test_size=0.1, random_state=0)
#
# df = pd.concat([x_test, y_test])
# print(df.shape)
return df
def get_available_gpus(self):
'''
Returns a list of available GPUs on the current device running the program
:return: List of available GPUs on the current device (list of Strings)
'''
local_device_protos = device_lib.list_local_devices()
return [device.name for device in local_device_protos
if device.device_type == 'GPU']
def is_gpu_available(self):
'''
Returns a boolean value representing whether the current device has a GPU or not
:return: Whether the current device has a GPU or not (bool)
'''
return tf.test.gpu_device_name() != '' | []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
internal/docker/http.go | package docker
import (
"crypto/tls"
"crypto/x509"
"net/http"
"strings"
"github.com/containerssh/libcontainerssh/config"
)
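// getHTTPClient builds the HTTP client used to reach the Docker daemon: a
// TLS 1.3 client with client-certificate authentication when a CA cert,
// client cert and key are all configured, a plain client when the host is an
// http:// URL, and nil otherwise.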
func getHTTPClient(config config.DockerConfig) (*http.Client, error) {
var httpClient *http.Client = nil
if config.Connection.CaCert != "" && config.Connection.Key != "" && config.Connection.Cert != "" {
tlsConfig := &tls.Config{
MinVersion: tls.VersionTLS13,
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(config.Connection.CaCert))
tlsConfig.RootCAs = caCertPool
keyPair, err := tls.X509KeyPair([]byte(config.Connection.Cert), []byte(config.Connection.Key))
if err != nil {
return nil, err
}
tlsConfig.Certificates = []tls.Certificate{keyPair}
transport := &http.Transport{TLSClientConfig: tlsConfig}
httpClient = &http.Client{
Transport: transport,
Timeout: config.Timeouts.HTTP,
}
} else if strings.HasPrefix(config.Connection.Host, "http://") {
httpClient = &http.Client{
Transport: http.DefaultTransport,
Timeout: config.Timeouts.HTTP,
}
}
return httpClient, nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
Mcarfix/wsgi.py | """
WSGI config for Mcarfix project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Mcarfix.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
main.go | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"regexp"
"strings"
)
const (
defaultGCRHost = "gcr.io"
)
var (
re = regexp.MustCompile(`^/v2/`)
realm = regexp.MustCompile(`realm="(.*?)"`)
ctxKeyOriginalHost = "original-host"
)
type registryConfig struct {
host string
repoPrefix string
}
type StorageProxy struct {
p *httputil.ReverseProxy
}
func (ph *StorageProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
log.Println(r.URL)
log.Println(r.Header)
log.Println(r.Host)
log.Println(r.Method)
ph.p.ServeHTTP(w, r)
}
func main() {
remote, err := url.Parse("https://storage.googleapis.com")
if err != nil {
panic(err)
}
// Proxy /storage to storage.googleapis.com for image downloads.
proxy := httputil.NewSingleHostReverseProxy(remote)
originalDirector := proxy.Director
proxy.Director = func(req *http.Request) {
originalDirector(req)
req.Host = "storage.googleapis.com"
req.URL.Path = strings.TrimPrefix(req.URL.Path, "/storage")
}
proxy.ModifyResponse = func(r *http.Response) error {
// Ensure "transfer-encoding: chunked" as Google Cloud Run only allows 32 MB responses if they are not chunked.
r.Header.Del("content-length")
return nil
}
port := os.Getenv("PORT")
if port == "" {
log.Fatal("PORT environment variable not specified")
}
browserRedirects := os.Getenv("DISABLE_BROWSER_REDIRECTS") == ""
registryHost := os.Getenv("REGISTRY_HOST")
if registryHost == "" {
log.Fatal("REGISTRY_HOST environment variable not specified (example: gcr.io)")
}
repoPrefix := os.Getenv("REPO_PREFIX")
if repoPrefix == "" {
log.Fatal("REPO_PREFIX environment variable not specified")
}
reg := registryConfig{
host: registryHost,
repoPrefix: repoPrefix,
}
tokenEndpoint, err := discoverTokenService(reg.host)
if err != nil {
log.Fatalf("target registry's token endpoint could not be discovered: %+v", err)
}
log.Printf("discovered token endpoint for backend registry: %s", tokenEndpoint)
var auth authenticator
if basic := os.Getenv("AUTH_HEADER"); basic != "" {
auth = authHeader(basic)
} else if gcpKey := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"); gcpKey != "" {
b, err := ioutil.ReadFile(gcpKey)
if err != nil {
log.Fatalf("could not read key file from %s: %+v", gcpKey, err)
}
log.Printf("using specified service account json key to authenticate proxied requests")
auth = authHeader("Basic " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("_json_key:%s", string(b)))))
}
mux := http.NewServeMux()
if browserRedirects {
mux.Handle("/", browserRedirectHandler(reg))
}
if tokenEndpoint != "" {
mux.Handle("/_token", tokenProxyHandler(tokenEndpoint, repoPrefix))
}
mux.Handle("/v2/", registryAPIProxy(reg, auth))
mux.Handle("/storage/", &StorageProxy{proxy})
addr := ":" + port
handler := captureHostHeader(mux)
log.Printf("starting to listen on %s", addr)
if cert, key := os.Getenv("TLS_CERT"), os.Getenv("TLS_KEY"); cert != "" && key != "" {
err = http.ListenAndServeTLS(addr, cert, key, handler)
} else {
err = http.ListenAndServe(addr, handler)
}
if err != http.ErrServerClosed {
log.Fatalf("listen error: %+v", err)
}
log.Printf("server shutdown successfully")
}
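// discoverTokenService fetches https://<registryHost>/v2/ and extracts the
// token endpoint from the realm of the Www-Authenticate challenge. For a
// header like (illustrative value)
//
//	Bearer realm="https://gcr.io/v2/token",service="gcr.io"
//
// it returns "https://gcr.io/v2/token".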
func discoverTokenService(registryHost string) (string, error) {
url := fmt.Sprintf("https://%s/v2/", registryHost)
resp, err := http.Get(url)
if err != nil {
return "", fmt.Errorf("failed to query the registry host %s: %+v", registryHost, err)
}
hdr := resp.Header.Get("www-authenticate")
if hdr == "" {
return "", fmt.Errorf("www-authenticate header not returned from %s, cannot locate token endpoint", url)
}
matches := realm.FindStringSubmatch(hdr)
if len(matches) == 0 {
return "", fmt.Errorf("cannot locate 'realm' in %s response header www-authenticate: %s", url, hdr)
}
return matches[1], nil
}
// captureHostHeader is a middleware to capture Host header in a context key.
func captureHostHeader(next http.Handler) http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
ctx := context.WithValue(req.Context(), ctxKeyOriginalHost, req.Host)
next.ServeHTTP(rw, req.WithContext(ctx))
})
}
// tokenProxyHandler proxies the token requests to the specified token service.
// It adjusts the ?scope= parameter in the query from "repository:foo:..." to
// "repository:repoPrefix/foo:.." and reverse proxies the query to the specified
// tokenEndpoint.
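//
// For example (illustrative values): with repoPrefix "my-project", a request
// for scope "repository:busybox:pull" is forwarded with
// scope "repository:my-project/busybox:pull".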
func tokenProxyHandler(tokenEndpoint, repoPrefix string) http.HandlerFunc {
return (&httputil.ReverseProxy{
FlushInterval: -1,
Director: func(r *http.Request) {
orig := r.URL.String()
q := r.URL.Query()
scope := q.Get("scope")
if scope == "" {
return
}
newScope := strings.Replace(scope, "repository:", fmt.Sprintf("repository:%s/", repoPrefix), 1)
q.Set("scope", newScope)
u, _ := url.Parse(tokenEndpoint)
u.RawQuery = q.Encode()
r.URL = u
log.Printf("tokenProxyHandler: rewrote url:%s into:%s", orig, r.URL)
r.Host = u.Host
},
}).ServeHTTP
}
// browserRedirectHandler redirects a request like example.com/my-image to
// REGISTRY_HOST/my-image, which shows a public UI for browsing the registry.
// This works only on registries that support a web UI when the image name is
// entered into the browser, like GCR (gcr.io/google-containers/busybox).
func browserRedirectHandler(cfg registryConfig) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
url := fmt.Sprintf("https://%s/%s%s", cfg.host, cfg.repoPrefix, r.RequestURI)
http.Redirect(w, r, url, http.StatusTemporaryRedirect)
}
}
// registryAPIProxy returns a reverse proxy to the specified registry.
func registryAPIProxy(cfg registryConfig, auth authenticator) http.HandlerFunc {
return (&httputil.ReverseProxy{
FlushInterval: -1,
Director: rewriteRegistryV2URL(cfg),
Transport: ®istryRoundtripper{
auth: auth,
},
}).ServeHTTP
}
// handleRegistryAPIVersion signals docker-registry v2 API on /v2/ endpoint.
func handleRegistryAPIVersion(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Docker-Distribution-API-Version", "registry/2.0")
fmt.Fprint(w, "ok")
}
// rewriteRegistryV2URL rewrites request.URL like /v2/* that come into the server
// into https://[GCR_HOST]/v2/[PROJECT_ID]/*. It leaves /v2/ as is.
func rewriteRegistryV2URL(c registryConfig) func(*http.Request) {
return func(req *http.Request) {
u := req.URL.String()
req.Host = c.host
req.URL.Scheme = "https"
req.URL.Host = c.host
if req.URL.Path != "/v2/" {
req.URL.Path = re.ReplaceAllString(req.URL.Path, fmt.Sprintf("/v2/%s/", c.repoPrefix))
}
log.Printf("rewrote url: %s into %s", u, req.URL)
}
}
type registryRoundtripper struct {
auth authenticator
}
func (rrt *registryRoundtripper) RoundTrip(req *http.Request) (*http.Response, error) {
if req.Method == http.MethodHead {
resp := &http.Response{
StatusCode: http.StatusBadRequest,
Body: ioutil.NopCloser(bytes.NewBufferString("HEAD not supported")),
Header: make(http.Header),
}
resp.Header.Set("X-Error", "HEAD requests are not supported")
return resp, nil
}
log.Printf("request received. url=%s", req.URL)
if rrt.auth != nil {
req.Header.Set("Authorization", rrt.auth.AuthHeader())
}
origHost := req.Context().Value(ctxKeyOriginalHost).(string)
if ua := req.Header.Get("user-agent"); ua != "" {
req.Header.Set("user-agent", "gcr-proxy/0.1 customDomain/"+origHost+" "+ua)
}
resp, err := http.DefaultTransport.RoundTrip(req)
if err == nil {
log.Printf("request completed (status=%d) url=%s", resp.StatusCode, req.URL)
} else {
log.Printf("request failed with error: %+v", err)
return nil, err
}
// Google Artifact Registry sends a "location: /artifacts-downloads/..." URL
// to download blobs. We don't want these routed to the proxy itself.
if locHdr := resp.Header.Get("location"); req.Method == http.MethodGet &&
resp.StatusCode == http.StatusFound && strings.HasPrefix(locHdr, "/") {
resp.Header.Set("location", req.URL.Scheme+"://"+req.URL.Host+locHdr)
}
updateTokenEndpoint(resp, origHost)
updateLocationHeader(resp, origHost)
return resp, nil
}
// updateLocationHeader modifies the response header like:
// Location: https://storage.googleapis.com/xyz
// to point to the internal Google Cloud Storage proxy under /storage
func updateLocationHeader(resp *http.Response, host string) {
replace := "https://storage.googleapis.com"
v := resp.Header.Get("Location")
if v == "" {
return
}
if strings.HasPrefix(v, replace) {
newHost := fmt.Sprintf("https://%s/storage", host)
resp.Header.Set("Location", strings.Replace(v, replace, newHost, 1))
}
}
// updateTokenEndpoint modifies the response header like:
// Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io"
// to point to the https://host/_token endpoint to force using the local token
// endpoint proxy.
func updateTokenEndpoint(resp *http.Response, host string) {
v := resp.Header.Get("www-authenticate")
if v == "" {
return
}
cur := fmt.Sprintf("https://%s/_token", host)
resp.Header.Set("www-authenticate", realm.ReplaceAllString(v, fmt.Sprintf(`realm="%s"`, cur)))
}
type authenticator interface {
AuthHeader() string
}
type authHeader string
func (b authHeader) AuthHeader() string { return string(b) }
| [
"\"PORT\"",
"\"DISABLE_BROWSER_REDIRECTS\"",
"\"REGISTRY_HOST\"",
"\"REPO_PREFIX\"",
"\"AUTH_HEADER\"",
"\"GOOGLE_APPLICATION_CREDENTIALS\"",
"\"TLS_CERT\"",
"\"TLS_KEY\""
]
| []
| [
"PORT",
"REPO_PREFIX",
"REGISTRY_HOST",
"GOOGLE_APPLICATION_CREDENTIALS",
"TLS_CERT",
"DISABLE_BROWSER_REDIRECTS",
"AUTH_HEADER",
"TLS_KEY"
]
| [] | ["PORT", "REPO_PREFIX", "REGISTRY_HOST", "GOOGLE_APPLICATION_CREDENTIALS", "TLS_CERT", "DISABLE_BROWSER_REDIRECTS", "AUTH_HEADER", "TLS_KEY"] | go | 8 | 0 | |
fantasy_command/migrate.py | """
Migration commands
==================
"""
import importlib
import os
import sys
import click
from . import ff, with_appcontext, app
def get_migrations_root(migrations_root):
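"""Resolve the migrations directory: the explicit argument if given, else
$FANTASY_MIGRATION_PATH, else $FANTASY_WORKSPACE/migrations."""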
migrations_root = migrations_root or os.path.join(
os.environ.get('FANTASY_MIGRATION_PATH',
os.environ['FANTASY_WORKSPACE']), 'migrations')
return os.path.expanduser(migrations_root)
@ff.command()
@click.option('--migrations-root', type=click.Path(exists=False))
@with_appcontext
def makemigrations(migrations_root):
"""a command same as django makemigrations
migrations path search order:
1. migrations_root set by user
1. environment: FANTASY_MIGRATION_PATH
1. environment: FANTASY_WORKSPACE + /migrations
"""
from flask_migrate import (Migrate, init as migrate_init,
migrate as migrate_exec)
migrations_root = get_migrations_root(migrations_root)
mig = Migrate(app, app.db, directory=migrations_root)
if not os.path.exists(migrations_root):
migrate_init(migrations_root)
pass
models_file = os.path.join(migrations_root, 'models.txt')
if not os.path.exists(models_file):
with open(models_file, 'w') as fw:
fw.write('# add module name in this file.')
pass
pass
with open(models_file, 'r') as fp:
modules = fp.readlines()
pass
modules = filter(lambda x: x.strip("\n"), modules)
modules = map(lambda x: x.strip("\n").split("#")[0].strip(), modules)
modules = list(filter(lambda x: x, modules))
if not modules:
click.echo(
click.style('No model found, '
'skipping migration creation... '
'You need to edit the %s file to list your modules' % models_file,
fg='yellow'))
sys.exit(0)
for m in modules:
importlib.import_module(m + '.models')
pass
migrate_exec(migrations_root)
mig.init_app(app, app.db)
pass
@ff.command()
@click.option('--migrations-root', type=click.Path(exists=False))
@with_appcontext
def migrate(migrations_root):
"""a command same as django migrate
..note::
if database not exist, will create it.
the default charset use
"""
from flask_migrate import Migrate, upgrade as migrate_upgrade
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.engine.url import make_url
from sqlalchemy_utils import database_exists, create_database
if not app.config['SQLALCHEMY_DATABASE_URI']:
click.echo(
click.style(
'no SQLALCHEMY_DATABASE_URI config found,skip migrate...',
fg='red'))
sys.exit(-1)
dsn = make_url(app.config['SQLALCHEMY_DATABASE_URI'])
if not database_exists(dsn):
create_database(dsn,
encoding=app.config.get('SQLALCHEMY_DATABASE_CHARSET',
'utf8mb4'))
pass
migrations_root = get_migrations_root(migrations_root)
if not os.path.exists(migrations_root):
click.echo(
click.style('migration files not exist,skip migrate...', fg='red'))
sys.exit(-1)
db = SQLAlchemy()
mig = Migrate(app, db, directory=migrations_root)
mig.init_app(app, db)
migrate_upgrade(migrations_root)
pass
| []
| []
| [
"FANTASY_MIGRATION_PATH",
"FANTASY_WORKSPACE"
]
| [] | ["FANTASY_MIGRATION_PATH", "FANTASY_WORKSPACE"] | python | 2 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "alternative_backend.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
blob/gcsblob/gcsblob.go | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package gcsblob provides a blob implementation that uses GCS. Use OpenBucket
// to construct a *blob.Bucket.
//
// URLs
//
// For blob.OpenBucket, gcsblob registers for the scheme "gs".
// The default URL opener will set up a connection using default credentials
// from the environment, as described in
// https://cloud.google.com/docs/authentication/production.
// Some environments, such as GCE, come without a private key. In such cases
// the IAM Credentials API will be configured for use in Options.MakeSignBytes,
// which will introduce latency to any and all calls to bucket.SignedURL
// that you can avoid by installing a service account credentials file or
// obtaining and configuring a private key:
// https://cloud.google.com/iam/docs/creating-managing-service-account-keys
//
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information.
//
// Escaping
//
// Go CDK supports all UTF-8 strings; to make this work with services lacking
// full UTF-8 support, strings must be escaped (during writes) and unescaped
// (during reads). The following escapes are performed for gcsblob:
// - Blob keys: ASCII characters 10 and 13 are escaped to "__0x<hex>__".
// Additionally, the "/" in "../" is escaped in the same way.
//
// As
//
// gcsblob exposes the following types for As:
// - Bucket: *storage.Client
// - Error: *googleapi.Error
// - ListObject: storage.ObjectAttrs
// - ListOptions.BeforeList: *storage.Query
// - Reader: *storage.Reader
// - ReaderOptions.BeforeRead: **storage.ObjectHandle, *storage.Reader (if accessing both, must be in that order)
// - Attributes: storage.ObjectAttrs
// - CopyOptions.BeforeCopy: *CopyObjectHandles, *storage.Copier (if accessing both, must be in that order)
// - WriterOptions.BeforeWrite: **storage.ObjectHandle, *storage.Writer (if accessing both, must be in that order)
// - SignedURLOptions.BeforeSign: *storage.SignedURLOptions
package gcsblob // import "gocloud.dev/blob/gcsblob"
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"sort"
"strings"
"sync"
"time"
"cloud.google.com/go/compute/metadata"
"cloud.google.com/go/storage"
"github.com/google/wire"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"gocloud.dev/blob"
"gocloud.dev/blob/driver"
"gocloud.dev/gcerrors"
"gocloud.dev/gcp"
"gocloud.dev/internal/escape"
"gocloud.dev/internal/gcerr"
"gocloud.dev/internal/useragent"
)
const defaultPageSize = 1000
func init() {
blob.DefaultURLMux().RegisterBucket(Scheme, new(lazyCredsOpener))
}
// Set holds Wire providers for this package.
var Set = wire.NewSet(
wire.Struct(new(URLOpener), "Client"),
)
// readDefaultCredentials gets the field values from the supplied JSON data.
// For its possible formats please see
// https://cloud.google.com/iam/docs/creating-managing-service-account-keys#iam-service-account-keys-create-go
//
// Use "golang.org/x/oauth2/google".DefaultCredentials.JSON to get
// the contents of the preferred credential file.
//
// Returns null-values for fields that have not been obtained.
func readDefaultCredentials(credFileAsJSON []byte) (AccessID string, PrivateKey []byte) {
// For example, a credentials file as generated for service accounts through the web console.
var contentVariantA struct {
ClientEmail string `json:"client_email"`
PrivateKey string `json:"private_key"`
}
if err := json.Unmarshal(credFileAsJSON, &contentVariantA); err == nil {
AccessID = contentVariantA.ClientEmail
PrivateKey = []byte(contentVariantA.PrivateKey)
}
if AccessID != "" {
return
}
// If obtained through the REST API.
var contentVariantB struct {
Name string `json:"name"`
PrivateKeyData string `json:"privateKeyData"`
}
if err := json.Unmarshal(credFileAsJSON, &contentVariantB); err == nil {
nextFieldIsAccessID := false
for _, s := range strings.Split(contentVariantB.Name, "/") {
if nextFieldIsAccessID {
AccessID = s
break
}
nextFieldIsAccessID = s == "serviceAccounts"
}
PrivateKey = []byte(contentVariantB.PrivateKeyData)
}
return
}
// lazyCredsOpener obtains Application Default Credentials on the first call
// to OpenBucketURL.
type lazyCredsOpener struct {
init sync.Once
opener *URLOpener
err error
}
func (o *lazyCredsOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {
o.init.Do(func() {
var opts Options
var creds *google.Credentials
if os.Getenv("STORAGE_EMULATOR_HOST") != "" {
creds, _ = google.CredentialsFromJSON(ctx, []byte(`{"type": "service_account", "project_id": "my-project-id"}`))
} else {
var err error
creds, err = gcp.DefaultCredentials(ctx)
if err != nil {
o.err = err
return
}
// Populate default values from credentials files, where available.
opts.GoogleAccessID, opts.PrivateKey = readDefaultCredentials(creds.JSON)
// … else, on GCE, at least get the instance's main service account.
if opts.GoogleAccessID == "" && metadata.OnGCE() {
mc := metadata.NewClient(nil)
opts.GoogleAccessID, _ = mc.Email("")
}
}
// Provide a default factory for SignBytes for environments without a private key.
if len(opts.PrivateKey) <= 0 && opts.GoogleAccessID != "" {
iam := new(credentialsClient)
// We cannot hold onto the first context: it might've been cancelled already.
ctx := context.Background()
opts.MakeSignBytes = iam.CreateMakeSignBytesWith(ctx, opts.GoogleAccessID)
}
client, err := gcp.NewHTTPClient(gcp.DefaultTransport(), creds.TokenSource)
if err != nil {
o.err = err
return
}
o.opener = &URLOpener{Client: client, Options: opts}
})
if o.err != nil {
return nil, fmt.Errorf("open bucket %v: %v", u, o.err)
}
return o.opener.OpenBucketURL(ctx, u)
}
// Scheme is the URL scheme gcsblob registers its URLOpener under on
// blob.DefaultMux.
const Scheme = "gs"
// URLOpener opens GCS URLs like "gs://mybucket".
//
// The URL host is used as the bucket name.
//
// The following query parameters are supported:
//
// - access_id: sets Options.GoogleAccessID
// - private_key_path: path to read for Options.PrivateKey
//
// Currently their use is limited to SignedURL.
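//
// A minimal sketch of opening a bucket through this scheme (bucket name and
// query values below are placeholders):
//
//	ctx := context.Background()
//	b, err := blob.OpenBucket(ctx,
//		"gs://my-bucket?access_id=svc@project.iam.gserviceaccount.com&private_key_path=/secrets/key.pem")
//	if err != nil {
//		// handle error
//	}
//	defer b.Close()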
type URLOpener struct {
// Client must be set to a non-nil HTTP client authenticated with
// Cloud Storage scope or equivalent.
Client *gcp.HTTPClient
// Options specifies the default options to pass to OpenBucket.
Options Options
}
// OpenBucketURL opens the GCS bucket with the same name as the URL's host.
func (o *URLOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {
opts, err := o.forParams(ctx, u.Query())
if err != nil {
return nil, fmt.Errorf("open bucket %v: %v", u, err)
}
return OpenBucket(ctx, o.Client, u.Host, opts)
}
func (o *URLOpener) forParams(ctx context.Context, q url.Values) (*Options, error) {
for k := range q {
if k != "access_id" && k != "private_key_path" {
return nil, fmt.Errorf("invalid query parameter %q", k)
}
}
opts := new(Options)
*opts = o.Options
if accessID := q.Get("access_id"); accessID != "" && accessID != opts.GoogleAccessID {
opts.GoogleAccessID = accessID
opts.PrivateKey = nil // Clear any previous key unrelated to the new accessID.
// Clear this as well to prevent calls with the old and mismatched accessID.
opts.MakeSignBytes = nil
}
if keyPath := q.Get("private_key_path"); keyPath != "" {
pk, err := ioutil.ReadFile(keyPath)
if err != nil {
return nil, err
}
opts.PrivateKey = pk
} else if _, exists := q["private_key_path"]; exists {
// A possible default value has been cleared by setting this to an empty value:
// The private key might have expired, or falling back to SignBytes/MakeSignBytes
// is intentional such as for tests or involving a key stored in a HSM/TPM.
opts.PrivateKey = nil
}
return opts, nil
}
// Options sets options for constructing a *blob.Bucket backed by GCS.
type Options struct {
// GoogleAccessID represents the authorizer for SignedURL.
// Required to use SignedURL.
// See https://godoc.org/cloud.google.com/go/storage#SignedURLOptions.
GoogleAccessID string
// PrivateKey is the Google service account private key.
// Exactly one of PrivateKey or SignBytes must be non-nil to use SignedURL.
// See https://godoc.org/cloud.google.com/go/storage#SignedURLOptions.
PrivateKey []byte
// SignBytes is a function for implementing custom signing.
// Exactly one of PrivateKey, SignBytes, or MakeSignBytes must be non-nil to use SignedURL.
// See https://godoc.org/cloud.google.com/go/storage#SignedURLOptions.
SignBytes func([]byte) ([]byte, error)
// MakeSignBytes is a factory for functions that are being used in place of an empty SignBytes.
// If your implementation of 'SignBytes' needs a request context, set this instead.
MakeSignBytes func(requestCtx context.Context) SignBytesFunc
}
// SignBytesFunc is shorthand for the signature of Options.SignBytes.
type SignBytesFunc func([]byte) ([]byte, error)
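// A sketch of requesting a signed download URL once a bucket has been opened
// with exactly one of the signing options above (key and expiry are
// placeholders):
//
//	u, err := b.SignedURL(ctx, "dir/object.txt", &blob.SignedURLOptions{Expiry: 15 * time.Minute})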
// openBucket returns a GCS Bucket that communicates using the given HTTP client.
func openBucket(ctx context.Context, client *gcp.HTTPClient, bucketName string, opts *Options) (*bucket, error) {
if client == nil {
return nil, errors.New("gcsblob.OpenBucket: client is required")
}
if bucketName == "" {
return nil, errors.New("gcsblob.OpenBucket: bucketName is required")
}
clientOpts := []option.ClientOption{option.WithHTTPClient(useragent.HTTPClient(&client.Client, "blob"))}
if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" {
clientOpts = []option.ClientOption{
option.WithoutAuthentication(),
option.WithEndpoint("http://" + host + "/storage/v1/"),
option.WithHTTPClient(http.DefaultClient),
}
}
// We wrap the provided http.Client to add a Go CDK User-Agent.
c, err := storage.NewClient(ctx, clientOpts...)
if err != nil {
return nil, err
}
if opts == nil {
opts = &Options{}
}
return &bucket{name: bucketName, client: c, opts: opts}, nil
}
// OpenBucket returns a *blob.Bucket backed by an existing GCS bucket. See the
// package documentation for an example.
func OpenBucket(ctx context.Context, client *gcp.HTTPClient, bucketName string, opts *Options) (*blob.Bucket, error) {
drv, err := openBucket(ctx, client, bucketName, opts)
if err != nil {
return nil, err
}
return blob.NewBucket(drv), nil
}
// bucket represents a GCS bucket, which handles read, write and delete operations
// on objects within it.
type bucket struct {
name string
client *storage.Client
opts *Options
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// reader reads a GCS object. It implements driver.Reader.
type reader struct {
body io.ReadCloser
attrs driver.ReaderAttributes
raw *storage.Reader
}
func (r *reader) Read(p []byte) (int, error) {
return r.body.Read(p)
}
// Close closes the reader itself. It must be called when done reading.
func (r *reader) Close() error {
return r.body.Close()
}
func (r *reader) Attributes() *driver.ReaderAttributes {
return &r.attrs
}
func (r *reader) As(i interface{}) bool {
p, ok := i.(**storage.Reader)
if !ok {
return false
}
*p = r.raw
return true
}
func (b *bucket) ErrorCode(err error) gcerrors.ErrorCode {
if err == storage.ErrObjectNotExist {
return gcerrors.NotFound
}
if gerr, ok := err.(*googleapi.Error); ok {
switch gerr.Code {
case http.StatusForbidden:
return gcerrors.PermissionDenied
case http.StatusNotFound:
return gcerrors.NotFound
case http.StatusPreconditionFailed:
return gcerrors.FailedPrecondition
case http.StatusTooManyRequests:
return gcerrors.ResourceExhausted
}
}
return gcerrors.Unknown
}
func (b *bucket) Close() error {
return nil
}
// ListPaged implements driver.ListPaged.
func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) {
bkt := b.client.Bucket(b.name)
query := &storage.Query{
Prefix: escapeKey(opts.Prefix),
Delimiter: escapeKey(opts.Delimiter),
}
if opts.BeforeList != nil {
asFunc := func(i interface{}) bool {
p, ok := i.(**storage.Query)
if !ok {
return false
}
*p = query
return true
}
if err := opts.BeforeList(asFunc); err != nil {
return nil, err
}
}
pageSize := opts.PageSize
if pageSize == 0 {
pageSize = defaultPageSize
}
iter := bkt.Objects(ctx, query)
pager := iterator.NewPager(iter, pageSize, string(opts.PageToken))
var objects []*storage.ObjectAttrs
nextPageToken, err := pager.NextPage(&objects)
if err != nil {
return nil, err
}
page := driver.ListPage{NextPageToken: []byte(nextPageToken)}
if len(objects) > 0 {
page.Objects = make([]*driver.ListObject, len(objects))
for i, obj := range objects {
toCopy := obj
asFunc := func(val interface{}) bool {
p, ok := val.(*storage.ObjectAttrs)
if !ok {
return false
}
*p = *toCopy
return true
}
if obj.Prefix == "" {
// Regular blob.
page.Objects[i] = &driver.ListObject{
Key: unescapeKey(obj.Name),
ModTime: obj.Updated,
Size: obj.Size,
MD5: obj.MD5,
AsFunc: asFunc,
}
} else {
// "Directory".
page.Objects[i] = &driver.ListObject{
Key: unescapeKey(obj.Prefix),
IsDir: true,
AsFunc: asFunc,
}
}
}
// GCS always returns "directories" at the end; sort them.
sort.Slice(page.Objects, func(i, j int) bool {
return page.Objects[i].Key < page.Objects[j].Key
})
}
return &page, nil
}
// As implements driver.As.
func (b *bucket) As(i interface{}) bool {
p, ok := i.(**storage.Client)
if !ok {
return false
}
*p = b.client
return true
}
// As implements driver.ErrorAs.
func (b *bucket) ErrorAs(err error, i interface{}) bool {
switch v := err.(type) {
case *googleapi.Error:
if p, ok := i.(**googleapi.Error); ok {
*p = v
return true
}
}
return false
}
// Attributes implements driver.Attributes.
func (b *bucket) Attributes(ctx context.Context, key string) (*driver.Attributes, error) {
key = escapeKey(key)
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
attrs, err := obj.Attrs(ctx)
if err != nil {
return nil, err
}
return &driver.Attributes{
CacheControl: attrs.CacheControl,
ContentDisposition: attrs.ContentDisposition,
ContentEncoding: attrs.ContentEncoding,
ContentLanguage: attrs.ContentLanguage,
ContentType: attrs.ContentType,
Metadata: attrs.Metadata,
ModTime: attrs.Updated,
Size: attrs.Size,
MD5: attrs.MD5,
AsFunc: func(i interface{}) bool {
p, ok := i.(*storage.ObjectAttrs)
if !ok {
return false
}
*p = *attrs
return true
},
}, nil
}
// NewRangeReader implements driver.NewRangeReader.
func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *driver.ReaderOptions) (driver.Reader, error) {
key = escapeKey(key)
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
// Add an extra level of indirection so that BeforeRead can replace obj
// if needed. For example, ObjectHandle.If returns a new ObjectHandle.
// Also, make the Reader lazily in case this replacement happens.
objp := &obj
makeReader := func() (*storage.Reader, error) {
return (*objp).NewRangeReader(ctx, offset, length)
}
var r *storage.Reader
var rerr error
madeReader := false
if opts.BeforeRead != nil {
asFunc := func(i interface{}) bool {
if p, ok := i.(***storage.ObjectHandle); ok && !madeReader {
*p = objp
return true
}
if p, ok := i.(**storage.Reader); ok {
if !madeReader {
r, rerr = makeReader()
madeReader = true
if r == nil {
return false
}
}
*p = r
return true
}
return false
}
if err := opts.BeforeRead(asFunc); err != nil {
return nil, err
}
}
if !madeReader {
r, rerr = makeReader()
}
if rerr != nil {
return nil, rerr
}
return &reader{
body: r,
attrs: driver.ReaderAttributes{
ContentType: r.Attrs.ContentType,
ModTime: r.Attrs.LastModified,
Size: r.Attrs.Size,
},
raw: r,
}, nil
}
// escapeKey does all required escaping for UTF-8 strings to work with GCS.
func escapeKey(key string) string {
return escape.HexEscape(key, func(r []rune, i int) bool {
switch {
// GCS doesn't handle these characters (determined via experimentation).
case r[i] == 10 || r[i] == 13:
return true
// For "../", escape the trailing slash.
case i > 1 && r[i] == '/' && r[i-1] == '.' && r[i-2] == '.':
return true
}
return false
})
}
// unescapeKey reverses escapeKey.
func unescapeKey(key string) string {
return escape.HexUnescape(key)
}
// NewTypedWriter implements driver.NewTypedWriter.
func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) {
key = escapeKey(key)
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
// Add an extra level of indirection so that BeforeWrite can replace obj
// if needed. For example, ObjectHandle.If returns a new ObjectHandle.
// Also, make the Writer lazily in case this replacement happens.
objp := &obj
makeWriter := func() *storage.Writer {
w := (*objp).NewWriter(ctx)
w.CacheControl = opts.CacheControl
w.ContentDisposition = opts.ContentDisposition
w.ContentEncoding = opts.ContentEncoding
w.ContentLanguage = opts.ContentLanguage
w.ContentType = contentType
w.ChunkSize = bufferSize(opts.BufferSize)
w.Metadata = opts.Metadata
w.MD5 = opts.ContentMD5
return w
}
var w *storage.Writer
if opts.BeforeWrite != nil {
asFunc := func(i interface{}) bool {
if p, ok := i.(***storage.ObjectHandle); ok && w == nil {
*p = objp
return true
}
if p, ok := i.(**storage.Writer); ok {
if w == nil {
w = makeWriter()
}
*p = w
return true
}
return false
}
if err := opts.BeforeWrite(asFunc); err != nil {
return nil, err
}
}
if w == nil {
w = makeWriter()
}
return w, nil
}
// CopyObjectHandles holds the ObjectHandles for the destination and source
// of a Copy. It is used by the BeforeCopy As hook.
type CopyObjectHandles struct {
Dst, Src *storage.ObjectHandle
}
// Copy implements driver.Copy.
func (b *bucket) Copy(ctx context.Context, dstKey, srcKey string, opts *driver.CopyOptions) error {
dstKey = escapeKey(dstKey)
srcKey = escapeKey(srcKey)
bkt := b.client.Bucket(b.name)
// Add an extra level of indirection so that BeforeCopy can replace the
// dst or src ObjectHandles if needed.
// Also, make the Copier lazily in case this replacement happens.
handles := CopyObjectHandles{
Dst: bkt.Object(dstKey),
Src: bkt.Object(srcKey),
}
makeCopier := func() *storage.Copier {
return handles.Dst.CopierFrom(handles.Src)
}
var copier *storage.Copier
if opts.BeforeCopy != nil {
asFunc := func(i interface{}) bool {
if p, ok := i.(**CopyObjectHandles); ok && copier == nil {
*p = &handles
return true
}
if p, ok := i.(**storage.Copier); ok {
if copier == nil {
copier = makeCopier()
}
*p = copier
return true
}
return false
}
if err := opts.BeforeCopy(asFunc); err != nil {
return err
}
}
if copier == nil {
copier = makeCopier()
}
_, err := copier.Run(ctx)
return err
}
// Delete implements driver.Delete.
func (b *bucket) Delete(ctx context.Context, key string) error {
key = escapeKey(key)
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
return obj.Delete(ctx)
}
func (b *bucket) SignedURL(ctx context.Context, key string, dopts *driver.SignedURLOptions) (string, error) {
numSigners := 0
if b.opts.PrivateKey != nil {
numSigners++
}
if b.opts.SignBytes != nil {
numSigners++
}
if b.opts.MakeSignBytes != nil {
numSigners++
}
if b.opts.GoogleAccessID == "" || numSigners != 1 {
return "", gcerr.New(gcerr.Unimplemented, nil, 1, "gcsblob: to use SignedURL, you must call OpenBucket with a valid Options.GoogleAccessID and exactly one of Options.PrivateKey, Options.SignBytes, or Options.MakeSignBytes")
}
key = escapeKey(key)
opts := &storage.SignedURLOptions{
Expires: time.Now().Add(dopts.Expiry),
Method: dopts.Method,
ContentType: dopts.ContentType,
GoogleAccessID: b.opts.GoogleAccessID,
PrivateKey: b.opts.PrivateKey,
SignBytes: b.opts.SignBytes,
}
if b.opts.MakeSignBytes != nil {
opts.SignBytes = b.opts.MakeSignBytes(ctx)
}
if dopts.BeforeSign != nil {
asFunc := func(i interface{}) bool {
v, ok := i.(**storage.SignedURLOptions)
if ok {
*v = opts
}
return ok
}
if err := dopts.BeforeSign(asFunc); err != nil {
return "", err
}
}
return storage.SignedURL(b.name, key, opts)
}
func bufferSize(size int) int {
if size == 0 {
return googleapi.DefaultUploadChunkSize
} else if size > 0 {
return size
}
return 0 // disable buffering
}
| [
"\"STORAGE_EMULATOR_HOST\"",
"\"STORAGE_EMULATOR_HOST\""
]
| []
| [
"STORAGE_EMULATOR_HOST"
]
| [] | ["STORAGE_EMULATOR_HOST"] | go | 1 | 0 | |
version.go | package w32version
import (
"errors"
"os"
"os/exec"
"strings"
)
type W32Version uint16
var UnknownWindowsVersion = errors.New("Unknown Windows version")
const (
WindowsVista = 0x0006
Windows7 = 0x0106
Windows8 = 0x0206
Windows8_1 = 0x0306
Windows10 = 0x0406
)
const versionPrefix = "[Version "
const versionSuffix = "]"
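// GetVersion runs the "ver" command through %ComSpec% and parses the Windows
// version out of output of the form (illustrative):
//
//	Microsoft Windows [Version 6.3.9600]
//
// Unrecognised versions yield UnknownWindowsVersion.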
func GetVersion() (W32Version, error) {
cmd := os.Getenv("ComSpec")
out, err := exec.Command(cmd, "/c", "ver").Output()
if err != nil {
return 0, err
}
outStr := string(out)
start := strings.Index(outStr, versionPrefix)
if start == -1 {
return 0, UnknownWindowsVersion
}
outStr = outStr[start+len(versionPrefix):]
end := strings.Index(outStr, versionSuffix)
if end == -1 {
return 0, UnknownWindowsVersion
}
s := strings.Split(outStr[:end], ".")
switch {
case s[0] == "6" && s[1] == "0":
return WindowsVista, nil
case s[0] == "6" && s[1] == "1":
return Windows7, nil
case s[0] == "6" && s[1] == "2":
return Windows8, nil
case s[0] == "6" && s[1] == "3":
return Windows8_1, nil
case s[0] == "6" && s[1] == "4":
return Windows10, nil
default:
return 0, UnknownWindowsVersion
}
}
func (v W32Version) String() string {
switch v {
case WindowsVista:
return "Vista"
case Windows7:
return "7"
case Windows8:
return "8"
case Windows8_1:
return "8.1"
case Windows10:
return "10"
default:
panic(UnknownWindowsVersion)
}
}
| [
"\"ComSpec\""
]
| []
| [
"ComSpec"
]
| [] | ["ComSpec"] | go | 1 | 0 | |
docs/source/conf.py | # -*- coding: utf-8 -*-
#
# phantom documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 13 15:41:29 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from unittest.mock import MagicMock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../src/'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinxcontrib.bibtex',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting',
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = False
# Bibtex settings
bibtex_bibfiles = ['bibtex/cite.bib', 'bibtex/refs.bib']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'XDesign'
copyright = u'2016, UChicago Argonne, LLC'
author = u'Doga Gursoy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
# release = '0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"collapse_navigation": False,
"display_version": True,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'xdesigndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'xdesign.tex', u'XDesign Documentation', copyright, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'xdesign', u'XDesign Documentation', [author], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc, 'xdesign', u'XDesign Documentation', author, copyright,
'Benchmarking tools for tomography.', 'Miscellaneous'
),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org', None)
}
# picked from http://read-the-docs.readthedocs.org/en/latest/faq.html
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = [
'numpy',
'matplotlib',
'matplotlib.pyplot',
'matplotlib.patches',
'matplotlib.path',
'matplotlib.patheffects',
'matplotlib.axis',
'matplotlib.collections',
'cached_property',
'scipy',
'scipy.stats',
'scipy.ndimage',
'scipy.spatial',
'cycler',
'phasepack',
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
tests/integration/policy/policy_test.go | // Copyright 2016-2020, Pulumi Corporation. All rights reserved.
package ints
import (
"encoding/json"
"fmt"
"os"
"strings"
"testing"
"time"
ptesting "github.com/pulumi/pulumi/sdk/v2/go/common/testing"
)
// TestPolicyWithConfig runs integration tests against the policy pack in the policy_pack_w_config
// directory using version 0.4.1-dev of the pulumi/policy sdk.
func TestPolicyWithConfig(t *testing.T) {
t.Skip("Skip test that is causing unrelated tests to fail - pulumi/pulumi#4149")
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
// Confirm we have credentials.
if os.Getenv("PULUMI_ACCESS_TOKEN") == "" {
t.Fatal("PULUMI_ACCESS_TOKEN not found, aborting tests.")
}
name, _ := e.RunCommand("pulumi", "whoami")
orgName := strings.TrimSpace(name)
// Pack and push a Policy Pack for the organization.
policyPackName := fmt.Sprintf("%s-%x", "test-policy-pack", time.Now().UnixNano())
e.ImportDirectory("policy_pack_w_config")
e.RunCommand("yarn", "install")
os.Setenv("TEST_POLICY_PACK", policyPackName)
// Publish the Policy Pack twice.
publishPolicyPackWithVersion(e, orgName, `"0.0.1"`)
publishPolicyPackWithVersion(e, orgName, `"0.0.2"`)
// Check the policy ls commands.
packsOutput, _ := e.RunCommand("pulumi", "policy", "ls", "--json")
var packs []policyPacksJSON
assertJSON(e, packsOutput, &packs)
groupsOutput, _ := e.RunCommand("pulumi", "policy", "group", "ls", "--json")
var groups []policyGroupsJSON
assertJSON(e, groupsOutput, &groups)
// Enable, Disable and then Delete the Policy Pack.
e.RunCommand("pulumi", "policy", "enable", fmt.Sprintf("%s/%s", orgName, policyPackName), "0.0.1")
// Validate Policy Pack Configuration.
e.RunCommand("pulumi", "policy", "validate-config", fmt.Sprintf("%s/%s", orgName, policyPackName),
"--config=configs/valid-config.json", "0.0.1")
// Valid config, but no version specified.
e.RunCommandExpectError("pulumi", "policy", "validate-config", fmt.Sprintf("%s/%s", orgName, policyPackName),
"--config=configs/config.json")
// Invalid configs
e.RunCommandExpectError("pulumi", "policy", "validate-config", fmt.Sprintf("%s/%s", orgName, policyPackName),
"--config=configs/invalid-config.json", "0.0.1")
// Invalid - missing required property.
e.RunCommandExpectError("pulumi", "policy", "validate-config", fmt.Sprintf("%s/%s", orgName, policyPackName),
"--config=configs/invalid-required-prop.json", "0.0.1")
// Required config flag not present.
e.RunCommandExpectError("pulumi", "policy", "validate-config", fmt.Sprintf("%s/%s", orgName, policyPackName))
e.RunCommandExpectError("pulumi", "policy", "validate-config", fmt.Sprintf("%s/%s", orgName, policyPackName),
"--config", "0.0.1")
// Enable Policy Pack with Configuration.
e.RunCommand("pulumi", "policy", "enable", fmt.Sprintf("%s/%s", orgName, policyPackName),
"--config=configs/valid-config.json", "0.0.1")
e.RunCommandExpectError("pulumi", "policy", "enable", fmt.Sprintf("%s/%s", orgName, policyPackName),
"--config=configs/invalid-config.json", "0.0.1")
// Disable Policy Pack specifying version.
e.RunCommand("pulumi", "policy", "disable", fmt.Sprintf("%s/%s", orgName, policyPackName), "--version=0.0.1")
// Enable and Disable without specifying the version number.
e.RunCommand("pulumi", "policy", "enable", fmt.Sprintf("%s/%s", orgName, policyPackName), "latest")
e.RunCommand("pulumi", "policy", "disable", fmt.Sprintf("%s/%s", orgName, policyPackName))
e.RunCommand("pulumi", "policy", "rm", fmt.Sprintf("%s/%s", orgName, policyPackName), "0.0.1")
e.RunCommand("pulumi", "policy", "rm", fmt.Sprintf("%s/%s", orgName, policyPackName), "all")
}
// TestPolicyWithoutConfig runs integration tests against the policy pack in the policy_pack_w_config
// directory. This tests against version 0.4.0 of the pulumi/policy sdk, prior to policy config being supported.
func TestPolicyWithoutConfig(t *testing.T) {
t.Skip("Skip test that is causing unrelated tests to fail - pulumi/pulumi#4149")
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
// Confirm we have credentials.
if os.Getenv("PULUMI_ACCESS_TOKEN") == "" {
t.Fatal("PULUMI_ACCESS_TOKEN not found, aborting tests.")
}
name, _ := e.RunCommand("pulumi", "whoami")
orgName := strings.TrimSpace(name)
// Pack and push a Policy Pack for the organization.
policyPackName := fmt.Sprintf("%s-%x", "test-policy-pack", time.Now().UnixNano())
e.ImportDirectory("policy_pack_wo_config")
e.RunCommand("yarn", "install")
os.Setenv("TEST_POLICY_PACK", policyPackName)
// Publish the Policy Pack twice.
e.RunCommand("pulumi", "policy", "publish", orgName)
e.RunCommand("pulumi", "policy", "publish", orgName)
// Check the policy ls commands.
packsOutput, _ := e.RunCommand("pulumi", "policy", "ls", "--json")
var packs []policyPacksJSON
assertJSON(e, packsOutput, &packs)
groupsOutput, _ := e.RunCommand("pulumi", "policy", "group", "ls", "--json")
var groups []policyGroupsJSON
assertJSON(e, groupsOutput, &groups)
// Enable, Disable and then Delete the Policy Pack.
e.RunCommand("pulumi", "policy", "enable", fmt.Sprintf("%s/%s", orgName, policyPackName), "1")
e.RunCommand("pulumi", "policy", "disable", fmt.Sprintf("%s/%s", orgName, policyPackName), "--version=1")
// Enable and Disable without specifying the version number.
e.RunCommand("pulumi", "policy", "enable", fmt.Sprintf("%s/%s", orgName, policyPackName), "latest")
e.RunCommand("pulumi", "policy", "disable", fmt.Sprintf("%s/%s", orgName, policyPackName))
e.RunCommand("pulumi", "policy", "rm", fmt.Sprintf("%s/%s", orgName, policyPackName), "1")
e.RunCommand("pulumi", "policy", "rm", fmt.Sprintf("%s/%s", orgName, policyPackName), "all")
}
type policyPacksJSON struct {
Name string `json:"name"`
Versions []string `json:"versions"`
}
type policyGroupsJSON struct {
Name string `json:"name"`
Default bool `json:"default"`
NumPolicyPacks int `json:"numPolicyPacks"`
NumStacks int `json:"numStacks"`
}
func assertJSON(e *ptesting.Environment, out string, respObj interface{}) {
err := json.Unmarshal([]byte(out), &respObj)
if err != nil {
e.Errorf("unable to unmarshal %v", out)
}
}
// publishPolicyPackWithVersion updates the version in package.json so we can
// dynamically publish different versions for testing.
func publishPolicyPackWithVersion(e *ptesting.Environment, orgName, version string) {
cmd := fmt.Sprintf(`sed 's/{ policyVersion }/%s/g' package.json.tmpl | tee package.json`, version)
e.RunCommand("bash", "-c", cmd)
e.RunCommand("pulumi", "policy", "publish", orgName)
}
| [
"\"PULUMI_ACCESS_TOKEN\"",
"\"PULUMI_ACCESS_TOKEN\""
]
| []
| [
"PULUMI_ACCESS_TOKEN"
]
| [] | ["PULUMI_ACCESS_TOKEN"] | go | 1 | 0 | |
SCons/Variables/PathVariable.py | # MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Option type for path Variables.
This file defines an option type for SCons implementing path settings.
To be used whenever a user-specified path override should be allowed.
Arguments to PathVariable are:
option-name = name of this option on the command line (e.g. "prefix")
option-help = help string for option
option-dflt = default value for this option
validator = [optional] validator for option value. Predefined are:
PathAccept -- accepts any path setting; no validation
PathIsDir -- path must be an existing directory
PathIsDirCreate -- path must be a dir; will create
PathIsFile -- path must be a file
PathExists -- path must exist (any type) [default]
The validator is a function that is called to check the value; the
predefined validators above raise SCons.Errors.UserError if it is invalid. The arguments
to the validator function are: (key, val, env). The key is the
name of the option, the val is the path specified for the option,
and the env is the env to which the Options have been added.
Usage example::
Examples:
prefix=/usr/local
opts = Variables()
opts.Add(PathVariable('qtdir',
'where the root of Qt is installed',
qtdir, PathIsDir))
opts.Add(PathVariable('qt_includes',
'where the Qt includes are installed',
'$qtdir/includes', PathIsDirCreate))
opts.Add(PathVariable('qt_libraries',
'where the Qt library is installed',
'$qtdir/lib'))
"""
__all__ = ['PathVariable',]
import os
import os.path
import SCons.Errors
import SCons.Util
class _PathVariableClass:
@staticmethod
def PathAccept(key, val, env):
"""Accepts any path, no checking done."""
pass
@staticmethod
def PathIsDir(key, val, env):
"""Validator to check if Path is a directory."""
if not os.path.isdir(val):
if os.path.isfile(val):
m = 'Directory path for option %s is a file: %s'
else:
m = 'Directory path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
@staticmethod
def PathIsDirCreate(key, val, env):
"""Validator to check if Path is a directory,
creating it if it does not exist."""
if os.path.isfile(val):
m = 'Path for option %s is a file, not a directory: %s'
raise SCons.Errors.UserError(m % (key, val))
if not os.path.isdir(val):
os.makedirs(val)
@staticmethod
def PathIsFile(key, val, env):
"""Validator to check if Path is a file"""
if not os.path.isfile(val):
if os.path.isdir(val):
m = 'File path for option %s is a directory: %s'
else:
m = 'File path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
@staticmethod
def PathExists(key, val, env):
"""Validator to check if Path exists"""
if not os.path.exists(val):
m = 'Path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
def __call__(self, key, help, default, validator=None):
"""
The input parameters describe a 'path list' option, thus they
are returned with the correct converter and validator appended. The
result is usable for input to opts.Add() .
The 'default' option specifies the default path to use if the
user does not specify an override with this option.
validator is a validator, see this file for examples
"""
if validator is None:
validator = self.PathExists
if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key):
return (key, '%s ( /path/to/%s )' % (help, key[0]), default,
validator, None)
else:
return (key, '%s ( /path/to/%s )' % (help, key), default,
validator, None)
PathVariable = _PathVariableClass()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| []
| []
| []
| [] | [] | python | null | null | null |
pkg/docker/docker.go | package docker
import (
"bufio"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"path"
"strings"
"time"
dockerConfig "github.com/docker/cli/cli/config"
configTypes "github.com/docker/cli/cli/config/types"
"github.com/docker/cli/cli/streams"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
docker "github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/go-connections/nat"
hydroformDocker "github.com/kyma-incubator/hydroform/function/pkg/docker"
"github.com/kyma-project/cli/internal/minikube"
"github.com/kyma-project/cli/pkg/step"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
defaultRegistry = "index.docker.io"
)
type dockerClient struct {
*docker.Client
}
type dockerWrapper struct {
Docker Client
}
type kymaDockerClient struct {
Docker Client
}
//go:generate mockery --name Client
type Client interface {
ArchiveDirectory(srcPath string, options *archive.TarOptions) (io.ReadCloser, error)
ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig,
platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error)
ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error
ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error
ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error)
ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error)
Info(ctx context.Context) (types.Info, error)
NegotiateAPIVersion(ctx context.Context)
}
type KymaClient interface {
PushKymaInstaller(image string, currentStep step.Step) error
BuildKymaInstaller(localSrcPath, imageName string) error
}
// Wrapper provides helper functions
type Wrapper interface {
PullImageAndStartContainer(ctx context.Context, opts ContainerRunOpts) (string, error)
ContainerFollowRun(ctx context.Context, containerID string) error
Stop(ctx context.Context, containerID string, log func(...interface{})) func()
IsDockerDesktopOS(ctx context.Context) (bool, error)
}
// ErrorMessage is used to parse error messages coming from Docker
type ErrorMessage struct {
Error string
}
type ContainerRunOpts struct {
ContainerName string
Envs []string
Image string
Mounts []mount.Mount
NetworkMode string
Ports map[string]string
}
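// A minimal sketch of constructing run options (all values are illustrative):
//
//	opts := ContainerRunOpts{
//		ContainerName: "registry",
//		Image:         "registry:2",
//		Ports:         map[string]string{"5000/tcp": "5000"},
//	}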
// NewClient creates a Docker client using the Docker environment of the OS
func NewClient() (Client, error) {
dClient, err := docker.NewClientWithOpts(docker.FromEnv)
if err != nil {
return nil, err
}
return &dockerClient{
dClient,
}, nil
}
// NewMinikubeClient creates a Docker client for the minikube docker-env
func NewMinikubeClient(verbosity bool, profile string, timeout time.Duration) (Client, error) {
dClient, err := minikube.DockerClient(verbosity, profile, timeout)
if err != nil {
return nil, err
}
return &dockerClient{
dClient,
}, nil
}
func NewKymaClient(isLocal bool, verbosity bool, profile string, timeout time.Duration) (KymaClient, error) {
var err error
var dc Client
if isLocal {
dc, err = NewMinikubeClient(verbosity, profile, timeout)
} else {
dc, err = NewClient()
}
return &kymaDockerClient{
Docker: dc,
}, err
}
// NewWrapper creates a new wrapper around the docker client with helper functions
func NewWrapper() (Wrapper, error) {
dClient, err := NewClient()
if err != nil {
return nil, err
}
return &dockerWrapper{
Docker: dClient,
}, nil
}
func (d *dockerClient) ArchiveDirectory(srcPath string, options *archive.TarOptions) (io.ReadCloser, error) {
return archive.TarWithOptions(srcPath, &archive.TarOptions{})
}
func (k *kymaDockerClient) BuildKymaInstaller(localSrcPath, imageName string) error {
reader, err := k.Docker.ArchiveDirectory(localSrcPath, &archive.TarOptions{})
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(300)*time.Second)
defer cancel()
k.Docker.NegotiateAPIVersion(ctx)
args := make(map[string]*string)
_, err = k.Docker.ImageBuild(
ctx,
reader,
types.ImageBuildOptions{
Tags: []string{strings.TrimSpace(string(imageName))},
SuppressOutput: true,
Remove: true,
Dockerfile: path.Join("tools", "kyma-installer", "kyma.Dockerfile"),
BuildArgs: args,
},
)
if err != nil {
return err
}
return nil
}
func (k *kymaDockerClient) PushKymaInstaller(image string, currentStep step.Step) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(300)*time.Second)
defer cancel()
k.Docker.NegotiateAPIVersion(ctx)
domain, _ := splitDockerDomain(image)
auth, err := resolve(domain)
if err != nil {
return err
}
encodedJSON, err := json.Marshal(auth)
if err != nil {
return err
}
authStr := base64.URLEncoding.EncodeToString(encodedJSON)
currentStep.LogInfof("Pushing Docker image: '%s'", image)
pusher, err := k.Docker.ImagePush(ctx, image, types.ImagePushOptions{RegistryAuth: authStr})
if err != nil {
return err
}
defer pusher.Close()
var errorMessage ErrorMessage
buffIOReader := bufio.NewReader(pusher)
for {
streamBytes, err := buffIOReader.ReadBytes('\n')
if err == io.EOF {
break
}
err = json.Unmarshal(streamBytes, &errorMessage)
if err != nil {
return err
}
if errorMessage.Error != "" {
if strings.Contains(errorMessage.Error, "unauthorized") || strings.Contains(errorMessage.Error, "requested access to the resource is denied") {
return fmt.Errorf("missing permissions to push Docker image: %s\nPlease run `docker login` to authenticate", errorMessage.Error)
}
return fmt.Errorf("failed to push Docker image: %s", errorMessage.Error)
}
}
return nil
}
// PullImageAndStartContainer creates, pulls and starts a container
func (w *dockerWrapper) PullImageAndStartContainer(ctx context.Context, opts ContainerRunOpts) (string, error) {
config := &container.Config{
Env: opts.Envs,
ExposedPorts: portSet(opts.Ports),
Image: opts.Image,
}
hostConfig := &container.HostConfig{
PortBindings: portMap(opts.Ports),
AutoRemove: true,
Mounts: opts.Mounts,
NetworkMode: container.NetworkMode(opts.NetworkMode),
}
var r io.ReadCloser
r, err := w.Docker.ImagePull(ctx, config.Image, types.ImagePullOptions{})
if err != nil {
return "", err
}
defer r.Close()
streamer := streams.NewOut(os.Stdout)
if err = jsonmessage.DisplayJSONMessagesToStream(r, streamer, nil); err != nil {
return "", err
}
body, err := w.Docker.ContainerCreate(ctx, config, hostConfig, nil, nil, opts.ContainerName)
if err != nil {
return "", err
}
err = w.Docker.ContainerStart(ctx, body.ID, types.ContainerStartOptions{})
if err != nil {
return "", err
}
return body.ID, nil
}
// ContainerFollowRun attaches a connection to a container and logs the output
func (w *dockerWrapper) ContainerFollowRun(ctx context.Context, containerID string) error {
return hydroformDocker.FollowRun(ctx, w.Docker, containerID)
}
// Stop stops a container with additional logging
func (w *dockerWrapper) Stop(ctx context.Context, containerID string, log func(...interface{})) func() {
return hydroformDocker.Stop(ctx, w.Docker, containerID, log)
}
func (w *dockerWrapper) IsDockerDesktopOS(ctx context.Context) (bool, error) {
info, err := w.Docker.Info(ctx)
if err != nil {
return false, err
}
return info.OperatingSystem == "Docker Desktop", nil
}
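// splitDockerDomain splits an image name into its registry domain and the rest,
// e.g. "eu.gcr.io/project/image" yields ("eu.gcr.io", "project/image"), while a
// bare name such as "myimage" falls back to ("index.docker.io", "myimage").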
func splitDockerDomain(name string) (domain, remainder string) {
i := strings.IndexRune(name, '/')
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
domain, remainder = defaultRegistry, name
} else {
domain, remainder = name[:i], name[i+1:]
}
return
}
// resolve finds the Docker credentials to push an image.
func resolve(registry string) (*configTypes.AuthConfig, error) {
cf, err := dockerConfig.Load(os.Getenv("DOCKER_CONFIG"))
if err != nil {
return nil, err
}
if registry == defaultRegistry {
registry = "https://" + defaultRegistry + "/v1/"
}
cfg, err := cf.GetAuthConfig(registry)
if err != nil {
return nil, err
}
empty := configTypes.AuthConfig{}
if cfg == empty {
return &empty, nil
}
return &configTypes.AuthConfig{
Username: cfg.Username,
Password: cfg.Password,
Auth: cfg.Auth,
IdentityToken: cfg.IdentityToken,
RegistryToken: cfg.RegistryToken,
}, nil
}
func portSet(ports map[string]string) nat.PortSet {
portSet := nat.PortSet{}
for from := range ports {
portSet[nat.Port(from)] = struct{}{}
}
return portSet
}
func portMap(ports map[string]string) nat.PortMap {
portMap := nat.PortMap{}
for from, to := range ports {
portMap[nat.Port(from)] = []nat.PortBinding{
{
HostPort: to,
},
}
}
return portMap
}
| [
"\"DOCKER_CONFIG\""
]
| []
| [
"DOCKER_CONFIG"
]
| [] | ["DOCKER_CONFIG"] | go | 1 | 0 | |
telegram/main.go | package main
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"strings"
)
// Create a struct that mimics the webhook response body
// https://core.telegram.org/bots/api#update
type webhookReqBody struct {
Message struct {
Text string `json:"text"`
Chat struct {
ID int64 `json:"id"`
} `json:"chat"`
} `json:"message"`
}
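// For illustration, a minimal update payload that decodes into this struct:
//
//	{"message": {"text": "marco", "chat": {"id": 123456789}}}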
// This handler is called every time Telegram sends us a webhook event
func Handler(res http.ResponseWriter, req *http.Request) {
// First, decode the JSON response body
body := &webhookReqBody{}
if err := json.NewDecoder(req.Body).Decode(body); err != nil {
fmt.Println("could not decode request body", err)
return
}
// Check if the message contains the word "marco"
// if not, return without doing anything
if !strings.Contains(strings.ToLower(body.Message.Text), "marco") {
return
}
// If the text contains marco, call the `sayPolo` function, which
// is defined below
if err := sayPolo(body.Message.Chat.ID); err != nil {
fmt.Println("error in sending reply:", err)
return
}
// log a confirmation message if the message is sent successfully
fmt.Println("reply sent")
}
//The below code deals with the process of sending a response message
// to the user
// Create a struct to conform to the JSON body
// of the send message request
// https://core.telegram.org/bots/api#sendmessage
type sendMessageReqBody struct {
ChatID int64 `json:"chat_id"`
Text string `json:"text"`
}
// sayPolo takes a chatID and sends "polo" to them
func sayPolo(chatID int64) error {
// Create the request body struct
reqBody := &sendMessageReqBody{
ChatID: chatID,
Text: "Polo!!",
}
// Create the JSON body from the struct
reqBytes, err := json.Marshal(reqBody)
if err != nil {
return err
}
// Send a post request with your token
token := os.Getenv("TELEGRAM_BOT")
url := "https://api.telegram.org/bot" + token + "/sendMessage"
res, err := http.Post(url, "application/json", bytes.NewBuffer(reqBytes))
if err != nil {
return err
}
if res.StatusCode != http.StatusOK {
		return errors.New("unexpected status " + res.Status)
}
return nil
}
// Finally, the main function starts our server on port 3010
func main() {
http.ListenAndServe(":3010", http.HandlerFunc(Handler))
}
| [
"\"TELEGRAM_BOT\""
]
| []
| [
"TELEGRAM_BOT"
]
| [] | ["TELEGRAM_BOT"] | go | 1 | 0 | |
Caesar Cipher/caesar-cipher.java | import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.function.*;
import java.util.regex.*;
import java.util.stream.*;
import static java.util.stream.Collectors.joining;
import static java.util.stream.Collectors.toList;
class Result {
/*
* Complete the 'caesarCipher' function below.
*
* The function is expected to return a STRING.
* The function accepts following parameters:
* 1. STRING s
* 2. INTEGER k
*/
public static String caesarCipher(String s, int k) {
// Write your code here
char[] arr = s.toCharArray();
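        // Shift each alphabetic character by k positions, wrapping around within
        // the alphabet and preserving case; non-alphabetic characters are left unchanged.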
for(int i=0; i< arr.length; i++){
boolean check = Character.isAlphabetic(arr[i]);
boolean isUpper = Character.isUpperCase(arr[i]);
if(check){
k = k % 26;
if(Character.isAlphabetic((char)((int)arr[i]) + k)){
if(isUpper && !Character.isUpperCase((char) (((int) arr[i]) + k))){
arr[i] = (char) (((int) arr[i]) + k - 26);
}else{
arr[i] = (char) (((int) arr[i]) + k);
}
}else{
arr[i] = (char) (((int) arr[i]) + k - 26);
}
}
}
String result = String.valueOf(arr);
return result;
}
}
public class Solution {
public static void main(String[] args) throws IOException {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in));
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int n = Integer.parseInt(bufferedReader.readLine().trim());
String s = bufferedReader.readLine();
int k = Integer.parseInt(bufferedReader.readLine().trim());
String result = Result.caesarCipher(s, k);
bufferedWriter.write(result);
bufferedWriter.newLine();
bufferedReader.close();
bufferedWriter.close();
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
main.go | package main
import (
"log"
"os"
"time"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
"github.com/joho/godotenv"
"github.com/shirou/gopsutil/v3/net"
"github.com/shirou/gopsutil/v3/process"
)
func main() {
// Ignore any errors when loading the .env file just in case it doesn't exist
_ = godotenv.Load()
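	// Configuration comes entirely from environment variables (optionally loaded
	// from .env above): INFLUX_API, INFLUX_TOKEN, INFLUX_EMAIL, INFLUX_BUCKET and
	// HOST are all expected to be set.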
client := influxdb2.NewClient(os.Getenv("INFLUX_API"), os.Getenv("INFLUX_TOKEN"))
defer client.Close()
processes, err := process.Processes()
if err != nil {
panic(err)
}
log.Printf("Logging RSS and CPU from %d processes", len(processes))
// get non-blocking write client
writeAPI := client.WriteAPI(os.Getenv("INFLUX_EMAIL"), os.Getenv("INFLUX_BUCKET"))
for _, process := range processes {
name, err := process.Name()
if err != nil {
log.Printf("ERROR: Unable to get process name for PID: %d: %v", process.Pid, err)
continue
}
memory, err := process.MemoryInfo()
if err != nil {
log.Printf("ERROR: Unable to get memory info for process %s: %v", name, err)
continue
}
cpuTime, err := process.CPUPercent()
if err != nil {
log.Printf("ERROR: Unable to get cpu time for process %s: %v", name, err)
continue
}
p := influxdb2.NewPointWithMeasurement("host_process").
AddTag("host", os.Getenv("HOST")).
AddTag("process", name).
AddField("rss_mb", float64(memory.RSS)/1024/1024).
AddField("cpu_time_percent", cpuTime).
SetTime(time.Now())
// write point asynchronously
writeAPI.WritePoint(p)
}
log.Println("Logging Network Traffic")
iocountersBefore, err := net.IOCounters(false)
if err != nil {
panic(err)
}
lastSentBefore := iocountersBefore[0].BytesSent
lastRecvBefore := iocountersBefore[0].BytesRecv
time.Sleep(4 * time.Second)
iocountersAfter, err := net.IOCounters(false)
if err != nil {
panic(err)
}
lastSentAfter := iocountersAfter[0].BytesSent
lastRecvAfter := iocountersAfter[0].BytesRecv
sentDelta := lastSentAfter - lastSentBefore
recvDelta := lastRecvAfter - lastRecvBefore
sentPerSec := float64(sentDelta) / 4
recvPerSec := float64(recvDelta) / 4
log.Printf("Network in %f B/s; Network out %f B/s", recvPerSec, sentPerSec)
p := influxdb2.NewPointWithMeasurement("host_network").
AddTag("host", os.Getenv("HOST")).
AddField("net_sent", sentPerSec).
AddField("net_recv", recvPerSec).
SetTime(time.Now())
writeAPI.WritePoint(p)
// Flush writes
writeAPI.Flush()
log.Printf("Successfully pushed metrics to influxdb for %d processes", len(processes))
}
| [
"\"INFLUX_API\"",
"\"INFLUX_TOKEN\"",
"\"INFLUX_EMAIL\"",
"\"INFLUX_BUCKET\"",
"\"HOST\"",
"\"HOST\""
]
| []
| [
"HOST",
"INFLUX_BUCKET",
"INFLUX_TOKEN",
"INFLUX_API",
"INFLUX_EMAIL"
]
| [] | ["HOST", "INFLUX_BUCKET", "INFLUX_TOKEN", "INFLUX_API", "INFLUX_EMAIL"] | go | 5 | 0 | |
recipe/fix-version.py | from pathlib import Path
import os
SRC_DIR = Path(os.environ["SRC_DIR"])
SETUP_PY = SRC_DIR / "setup.py"
UTF8 = dict(encoding="utf-8")
PKG_VERSION = os.environ["PKG_VERSION"]
SETUP_PY.write_text(
SETUP_PY.read_text(**UTF8).replace(
'''version='0.0.0',''',
f'''version='{PKG_VERSION}','''
),
**UTF8
)
print("added", PKG_VERSION, "to", SETUP_PY, ":")
print(SETUP_PY.read_text(**UTF8))
| []
| []
| [
"PKG_VERSION",
"SRC_DIR"
]
| [] | ["PKG_VERSION", "SRC_DIR"] | python | 2 | 0 | |
autotest/pymod/gdaltest.py | # -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Python Library supporting GDAL/OGR Test Suite
# Author: Frank Warmerdam <[email protected]>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <[email protected]>
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import contextlib
import os
import sys
import time
from osgeo import gdal
from osgeo import osr
cur_name = 'default'
success_counter = 0
failure_counter = 0
expected_failure_counter = 0
blow_counter = 0
skip_counter = 0
failure_summary = []
reason = None
count_skipped_tests_download = 0
count_skipped_tests_slow = 0
start_time = None
end_time = None
jp2kak_drv = None
jpeg2000_drv = None
jp2ecw_drv = None
jp2mrsid_drv = None
jp2openjpeg_drv = None
jp2kak_drv_unregistered = False
jpeg2000_drv_unregistered = False
jp2ecw_drv_unregistered = False
jp2mrsid_drv_unregistered = False
jp2openjpeg_drv_unregistered = False
from sys import version_info
if version_info >= (3,0,0):
import gdaltest_python3 as gdaltestaux
else:
import gdaltest_python2 as gdaltestaux
# Process commandline arguments for stuff like --debug, --locale, --config
argv = gdal.GeneralCmdLineProcessor( sys.argv )
###############################################################################
def setup_run( name ):
if 'APPLY_LOCALE' in os.environ:
import locale
locale.setlocale(locale.LC_ALL, '')
global cur_name
cur_name = name
###############################################################################
def run_tests( test_list ):
global success_counter, failure_counter, expected_failure_counter, blow_counter, skip_counter
global reason, failure_summary, cur_name
global start_time, end_time
set_time = start_time is None
if set_time:
start_time = time.time()
had_errors_this_script = 0
for test_item in test_list:
if test_item is None:
continue
try:
(func, name) = test_item
if func.__name__[:4] == 'test':
outline = ' TEST: ' + func.__name__[4:] + ': ' + name + ' ... '
else:
outline = ' TEST: ' + func.__name__ + ': ' + name + ' ... '
except:
func = test_item
name = func.__name__
outline = ' TEST: ' + name + ' ... '
sys.stdout.write( outline )
sys.stdout.flush()
reason = None
result = run_func(func)
if result[:4] == 'fail':
if had_errors_this_script == 0:
failure_summary.append( 'Script: ' + cur_name )
had_errors_this_script = 1
failure_summary.append( outline + result )
if reason is not None:
failure_summary.append( ' ' + reason )
if reason is not None:
print((' ' + reason))
if result == 'success':
success_counter = success_counter + 1
elif result == 'expected_fail':
expected_failure_counter = expected_failure_counter + 1
elif result == 'fail':
failure_counter = failure_counter + 1
elif result == 'skip':
skip_counter = skip_counter + 1
elif result == 'fail (blowup)':
blow_counter = blow_counter + 1
else:
failure_counter = failure_counter + 1
print('Unexpected return value: %s' % result)
if had_errors_this_script == 0:
failure_summary.append( 'Script: ' + cur_name )
had_errors_this_script = 1
failure_summary.append( outline + result + ' (unexpected value)' )
if reason is not None:
failure_summary.append( ' ' + reason )
if set_time:
end_time = time.time()
###############################################################################
def get_lineno_2framesback( frames ):
try:
import inspect
frame = inspect.currentframe()
while frames > 0:
frame = frame.f_back
frames = frames-1
return frame.f_lineno
except:
return -1
###############################################################################
def post_reason( msg, frames=2 ):
lineno = get_lineno_2framesback( frames )
global reason
if lineno >= 0:
reason = 'line %d: %s' % (lineno, msg)
else:
reason = msg
###############################################################################
def summarize():
global count_skipped_tests_download, count_skipped_tests_slow
global success_counter, failure_counter, blow_counter, skip_counter
global cur_name
global start_time, end_time
print('')
if cur_name is not None:
print('Test Script: %s' % cur_name)
print('Succeeded: %d' % success_counter)
print('Failed: %d (%d blew exceptions)' \
% (failure_counter+blow_counter, blow_counter))
print('Skipped: %d' % skip_counter)
print('Expected fail:%d' % expected_failure_counter)
if start_time is not None:
duration = end_time - start_time
if duration >= 60:
print('Duration: %02dm%02.1fs' % (duration / 60., duration % 60.))
else:
print('Duration: %02.2fs' % duration)
if count_skipped_tests_download != 0:
print('As GDAL_DOWNLOAD_TEST_DATA environment variable is not defined, %d tests relying on data to downloaded from the Web have been skipped' % count_skipped_tests_download)
if count_skipped_tests_slow != 0:
print('As GDAL_RUN_SLOW_TESTS environment variable is not defined, %d "slow" tests have been skipped' % count_skipped_tests_slow)
print('')
sys.path.append( 'gcore' )
sys.path.append( '../gcore' )
import testnonboundtoswig
# Do it twice to ensure that cleanup routines properly do their jobs
for i in range(2):
testnonboundtoswig.OSRCleanup()
testnonboundtoswig.GDALDestroyDriverManager()
testnonboundtoswig.OGRCleanupAll()
return failure_counter + blow_counter
###############################################################################
def run_all( dirlist, run_as_external = False ):
global start_time, end_time
global cur_name
start_time = time.time()
for dir_name in dirlist:
files = os.listdir(dir_name)
old_path = sys.path
# We prepend '.' rather than append it, so that "import rasterio"
# imports our rasterio.py and not another famous external package.
sys.path = ['.'] + sys.path
for file in files:
if not file[-3:] == '.py':
continue
module = file[:-3]
try:
wd = os.getcwd()
os.chdir( dir_name )
# Even try to import as module in run_as_external case
# so as to be able to detect ImportError and skip them
exec("import " + module)
if run_as_external:
exec("%s.gdaltest_list" % module)
python_exe = sys.executable
if sys.platform == 'win32':
python_exe = python_exe.replace('\\', '/')
print('Running %s/%s...' % (dir_name,file))
#ret = runexternal(python_exe + ' ' + file, display_live_on_parent_stdout = True)
if 'GDALTEST_ASAN_OPTIONS' in os.environ:
if 'ASAN_OPTIONS' in os.environ:
backup_asan_options = os.environ['ASAN_OPTIONS']
else:
backup_asan_options = None
os.environ['ASAN_OPTIONS'] = os.environ['GDALTEST_ASAN_OPTIONS']
ret = runexternal(python_exe + """ -c "import %s; import sys; sys.path.append('../pymod'); import gdaltest; gdaltest.run_tests( %s.gdaltest_list ); gdaltest.summarize()" """ % (module, module) , display_live_on_parent_stdout = True)
if 'GDALTEST_ASAN_OPTIONS' in os.environ:
if backup_asan_options is None:
del os.environ['ASAN_OPTIONS']
else:
os.environ['ASAN_OPTIONS'] = backup_asan_options
global success_counter, failure_counter, failure_summary
if ret.find('Failed: 0') < 0:
failure_counter += 1
failure_summary.append( dir_name + '/' + file )
else:
success_counter += 1
else:
try:
print('Running tests from %s/%s' % (dir_name,file))
setup_run( '%s/%s' % (dir_name,file) )
exec("run_tests( " + module + ".gdaltest_list)")
except:
#import traceback
#traceback.print_exc(file=sys.stderr)
pass
os.chdir( wd )
except:
os.chdir( wd )
print('... failed to load %s ... skipping.' % file)
import traceback
traceback.print_exc()
# We only add the tool directory to the python path long enough
# to load the tool files.
sys.path = old_path
end_time = time.time()
cur_name = None
if len(failure_summary) > 0:
print('')
print(' ------------ Failures ------------')
for item in failure_summary:
print(item)
print(' ----------------------------------')
###############################################################################
def clean_tmp():
all_files = os.listdir('tmp')
for file in all_files:
if file == 'CVS' or file == 'do-not-remove':
continue
try:
os.remove( 'tmp/' + file )
except:
pass
return 'success'
###############################################################################
def testCreateCopyInterruptCallback(pct, message, user_data):
if pct > 0.5:
return 0 # to stop
else:
return 1 # to continue
###############################################################################
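# A GDALTest instance drives most driver tests. A typical use looks like the
# sketch below (driver, file, band and checksum values are illustrative):
#
#   tst = gdaltest.GDALTest('GTiff', 'byte.tif', 1, 4672)
#   return tst.testOpen()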
class GDALTest:
def __init__(self, drivername, filename, band, chksum,
xoff = 0, yoff = 0, xsize = 0, ysize = 0, options = [],
filename_absolute = 0, chksum_after_reopening = None, open_options = None ):
self.driver = None
self.drivername = drivername
self.filename = filename
self.filename_absolute = filename_absolute
self.band = band
self.chksum = chksum
if chksum_after_reopening is not None:
if type(chksum_after_reopening) == type([]):
self.chksum_after_reopening = chksum_after_reopening
else:
self.chksum_after_reopening = [ chksum_after_reopening ]
elif chksum is None:
self.chksum_after_reopening = None
else:
self.chksum_after_reopening = [ chksum ]
self.xoff = xoff
self.yoff = yoff
self.xsize = xsize
self.ysize = ysize
self.options = options
self.open_options = open_options
def testDriver(self):
if self.driver is None:
self.driver = gdal.GetDriverByName( self.drivername )
if self.driver is None:
post_reason( self.drivername + ' driver not found!' )
return 'fail'
return 'success'
def testOpen(self, check_prj = None, check_gt = None, gt_epsilon = None, \
check_stat = None, check_approx_stat = None, \
stat_epsilon = None, skip_checksum = None):
"""check_prj - projection reference, check_gt - geotransformation
matrix (tuple), gt_epsilon - geotransformation tolerance,
check_stat - band statistics (tuple), stat_epsilon - statistics
tolerance."""
if self.testDriver() == 'fail':
return 'skip'
if self.filename_absolute:
wrk_filename = self.filename
else:
wrk_filename = 'data/' + self.filename
if self.open_options:
ds = gdal.OpenEx( wrk_filename, gdal.OF_RASTER, open_options = self.open_options )
else:
ds = gdal.Open( wrk_filename, gdal.GA_ReadOnly )
if ds is None:
post_reason( 'Failed to open dataset: ' + wrk_filename )
return 'fail'
if ds.GetDriver().ShortName != gdal.GetDriverByName( self.drivername ).ShortName:
post_reason( 'The driver of the returned dataset is %s instead of %s.' % ( ds.GetDriver().ShortName, self.drivername ) )
return 'fail'
if self.xsize == 0 and self.ysize == 0:
self.xsize = ds.RasterXSize
self.ysize = ds.RasterYSize
# Do we need to check projection?
if check_prj is not None:
new_prj = ds.GetProjection()
src_osr = osr.SpatialReference()
src_osr.SetFromUserInput( check_prj )
new_osr = osr.SpatialReference( wkt=new_prj )
if not src_osr.IsSame(new_osr):
print('')
print('old = %s' % src_osr.ExportToPrettyWkt())
print('new = %s' % new_osr.ExportToPrettyWkt())
post_reason( 'Projections differ' )
return 'fail'
# Do we need to check geotransform?
if check_gt:
# Default to 100th of pixel as our test value.
if gt_epsilon is None:
gt_epsilon = (abs(check_gt[1])+abs(check_gt[2])) / 100.0
new_gt = ds.GetGeoTransform()
for i in range(6):
if abs(new_gt[i]-check_gt[i]) > gt_epsilon:
print('')
print('old = ', check_gt)
print('new = ', new_gt)
post_reason( 'Geotransform differs.' )
return 'fail'
oBand = ds.GetRasterBand(self.band)
if skip_checksum is None:
chksum = oBand.Checksum(self.xoff, self.yoff, self.xsize, self.ysize)
# Do we need to check approximate statistics?
if check_approx_stat:
# Default to 1000th of pixel value range as our test value.
if stat_epsilon is None:
stat_epsilon = \
abs(check_approx_stat[1] - check_approx_stat[0]) / 1000.0
new_stat = oBand.GetStatistics(1, 1)
for i in range(4):
# NOTE - mloskot: Poor man Nan/Inf value check. It's poor
# because we need to support old and buggy Python 2.3.
# Tested on Linux, Mac OS X and Windows, with Python 2.3/2.4/2.5.
sv = str(new_stat[i]).lower()
if sv.find('n') >= 0 or sv.find('i') >= 0 or sv.find('#') >= 0:
                    post_reason( "NaN or Infinite value encountered '%s'." % sv )
return 'fail'
if abs(new_stat[i]-check_approx_stat[i]) > stat_epsilon:
print('')
print('old = ', check_approx_stat)
print('new = ', new_stat)
post_reason( 'Approximate statistics differs.' )
return 'fail'
# Do we need to check statistics?
if check_stat:
# Default to 1000th of pixel value range as our test value.
if stat_epsilon is None:
stat_epsilon = abs(check_stat[1] - check_stat[0]) / 1000.0
# FIXME: how to test approximate statistic results?
new_stat = oBand.GetStatistics(1, 1)
new_stat = oBand.GetStatistics(0, 1)
for i in range(4):
sv = str(new_stat[i]).lower()
if sv.find('n') >= 0 or sv.find('i') >= 0 or sv.find('#') >= 0:
                    post_reason( "NaN or Infinite value encountered '%s'." % sv )
return 'fail'
if abs(new_stat[i]-check_stat[i]) > stat_epsilon:
print('')
print('old = ', check_stat)
print('new = ', new_stat)
post_reason( 'Statistics differs.' )
return 'fail'
ds = None
if is_file_open(wrk_filename):
post_reason('file still open after dataset closing')
return 'fail'
if skip_checksum is not None:
return 'success'
elif self.chksum is None or chksum == self.chksum:
return 'success'
else:
post_reason('Checksum for band %d in "%s" is %d, but expected %d.' \
% (self.band, self.filename, chksum, self.chksum) )
return 'fail'
def testCreateCopy(self, check_minmax = 1, check_gt = 0, check_srs = None,
vsimem = 0, new_filename = None, strict_in = 0,
skip_preclose_test = 0, delete_copy = 1, gt_epsilon = None,
check_checksum_not_null = None, interrupt_during_copy = False,
dest_open_options = None):
if self.testDriver() == 'fail':
return 'skip'
if self.filename_absolute:
wrk_filename = self.filename
else:
wrk_filename = 'data/' + self.filename
if self.open_options:
src_ds = gdal.OpenEx( wrk_filename, gdal.OF_RASTER, open_options = self.open_options )
else:
src_ds = gdal.Open( wrk_filename, gdal.GA_ReadOnly )
if self.band > 0:
minmax = src_ds.GetRasterBand(self.band).ComputeRasterMinMax()
src_prj = src_ds.GetProjection()
src_gt = src_ds.GetGeoTransform()
if new_filename is None:
if vsimem:
new_filename = '/vsimem/' + self.filename + '.tst'
else:
new_filename = 'tmp/' + self.filename + '.tst'
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
if interrupt_during_copy:
new_ds = self.driver.CreateCopy( new_filename, src_ds,
strict = strict_in,
options = self.options,
callback = testCreateCopyInterruptCallback)
else:
new_ds = self.driver.CreateCopy( new_filename, src_ds,
strict = strict_in,
options = self.options )
gdal.PopErrorHandler()
if interrupt_during_copy:
if new_ds is None:
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
self.driver.Delete( new_filename )
gdal.PopErrorHandler()
return 'success'
else:
post_reason( 'CreateCopy() should have failed due to interruption')
new_ds = None
self.driver.Delete( new_filename )
return 'fail'
if new_ds is None:
post_reason( 'Failed to create test file using CreateCopy method.'\
+ '\n' + gdal.GetLastErrorMsg() )
return 'fail'
if new_ds.GetDriver().ShortName != gdal.GetDriverByName( self.drivername ).ShortName:
post_reason( 'The driver of the returned dataset is %s instead of %s.' % ( new_ds.GetDriver().ShortName, self.drivername ) )
return 'fail'
if self.band > 0 and skip_preclose_test == 0:
bnd = new_ds.GetRasterBand(self.band)
if check_checksum_not_null is True:
if bnd.Checksum() == 0:
post_reason('Got null checksum on still-open file.')
return 'fail'
elif self.chksum is not None and bnd.Checksum() != self.chksum:
post_reason(
'Did not get expected checksum on still-open file.\n' \
' Got %d instead of %d.' % (bnd.Checksum(),self.chksum))
return 'fail'
if check_minmax:
got_minmax = bnd.ComputeRasterMinMax()
if got_minmax != minmax:
post_reason( \
'Did not get expected min/max values on still-open file.\n' \
' Got %g,%g instead of %g,%g.' \
% ( got_minmax[0], got_minmax[1], minmax[0], minmax[1] ) )
return 'fail'
bnd = None
new_ds = None
# hopefully it's closed now!
if dest_open_options is not None:
new_ds = gdal.OpenEx( new_filename, gdal.OF_RASTER, open_options = dest_open_options )
else:
new_ds = gdal.Open( new_filename )
if new_ds is None:
post_reason( 'Failed to open dataset: ' + new_filename )
return 'fail'
if self.band > 0:
bnd = new_ds.GetRasterBand(self.band)
if check_checksum_not_null is True:
if bnd.Checksum() == 0:
post_reason('Got null checksum on reopened file.')
return 'fail'
elif self.chksum_after_reopening is not None and bnd.Checksum() not in self.chksum_after_reopening:
post_reason( 'Did not get expected checksum on reopened file.\n'
' Got %d instead of %s.' \
% (bnd.Checksum(), str(self.chksum_after_reopening)) )
return 'fail'
if check_minmax:
got_minmax = bnd.ComputeRasterMinMax()
if got_minmax != minmax:
post_reason( \
'Did not get expected min/max values on reopened file.\n' \
' Got %g,%g instead of %g,%g.' \
% ( got_minmax[0], got_minmax[1], minmax[0], minmax[1] ) )
return 'fail'
# Do we need to check the geotransform?
if check_gt:
if gt_epsilon is None:
eps = 0.00000001
else:
eps = gt_epsilon
new_gt = new_ds.GetGeoTransform()
if abs(new_gt[0] - src_gt[0]) > eps \
or abs(new_gt[1] - src_gt[1]) > eps \
or abs(new_gt[2] - src_gt[2]) > eps \
or abs(new_gt[3] - src_gt[3]) > eps \
or abs(new_gt[4] - src_gt[4]) > eps \
or abs(new_gt[5] - src_gt[5]) > eps:
print('')
print('old = ', src_gt)
print('new = ', new_gt)
post_reason( 'Geotransform differs.' )
return 'fail'
# Do we need to check the geotransform?
if check_srs is not None:
new_prj = new_ds.GetProjection()
src_osr = osr.SpatialReference( wkt=src_prj )
new_osr = osr.SpatialReference( wkt=new_prj )
if not src_osr.IsSame(new_osr):
print('')
print('old = %s' % src_osr.ExportToPrettyWkt())
print('new = %s' % new_osr.ExportToPrettyWkt())
post_reason( 'Projections differ' )
return 'fail'
bnd = None
new_ds = None
src_ds = None
        if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON' and delete_copy == 1:
self.driver.Delete( new_filename )
return 'success'
def testCreate(self, vsimem = 0, new_filename = None, out_bands = 1,
check_minmax = 1, dest_open_options = None ):
if self.testDriver() == 'fail':
return 'skip'
if self.filename_absolute:
wrk_filename = self.filename
else:
wrk_filename = 'data/' + self.filename
if self.open_options:
src_ds = gdal.OpenEx( wrk_filename, gdal.OF_RASTER, open_options = self.open_options )
else:
src_ds = gdal.Open( wrk_filename, gdal.GA_ReadOnly )
xsize = src_ds.RasterXSize
ysize = src_ds.RasterYSize
src_img = src_ds.GetRasterBand(self.band).ReadRaster(0,0,xsize,ysize)
minmax = src_ds.GetRasterBand(self.band).ComputeRasterMinMax()
if new_filename is None:
if vsimem:
new_filename = '/vsimem/' + self.filename + '.tst'
else:
new_filename = 'tmp/' + self.filename + '.tst'
new_ds = self.driver.Create( new_filename, xsize, ysize, out_bands,
src_ds.GetRasterBand(self.band).DataType,
options = self.options )
if new_ds is None:
post_reason( 'Failed to create test file using Create method.' )
return 'fail'
src_ds = None
try:
for band in range(1,out_bands+1):
new_ds.GetRasterBand(band).WriteRaster( 0, 0, xsize, ysize, src_img )
except:
post_reason( 'Failed to write raster bands to test file.' )
return 'fail'
for band in range(1,out_bands+1):
if self.chksum is not None \
and new_ds.GetRasterBand(band).Checksum() != self.chksum:
post_reason(
'Did not get expected checksum on still-open file.\n' \
' Got %d instead of %d.' \
% (new_ds.GetRasterBand(band).Checksum(),self.chksum))
return 'fail'
computed_minmax = new_ds.GetRasterBand(band).ComputeRasterMinMax()
if computed_minmax != minmax and check_minmax:
post_reason( 'Did not get expected min/max values on still-open file.' )
print('expect: ', minmax)
print('got: ', computed_minmax)
return 'fail'
new_ds = None
if dest_open_options is not None:
new_ds = gdal.OpenEx( new_filename, gdal.OF_RASTER, open_options = dest_open_options )
else:
new_ds = gdal.Open( new_filename )
if new_ds is None:
post_reason( 'Failed to open dataset: ' + new_filename )
return 'fail'
for band in range(1,out_bands+1):
if self.chksum is not None \
and new_ds.GetRasterBand(band).Checksum() != self.chksum:
post_reason( 'Did not get expected checksum on reopened file.' \
' Got %d instead of %d.' \
% (new_ds.GetRasterBand(band).Checksum(),self.chksum))
return 'fail'
if new_ds.GetRasterBand(band).ComputeRasterMinMax() != minmax and check_minmax:
post_reason( 'Did not get expected min/max values on reopened file.' )
return 'fail'
new_ds = None
if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
self.driver.Delete( new_filename )
return 'success'
def testSetGeoTransform(self):
if self.testDriver() == 'fail':
return 'skip'
wrk_filename = 'data/' + self.filename
if self.open_options:
src_ds = gdal.OpenEx( wrk_filename, gdal.OF_RASTER, open_options = self.open_options )
else:
src_ds = gdal.Open( wrk_filename, gdal.GA_ReadOnly )
xsize = src_ds.RasterXSize
ysize = src_ds.RasterYSize
new_filename = 'tmp/' + self.filename + '.tst'
new_ds = self.driver.Create( new_filename, xsize, ysize, 1,
src_ds.GetRasterBand(self.band).DataType,
options = self.options )
if new_ds is None:
post_reason( 'Failed to create test file using Create method.' )
return 'fail'
gt = (123.0, 1.18, 0.0, 456.0, 0.0, -1.18 )
if new_ds.SetGeoTransform( gt ) is not gdal.CE_None:
post_reason( 'Failed to set geographic transformation.' )
return 'fail'
src_ds = None
new_ds = None
new_ds = gdal.Open( new_filename )
if new_ds is None:
post_reason( 'Failed to open dataset: ' + new_filename )
return 'fail'
eps = 0.00000001
new_gt = new_ds.GetGeoTransform()
if abs(new_gt[0] - gt[0]) > eps \
or abs(new_gt[1] - gt[1]) > eps \
or abs(new_gt[2] - gt[2]) > eps \
or abs(new_gt[3] - gt[3]) > eps \
or abs(new_gt[4] - gt[4]) > eps \
or abs(new_gt[5] - gt[5]) > eps:
print('')
print('old = ', gt)
print('new = ', new_gt)
post_reason( 'Did not get expected geotransform.' )
return 'fail'
new_ds = None
if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
self.driver.Delete( new_filename )
return 'success'
def testSetProjection(self, prj = None, expected_prj = None ):
if self.testDriver() == 'fail':
return 'skip'
wrk_filename = 'data/' + self.filename
if self.open_options:
src_ds = gdal.OpenEx( wrk_filename, gdal.OF_RASTER, open_options = self.open_options )
else:
src_ds = gdal.Open( wrk_filename, gdal.GA_ReadOnly )
xsize = src_ds.RasterXSize
ysize = src_ds.RasterYSize
new_filename = 'tmp/' + self.filename + '.tst'
new_ds = self.driver.Create( new_filename, xsize, ysize, 1,
src_ds.GetRasterBand(self.band).DataType,
options = self.options )
if new_ds is None:
post_reason( 'Failed to create test file using Create method.' )
return 'fail'
gt = (123.0, 1.18, 0.0, 456.0, 0.0, -1.18 )
if prj is None:
# This is a challenging SRS since it has non-meter linear units.
prj='PROJCS["NAD83 / Ohio South",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",40.03333333333333],PARAMETER["standard_parallel_2",38.73333333333333],PARAMETER["latitude_of_origin",38],PARAMETER["central_meridian",-82.5],PARAMETER["false_easting",1968500],PARAMETER["false_northing",0],UNIT["feet",0.3048006096012192]]'
src_osr = osr.SpatialReference()
src_osr.ImportFromWkt(prj)
new_ds.SetGeoTransform( gt )
if new_ds.SetProjection( prj ) is not gdal.CE_None:
post_reason( 'Failed to set geographic projection string.' )
return 'fail'
src_ds = None
new_ds = None
new_ds = gdal.Open( new_filename )
if new_ds is None:
post_reason( 'Failed to open dataset: ' + new_filename )
return 'fail'
expected_osr = osr.SpatialReference()
if expected_prj is None:
expected_osr = src_osr
else:
expected_osr.ImportFromWkt( expected_prj )
new_osr = osr.SpatialReference()
new_osr.ImportFromWkt(new_ds.GetProjection())
if not new_osr.IsSame(expected_osr):
post_reason( 'Did not get expected projection reference.' )
print('Got: ')
print(new_osr.ExportToPrettyWkt())
print('Expected:')
print(expected_osr.ExportToPrettyWkt())
return 'fail'
new_ds = None
if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
self.driver.Delete( new_filename )
return 'success'
def testSetMetadata(self):
if self.testDriver() == 'fail':
return 'skip'
wrk_filename = 'data/' + self.filename
if self.open_options:
src_ds = gdal.OpenEx( wrk_filename, gdal.OF_RASTER, open_options = self.open_options )
else:
src_ds = gdal.Open( wrk_filename, gdal.GA_ReadOnly )
xsize = src_ds.RasterXSize
ysize = src_ds.RasterYSize
new_filename = 'tmp/' + self.filename + '.tst'
new_ds = self.driver.Create( new_filename, xsize, ysize, 1,
src_ds.GetRasterBand(self.band).DataType,
options = self.options )
if new_ds is None:
post_reason( 'Failed to create test file using Create method.' )
return 'fail'
dict = {}
dict['TEST_KEY'] = 'TestValue'
new_ds.SetMetadata( dict )
# FIXME
#if new_ds.SetMetadata( dict ) is not gdal.CE_None:
#print new_ds.SetMetadata( dict )
#post_reason( 'Failed to set metadata item.' )
#return 'fail'
src_ds = None
new_ds = None
new_ds = gdal.Open( new_filename )
if new_ds is None:
post_reason( 'Failed to open dataset: ' + new_filename )
return 'fail'
md_dict = new_ds.GetMetadata()
        if 'TEST_KEY' not in md_dict:
post_reason( 'Metadata item TEST_KEY does not exist.')
return 'fail'
if md_dict['TEST_KEY'] != 'TestValue':
post_reason( 'Did not get expected metadata item.' )
return 'fail'
new_ds = None
if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
self.driver.Delete( new_filename )
return 'success'
def testSetNoDataValue(self, delete = False):
if self.testDriver() == 'fail':
return 'skip'
wrk_filename = 'data/' + self.filename
if self.open_options:
src_ds = gdal.OpenEx( wrk_filename, gdal.OF_RASTER, open_options = self.open_options )
else:
src_ds = gdal.Open( wrk_filename, gdal.GA_ReadOnly )
xsize = src_ds.RasterXSize
ysize = src_ds.RasterYSize
new_filename = 'tmp/' + self.filename + '.tst'
new_ds = self.driver.Create( new_filename, xsize, ysize, 1,
src_ds.GetRasterBand(self.band).DataType,
options = self.options )
if new_ds is None:
post_reason( 'Failed to create test file using Create method.' )
return 'fail'
        if self.options is None or 'PIXELTYPE=SIGNEDBYTE' not in self.options:
nodata = 130
else:
nodata = 11
if new_ds.GetRasterBand(1).SetNoDataValue(nodata) is not gdal.CE_None:
post_reason( 'Failed to set NoData value.' )
return 'fail'
src_ds = None
new_ds = None
if delete:
mode = gdal.GA_Update
else:
mode = gdal.GA_ReadOnly
new_ds = gdal.Open( new_filename, mode )
if new_ds is None:
post_reason( 'Failed to open dataset: ' + new_filename )
return 'fail'
if nodata != new_ds.GetRasterBand(1).GetNoDataValue():
post_reason( 'Did not get expected NoData value.' )
return 'fail'
if delete:
if new_ds.GetRasterBand(1).DeleteNoDataValue() != 0:
post_reason( 'Did not manage to delete nodata value' )
return 'fail'
new_ds = None
if delete:
new_ds = gdal.Open (new_filename)
if new_ds.GetRasterBand(1).GetNoDataValue() is not None:
post_reason( 'Got nodata value whereas none was expected' )
return 'fail'
new_ds = None
if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
self.driver.Delete( new_filename )
return 'success'
def testSetNoDataValueAndDelete(self):
return self.testSetNoDataValue(delete = True)
def testSetDescription(self):
if self.testDriver() == 'fail':
return 'skip'
wrk_filename = 'data/' + self.filename
if self.open_options:
src_ds = gdal.OpenEx( wrk_filename, gdal.OF_RASTER, open_options = self.open_options )
else:
src_ds = gdal.Open( wrk_filename, gdal.GA_ReadOnly )
xsize = src_ds.RasterXSize
ysize = src_ds.RasterYSize
new_filename = 'tmp/' + self.filename + '.tst'
new_ds = self.driver.Create( new_filename, xsize, ysize, 1,
src_ds.GetRasterBand(self.band).DataType,
options = self.options )
if new_ds is None:
post_reason( 'Failed to create test file using Create method.' )
return 'fail'
description = "Description test string"
new_ds.GetRasterBand(1).SetDescription(description)
src_ds = None
new_ds = None
new_ds = gdal.Open( new_filename )
if new_ds is None:
post_reason( 'Failed to open dataset: ' + new_filename )
return 'fail'
if description != new_ds.GetRasterBand(1).GetDescription():
post_reason( 'Did not get expected description string.' )
return 'fail'
new_ds = None
if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
self.driver.Delete( new_filename )
return 'success'
def testSetUnitType(self):
if self.testDriver() == 'fail':
return 'skip'
wrk_filename = 'data/' + self.filename
if self.open_options:
src_ds = gdal.OpenEx( wrk_filename, gdal.OF_RASTER, open_options = self.open_options )
else:
src_ds = gdal.Open( wrk_filename, gdal.GA_ReadOnly )
xsize = src_ds.RasterXSize
ysize = src_ds.RasterYSize
new_filename = 'tmp/' + self.filename + '.tst'
new_ds = self.driver.Create( new_filename, xsize, ysize, 1,
src_ds.GetRasterBand(self.band).DataType,
options = self.options )
if new_ds is None:
post_reason( 'Failed to create test file using Create method.' )
return 'fail'
unit = 'mg/m3'
if new_ds.GetRasterBand(1).SetUnitType( unit ) is not gdal.CE_None:
post_reason( 'Failed to set unit type.' )
return 'fail'
src_ds = None
new_ds = None
new_ds = gdal.Open( new_filename )
if new_ds is None:
post_reason( 'Failed to open dataset: ' + new_filename )
return 'fail'
new_unit = new_ds.GetRasterBand(1).GetUnitType()
if new_unit != unit:
print('')
print('old = ', unit)
print('new = ', new_unit)
post_reason( 'Did not get expected unit type.' )
return 'fail'
new_ds = None
if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
self.driver.Delete( new_filename )
return 'success'
def approx_equal( a, b ):
a = float(a)
b = float(b)
    if a == 0 and b != 0:
        return 0
    if a == 0 and b == 0:
        # both zero: equal (and avoids a ZeroDivisionError in the ratio below)
        return 1
if abs(b/a - 1.0) > .00000000001:
return 0
else:
return 1
def user_srs_to_wkt( user_text ):
srs = osr.SpatialReference()
srs.SetFromUserInput( user_text )
return srs.ExportToWkt()
def equal_srs_from_wkt( expected_wkt, got_wkt ):
expected_srs = osr.SpatialReference()
expected_srs.ImportFromWkt( expected_wkt )
got_srs = osr.SpatialReference()
got_srs.ImportFromWkt( got_wkt )
if got_srs.IsSame( expected_srs ):
return 1
else:
print('Expected:\n%s' % expected_wkt)
print('Got: \n%s' % got_wkt)
post_reason( 'SRS differs from expected.' )
return 0
###############################################################################
# Compare two sets of RPC metadata, and establish if they are essentially
# equivalent or not.
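# Sketch of intended use (dataset variables are illustrative):
#   if not gdaltest.rpcs_equal(ds_ref.GetMetadata('RPC'), ds_out.GetMetadata('RPC')):
#       return 'fail'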
def rpcs_equal( md1, md2 ):
simple_fields = [ 'LINE_OFF', 'SAMP_OFF', 'LAT_OFF', 'LONG_OFF',
'HEIGHT_OFF', 'LINE_SCALE', 'SAMP_SCALE', 'LAT_SCALE',
'LONG_SCALE', 'HEIGHT_SCALE' ]
coef_fields = [ 'LINE_NUM_COEFF', 'LINE_DEN_COEFF',
'SAMP_NUM_COEFF', 'SAMP_DEN_COEFF' ]
for sf in simple_fields:
try:
if not approx_equal(float(md1[sf]),float(md2[sf])):
post_reason( '%s values differ.' % sf )
print(md1[sf])
print(md2[sf])
return 0
except:
post_reason( '%s value missing or corrupt.' % sf )
print(md1)
print(md2)
return 0
for cf in coef_fields:
try:
list1 = md1[cf].split()
list2 = md2[cf].split()
except:
post_reason( '%s value missing or corrupt.' % cf )
print(md1[cf])
print(md2[cf])
return 0
if len(list1) != 20:
post_reason( '%s value list length wrong(1)' % cf )
print(list1)
return 0
if len(list2) != 20:
post_reason( '%s value list length wrong(2)' % cf )
print(list2)
return 0
for i in range(20):
if not approx_equal(float(list1[i]),float(list2[i])):
post_reason( '%s[%d] values differ.' % (cf,i) )
print(list1[i], list2[i])
return 0
return 1
###############################################################################
# Test if geotransforms are equal with an epsilon tolerance
#
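# e.g. geotransform_equals(expected_gt, ds.GetGeoTransform(), 1e-9) returns True
# only if all six coefficients match within the given tolerance.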
def geotransform_equals(gt1, gt2, gt_epsilon):
for i in range(6):
if abs(gt1[i]-gt2[i]) > gt_epsilon:
print('')
print('gt1 = ', gt1)
print('gt2 = ', gt2)
post_reason( 'Geotransform differs.' )
return False
return True
###############################################################################
# Download file at url 'url' and put it as 'filename' in 'tmp/cache/'
#
# If 'filename' already exits in 'tmp/cache/', it is not downloaded
# If GDAL_DOWNLOAD_TEST_DATA is not defined, the function fails
# If GDAL_DOWNLOAD_TEST_DATA is defined, 'url' is downloaded as 'filename' in 'tmp/cache/'
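# Typical use in a test (URL and filename are illustrative only):
#   if not gdaltest.download_file('http://example.com/test.zip', 'test.zip'):
#       return 'skip'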
def download_file(url, filename, download_size = -1, force_download = False, max_download_duration = None, base_dir = 'tmp/cache'):
if filename.startswith(base_dir + '/'):
filename = filename[len(base_dir + '/'):]
global count_skipped_tests_download
try:
os.stat( base_dir + '/' + filename )
return True
except:
if 'GDAL_DOWNLOAD_TEST_DATA' in os.environ or force_download:
val = None
import time
start_time = time.time()
try:
handle = gdalurlopen(url)
if handle is None:
return False
if download_size == -1:
try:
handle_info = handle.info()
download_size = int(handle_info['content-length'])
print('Downloading %s (length = %d bytes)...' % (url, download_size))
except:
print('Downloading %s...' % (url))
else:
print('Downloading %d bytes from %s...' % (download_size, url))
except:
return False
if download_size >= 0:
sys.stdout.write('Progress: ')
nLastTick = -1
val = ''.encode('ascii')
while len(val) < download_size or download_size < 0:
chunk_size = 1024
if download_size >= 0 and len(val) + chunk_size > download_size:
chunk_size = download_size - len(val)
try:
chunk = handle.read(chunk_size)
except:
print('Did not get expected data length.')
return False
if len(chunk) < chunk_size:
if download_size < 0:
break
print('Did not get expected data length.')
return False
val = val + chunk
if download_size >= 0:
nThisTick = int(40 * len(val) / download_size)
while nThisTick > nLastTick:
nLastTick = nLastTick + 1
if nLastTick % 4 == 0:
sys.stdout.write( "%d" % int((nLastTick / 4) * 10) )
else:
sys.stdout.write(".")
nLastTick = nThisTick
if nThisTick == 40:
sys.stdout.write(" - done.\n" )
current_time = time.time()
if max_download_duration is not None and current_time - start_time > max_download_duration:
print('Download aborted due to timeout.')
return False
try:
os.stat( base_dir )
except:
os.mkdir(base_dir)
try:
open( base_dir + '/' + filename, 'wb').write(val)
return True
except:
print('Cannot write %s' % (filename))
return False
else:
if count_skipped_tests_download == 0:
print('As GDAL_DOWNLOAD_TEST_DATA environment variable is not defined, some tests relying on data to downloaded from the Web will be skipped')
count_skipped_tests_download = count_skipped_tests_download + 1
return False
###############################################################################
# GDAL data type to python struct format
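# e.g. gdal_data_type_to_python_struct_format(gdal.GDT_Int16) returns 'h', which
# can be passed to struct.unpack() when decoding raw band data.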
def gdal_data_type_to_python_struct_format(datatype):
type_char = 'B'
if datatype == gdal.GDT_Int16:
type_char = 'h'
elif datatype == gdal.GDT_UInt16:
type_char = 'H'
elif datatype == gdal.GDT_Int32:
type_char = 'i'
elif datatype == gdal.GDT_UInt32:
type_char = 'I'
elif datatype == gdal.GDT_Float32:
type_char = 'f'
elif datatype == gdal.GDT_Float64:
type_char = 'd'
return type_char
###############################################################################
# Compare the values of the pixels
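# Returns the maximum absolute per-pixel difference on band 1 of the two datasets,
# e.g. compare_ds(ds_ref, ds_new) evaluates to 0 when the pixel values are identical.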
def compare_ds(ds1, ds2, xoff = 0, yoff = 0, width = 0, height = 0, verbose=1):
import struct
if width == 0:
width = ds1.RasterXSize
if height == 0:
height = ds1.RasterYSize
data1 = ds1.GetRasterBand(1).ReadRaster(xoff, yoff, width, height)
type_char = gdal_data_type_to_python_struct_format(ds1.GetRasterBand(1).DataType)
val_array1 = struct.unpack(type_char * width * height, data1)
data2 = ds2.GetRasterBand(1).ReadRaster(xoff, yoff, width, height)
type_char = gdal_data_type_to_python_struct_format(ds2.GetRasterBand(1).DataType)
val_array2 = struct.unpack(type_char * width * height, data2)
maxdiff = 0.0
ndiffs = 0
for i in range(width*height):
diff = val_array1[i] - val_array2[i]
if diff != 0:
#print(val_array1[i])
#print(val_array2[i])
ndiffs = ndiffs + 1
if abs(diff) > maxdiff:
maxdiff = abs(diff)
if verbose:
print("Diff at pixel (%d, %d) : %f" % (i % width, i / width, float(diff)))
elif ndiffs < 10:
if verbose:
print("Diff at pixel (%d, %d) : %f" % (i % width, i / width, float(diff)))
if maxdiff != 0 and verbose:
print("Max diff : %d" % (maxdiff))
print("Number of diffs : %d" % (ndiffs))
return maxdiff
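# Illustrative sketch (not part of the original module): typical use of compare_ds()
# inside a driver test, assuming the module-level 'gdal' import; the file names are
# hypothetical examples.
def _example_compare_usage():
    src_ds = gdal.Open('data/src.tif')
    out_ds = gdal.Open('tmp/out.tif')
    # compare_ds returns the maximum absolute pixel difference of band 1
    maxdiff = compare_ds(src_ds, out_ds)
    return 'success' if maxdiff == 0 else 'fail'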
###############################################################################
# Deregister all JPEG2000 drivers, except the one passed as an argument
def deregister_all_jpeg2000_drivers_but(name_of_driver_to_keep):
global jp2kak_drv, jpeg2000_drv, jp2ecw_drv, jp2mrsid_drv, jp2openjpeg_drv
global jp2kak_drv_unregistered,jpeg2000_drv_unregistered,jp2ecw_drv_unregistered,jp2mrsid_drv_unregistered,jp2openjpeg_drv_unregistered
# Deregister other potential conflicting JPEG2000 drivers that will
# be re-registered in the cleanup
try:
jp2kak_drv = gdal.GetDriverByName('JP2KAK')
if name_of_driver_to_keep != 'JP2KAK' and jp2kak_drv:
gdal.Debug('gdaltest','Deregistering JP2KAK')
jp2kak_drv.Deregister()
jp2kak_drv_unregistered = True
except:
pass
try:
jpeg2000_drv = gdal.GetDriverByName('JPEG2000')
if name_of_driver_to_keep != 'JPEG2000' and jpeg2000_drv:
gdal.Debug('gdaltest','Deregistering JPEG2000')
jpeg2000_drv.Deregister()
jpeg2000_drv_unregistered = True
except:
pass
try:
jp2ecw_drv = gdal.GetDriverByName('JP2ECW')
if name_of_driver_to_keep != 'JP2ECW' and jp2ecw_drv:
gdal.Debug('gdaltest.','Deregistering JP2ECW')
jp2ecw_drv.Deregister()
jp2ecw_drv_unregistered = True
except:
pass
try:
jp2mrsid_drv = gdal.GetDriverByName('JP2MrSID')
if name_of_driver_to_keep != 'JP2MrSID' and jp2mrsid_drv:
gdal.Debug('gdaltest.','Deregistering JP2MrSID')
jp2mrsid_drv.Deregister()
jp2mrsid_drv_unregistered = True
except:
pass
try:
jp2openjpeg_drv = gdal.GetDriverByName('JP2OpenJPEG')
if name_of_driver_to_keep != 'JP2OpenJPEG' and jp2openjpeg_drv:
gdal.Debug('gdaltest.','Deregistering JP2OpenJPEG')
jp2openjpeg_drv.Deregister()
jp2openjpeg_drv_unregistered = True
except:
pass
return True
###############################################################################
# Re-register all JPEG2000 drivers previously disabled by
# deregister_all_jpeg2000_drivers_but
def reregister_all_jpeg2000_drivers():
global jp2kak_drv, jpeg2000_drv, jp2ecw_drv, jp2mrsid_drv, jp2openjpeg_drv
global jp2kak_drv_unregistered,jpeg2000_drv_unregistered,jp2ecw_drv_unregistered,jp2mrsid_drv_unregistered, jp2openjpeg_drv_unregistered
try:
if jp2kak_drv_unregistered:
jp2kak_drv.Register()
jp2kak_drv_unregistered = False
gdal.Debug('gdaltest','Registering JP2KAK')
except:
pass
try:
if jpeg2000_drv_unregistered:
jpeg2000_drv.Register()
jpeg2000_drv_unregistered = False
gdal.Debug('gdaltest','Registering JPEG2000')
except:
pass
try:
if jp2ecw_drv_unregistered:
jp2ecw_drv.Register()
jp2ecw_drv_unregistered = False
gdal.Debug('gdaltest','Registering JP2ECW')
except:
pass
try:
if jp2mrsid_drv_unregistered:
jp2mrsid_drv.Register()
jp2mrsid_drv_unregistered = False
gdal.Debug('gdaltest','Registering JP2MrSID')
except:
pass
try:
if jp2openjpeg_drv_unregistered:
jp2openjpeg_drv.Register()
jp2openjpeg_drv_unregistered = False
gdal.Debug('gdaltest','Registering JP2OpenJPEG')
except:
pass
return True
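# Illustrative sketch (not part of the original module): the usual pairing of the two
# helpers above in a JPEG2000 driver test, so that only the driver under test opens
# the .jp2 file; the driver name and file name are hypothetical examples.
def _example_jp2_driver_isolation():
    deregister_all_jpeg2000_drivers_but('JP2OpenJPEG')
    try:
        ds = gdal.Open('data/sample.jp2')
        ok = ds is not None
    finally:
        reregister_all_jpeg2000_drivers()
    return 'success' if ok else 'fail'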
###############################################################################
# Determine if the filesystem supports sparse files.
# Currently, this will only work on Linux (or any *NIX that has the stat
# command line utility)
def filesystem_supports_sparse_files(path):
if skip_on_travis():
return False
try:
(ret, err) = runexternal_out_and_err('stat -f -c "%T" ' + path)
except:
return False
if err != '':
post_reason('Cannot determine if filesystem supports sparse files')
return False
if ret.find('fat32') != -1:
post_reason('File system does not support sparse files')
return False
# Add here any missing filesystem supporting sparse files
# See http://en.wikipedia.org/wiki/Comparison_of_file_systems
if ret.find('ext3') == -1 and \
ret.find('ext4') == -1 and \
ret.find('reiser') == -1 and \
ret.find('xfs') == -1 and \
ret.find('jfs') == -1 and \
ret.find('zfs') == -1 and \
ret.find('ntfs') == -1 :
post_reason('Filesystem %s is not believed to support sparse files' % ret)
return False
return True
###############################################################################
# Unzip a file
def unzip(target_dir, zipfilename, verbose = False):
try:
import zipfile
zf = zipfile.ZipFile(zipfilename)
except:
os.system('unzip -d ' + target_dir + ' ' + zipfilename)
return
for filename in zf.namelist():
if verbose:
print(filename)
outfilename = os.path.join(target_dir, filename)
if filename.endswith('/'):
if not os.path.exists(outfilename):
os.makedirs(outfilename)
else:
outdirname = os.path.dirname(outfilename)
if not os.path.exists(outdirname):
os.makedirs(outdirname)
outfile = open(outfilename,'wb')
outfile.write(zf.read(filename))
outfile.close()
return
###############################################################################
# Return if a number is the NaN number
def isnan(val):
if val == val:
# Python 2.3, unlike later versions, returns True for nan == nan
val_str = '%f' % val
if val_str == 'nan':
return True
else:
return False
else:
return True
###############################################################################
# Return NaN
def NaN():
try:
# Python >= 2.6
return float('nan')
except:
return 1e400 / 1e400
###############################################################################
# Return positive infinity
def posinf():
try:
# Python >= 2.6
return float('inf')
except:
return 1e400
###############################################################################
# Return negative infinity
def neginf():
try:
# Python >= 2.6
return float('-inf')
except:
return -1e400
###############################################################################
# Has the user requested to run the slow tests
def run_slow_tests():
global count_skipped_tests_slow
val = gdal.GetConfigOption('GDAL_RUN_SLOW_TESTS', None)
if val != 'yes' and val != 'YES':
if count_skipped_tests_slow == 0:
print('As the GDAL_RUN_SLOW_TESTS environment variable is not defined, some "slow" tests will be skipped')
count_skipped_tests_slow = count_skipped_tests_slow + 1
return False
return True
###############################################################################
# Return true if the platform support symlinks
def support_symlink():
if sys.platform.startswith('linux'):
return True
if sys.platform.find('freebsd') != -1:
return True
if sys.platform == 'darwin':
return True
if sys.platform.find('sunos') != -1:
return True
return False
###############################################################################
# Return True if the test must be skipped
def skip_on_travis():
val = gdal.GetConfigOption('TRAVIS', None)
if val is not None:
post_reason('Test skipped on Travis')
return True
return False
###############################################################################
# find_lib_linux()
# Parse /proc/self/maps to find an occurrence of libXXXXX.so.*
def find_lib_linux(libname):
f = open('/proc/self/maps')
lines = f.readlines()
f.close()
for line in lines:
if line.rfind('/lib' + libname) == -1 or line.find('.so') == -1:
continue
i = line.find(' ')
if i < 0:
continue
line = line[i+1:]
i = line.find(' ')
if i < 0:
continue
line = line[i+1:]
i = line.find(' ')
if i < 0:
continue
line = line[i+1:]
i = line.find(' ')
if i < 0:
continue
line = line[i+1:]
i = line.find(' ')
if i < 0:
continue
line = line[i+1:]
soname = line.lstrip().rstrip('\n')
if soname.rfind('/lib' + libname) == -1:
continue
return soname
return None
###############################################################################
# find_lib_sunos()
# Parse output of pmap to find an occurrence of libXXX.so.*
def find_lib_sunos(libname):
pid = os.getpid()
(lines, err) = runexternal_out_and_err('pmap %d' % pid)
for line in lines.split('\n'):
if line.rfind('/lib' + libname) == -1 or line.find('.so') == -1:
continue
i = line.find('/')
if i < 0:
continue
line = line[i:]
soname = line.lstrip().rstrip('\n')
if soname.rfind('/lib' + libname) == -1:
continue
return soname
return None
###############################################################################
# find_lib_windows()
# use Module32First() / Module32Next() API on the current process
def find_lib_windows(libname):
try:
import ctypes
except:
return None
kernel32 = ctypes.windll.kernel32
MAX_MODULE_NAME32 = 255
MAX_PATH = 260
TH32CS_SNAPMODULE = 0x00000008
class MODULEENTRY32(ctypes.Structure):
_fields_ = [
("dwSize", ctypes.c_int),
("th32ModuleID", ctypes.c_int),
("th32ProcessID", ctypes.c_int),
("GlblcntUsage", ctypes.c_int),
("ProccntUsage", ctypes.c_int),
("modBaseAddr", ctypes.c_char_p),
("modBaseSize", ctypes.c_int),
("hModule", ctypes.c_void_p),
("szModule", ctypes.c_char * (MAX_MODULE_NAME32 + 1)),
("szExePath", ctypes.c_char * MAX_PATH)
]
Module32First = kernel32.Module32First
Module32First.argtypes = [ ctypes.c_void_p, ctypes.POINTER(MODULEENTRY32) ]
Module32First.restype = ctypes.c_int
Module32Next = kernel32.Module32Next
Module32Next.argtypes = [ ctypes.c_void_p, ctypes.POINTER(MODULEENTRY32) ]
Module32Next.restype = ctypes.c_int
CreateToolhelp32Snapshot = kernel32.CreateToolhelp32Snapshot
CreateToolhelp32Snapshot.argtypes = [ ctypes.c_int, ctypes.c_int ]
CreateToolhelp32Snapshot.restype = ctypes.c_void_p
CloseHandle = kernel32.CloseHandle
CloseHandle.argtypes = [ ctypes.c_void_p ]
CloseHandle.restype = ctypes.c_int
GetLastError = kernel32.GetLastError
GetLastError.argtypes = []
GetLastError.restype = ctypes.c_int
snapshot = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE,0)
if snapshot is None:
return None
soname = None
i = 0
while True:
entry = MODULEENTRY32()
entry.dwSize = ctypes.sizeof(MODULEENTRY32)
pentry = ctypes.pointer(entry)
if i == 0:
ret = Module32First(snapshot, pentry)
else:
ret = Module32Next(snapshot, pentry)
i = i + 1
if ret == 0:
break
try:
path = entry.szExePath.decode('latin1')
except:
continue
i = path.rfind('\\' + libname)
if i < 0:
continue
if path[i+1:].find('\\') >= 0:
continue
soname = path
break
CloseHandle(snapshot)
return soname
###############################################################################
# find_lib()
def find_lib(mylib):
if sys.platform.startswith('linux'):
return find_lib_linux(mylib)
elif sys.platform.startswith('sunos'):
return find_lib_sunos(mylib)
elif sys.platform.startswith('win32'):
return find_lib_windows(mylib)
else:
# sorry mac users or other BSDs
# should be doable
return None
###############################################################################
# get_opened_files()
def get_opened_files():
if not sys.platform.startswith('linux'):
return []
fdpath = '/proc/%d/fd' % os.getpid()
file_numbers = os.listdir(fdpath)
filenames = []
for fd in file_numbers:
try:
filename = os.readlink('%s/%s' % (fdpath, fd))
if not filename.startswith('/dev/') and not filename.startswith('pipe:'):
filenames.append(filename)
except:
pass
return filenames
###############################################################################
# is_file_open()
def is_file_open(filename):
for got_filename in get_opened_files():
if got_filename.find(filename) >= 0:
return True
return False
###############################################################################
# error_handler()
# Allow use of "with" for an ErrorHandler that always pops at the scope close.
# Defaults to suppressing errors and warnings.
@contextlib.contextmanager
def error_handler(error_name = 'CPLQuietErrorHandler'):
handler = gdal.PushErrorHandler(error_name)
try:
yield handler
finally:
gdal.PopErrorHandler()
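# Illustrative sketch (not part of the original module): using the context manager
# above to silence an expected GDAL error; the file name is a hypothetical example.
def _example_quiet_open():
    with error_handler():
        ds = gdal.Open('i_do_not_exist.tif')
    # ds is None, but no error message was printed to the console
    return 'success' if ds is None else 'fail'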
###############################################################################
# Temporarily define a new value of block cache
@contextlib.contextmanager
def SetCacheMax(val):
oldval = gdal.GetCacheMax()
gdal.SetCacheMax(val)
try:
yield
finally:
gdal.SetCacheMax(oldval)
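# Illustrative sketch (not part of the original module): temporarily shrinking the
# raster block cache around a read-heavy operation; the previous value is restored
# when the 'with' block exits. 'ds' is a hypothetical already-opened dataset.
def _example_small_cache(ds):
    with SetCacheMax(1024 * 1024):
        return ds.GetRasterBand(1).Checksum()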
###############################################################################
run_func = gdaltestaux.run_func
urlescape = gdaltestaux.urlescape
gdalurlopen = gdaltestaux.gdalurlopen
spawn_async = gdaltestaux.spawn_async
wait_process = gdaltestaux.wait_process
runexternal = gdaltestaux.runexternal
read_in_thread = gdaltestaux.read_in_thread
runexternal_out_and_err = gdaltestaux.runexternal_out_and_err
| []
| []
| [
"ASAN_OPTIONS",
"GDALTEST_ASAN_OPTIONS"
]
| [] | ["ASAN_OPTIONS", "GDALTEST_ASAN_OPTIONS"] | python | 2 | 0 | |
scripts/runAll.py | #! /usr/bin/env python
from __future__ import print_function
import sys
import os
import glob
pFiles = glob.glob('*_p.py')
caseDict = {}
for pf in pFiles:
caseDict[pf] = set(glob.glob(pf[:-5]+'*_n.py'))
# fix cases where the problem name is a subset of some other problem name
for pf1 in pFiles:
for pf2 in pFiles:
if pf2.find(pf1[:-4]):
nf1Set=set(glob.glob(pf1[:-5]+'*_n.py'))
caseDict[pf2] -= nf1Set
for pf in pFiles:
print(pf)
print(caseDict[pf])
for p,nList in caseDict.items():
if len(nList) == 0:
sys.stdout.write("\n----------------Skipping "+p+". No n file----------------------\n")
sys.stdout.flush()
else:
for n in nList:
args = ('proteusRun.py',p,n,'-l 4','-b','runAllBatch.py')
sys.stdout.write("\n----------------Running "+p+"---"+n+"\n")
sys.stdout.flush()
os.spawnvpe(os.P_WAIT,'proteusRun.py',args,os.environ)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
ingestion/src/metadata/ingestion/source/bigquery.py | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import tempfile
from typing import Optional, Tuple
from google.cloud.datacatalog_v1 import PolicyTagManagerClient
from sqlalchemy_bigquery import _types
from sqlalchemy_bigquery._struct import STRUCT
from sqlalchemy_bigquery._types import (
_get_sqla_column_type,
_get_transitive_schema_fields,
)
from metadata.generated.schema.api.tags.createTagCategory import (
CreateTagCategoryRequest,
)
from metadata.generated.schema.entity.data.database import Database
from metadata.generated.schema.entity.data.table import TableData
from metadata.generated.schema.entity.services.connections.database.bigQueryConnection import (
BigQueryConnection,
)
from metadata.generated.schema.entity.services.connections.metadata.openMetadataConnection import (
OpenMetadataConnection,
)
from metadata.generated.schema.metadataIngestion.workflow import (
Source as WorkflowSource,
)
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.api.source import InvalidSourceException
from metadata.ingestion.source.sql_source import SQLSource
from metadata.utils.column_type_parser import create_sqlalchemy_type
from metadata.utils.helpers import get_start_and_end
logger = logging.getLogger(__name__)
GEOGRAPHY = create_sqlalchemy_type("GEOGRAPHY")
_types._type_map["GEOGRAPHY"] = GEOGRAPHY
def get_columns(bq_schema):
fields = _get_transitive_schema_fields(bq_schema)
col_list = []
for field in fields:
col_obj = {
"name": field.name,
"type": _get_sqla_column_type(field)
if "STRUCT" or "RECORD" not in field
else STRUCT,
"nullable": field.mode == "NULLABLE" or field.mode == "REPEATED",
"comment": field.description,
"default": None,
"precision": field.precision,
"scale": field.scale,
"max_length": field.max_length,
"raw_data_type": str(_get_sqla_column_type(field)),
"policy_tags": None,
}
try:
if field.policy_tags:
col_obj["policy_tags"] = (
PolicyTagManagerClient()
.get_policy_tag(name=field.policy_tags.names[0])
.display_name
)
except Exception as err:
logger.info(f"Skipping Policy Tag: {err}")
col_list.append(col_obj)
return col_list
_types.get_columns = get_columns
class BigquerySource(SQLSource):
def __init__(self, config, metadata_config):
super().__init__(config, metadata_config)
self.connection_config = self.config.serviceConnection.__root__.config
self.temp_credentials = None
# and "policy_tags" in column and column["policy_tags"]
def prepare(self):
try:
if self.connection_config.enablePolicyTagImport:
self.metadata.create_tag_category(
CreateTagCategoryRequest(
name=self.connection_config.tagCategoryName,
description="",
categoryType="Classification",
)
)
except Exception as err:
logger.error(err)
@classmethod
def create(cls, config_dict, metadata_config: OpenMetadataConnection):
config: WorkflowSource = WorkflowSource.parse_obj(config_dict)
connection: BigQueryConnection = config.serviceConnection.__root__.config
if not isinstance(connection, BigQueryConnection):
raise InvalidSourceException(
f"Expected BigQueryConnection, but got {connection}"
)
if (
not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
and connection.connectionOptions
):
options = connection.connectionOptions.dict()
if options.get("credentials_path"):
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = options[
"credentials_path"
]
del connection.connectionOptions.credentials_path
elif options.get("credentials"):
cls.temp_credentials = cls.create_credential_temp_file(
credentials=options["credentials"]
)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = cls.temp_credentials
del connection.connectionOptions.credentials
else:
logger.warning(
"Please refer to the BigQuery connector documentation, especially the credentials part "
"https://docs.open-metadata.org/connectors/bigquery"
)
return cls(config, metadata_config)
@staticmethod
def create_credential_temp_file(credentials: dict) -> str:
with tempfile.NamedTemporaryFile(delete=False) as fp:
cred_json = json.dumps(credentials, indent=4, separators=(",", ": "))
fp.write(cred_json.encode())
return fp.name
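# Illustrative sketch (not part of the original source): how the helper above is used
# in create() — a credentials dict from connectionOptions is written to a temp file and
# its path is exported for the Google client libraries. The dict below is a hypothetical
# example:
#
#   creds = {"type": "service_account", "project_id": "my-project"}
#   path = BigquerySource.create_credential_temp_file(credentials=creds)
#   os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = path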
def standardize_schema_table_names(
self, schema: str, table: str
) -> Tuple[str, str]:
segments = table.split(".")
if len(segments) != 2:
raise ValueError(f"expected table to contain schema name already {table}")
if segments[0] != schema:
raise ValueError(f"schema {schema} does not match table {table}")
return segments[0], segments[1]
def fetch_sample_data(self, schema: str, table: str) -> Optional[TableData]:
partition_details = self.inspector.get_indexes(table, schema)
if partition_details and partition_details[0].get("name") == "partition":
try:
logger.info("Using Query for Partitioned Tables")
partition_details = self.inspector.get_indexes(table, schema)
start, end = get_start_and_end(
self.connection_config.partitionQueryDuration
)
query = self.connection_config.partitionQuery.format(
schema,
table,
partition_details[0]["column_names"][0]
or self.connection_config.partitionField,
start.strftime("%Y-%m-%d"),
)
logger.info(query)
results = self.connection.execute(query)
cols = []
for col in results.keys():
cols.append(col)
rows = []
for res in results:
row = list(res)
rows.append(row)
return TableData(columns=cols, rows=rows)
except Exception as err:
logger.error(err)
return []
return super().fetch_sample_data(schema, table)
def _get_database(self, database: Optional[str]) -> Database:
if not database:
database = self.service_connection.projectID
return Database(
name=database,
service=EntityReference(
id=self.service.id, type=self.service_connection.type.value
),
)
def parse_raw_data_type(self, raw_data_type):
return raw_data_type.replace(", ", ",").replace(" ", ":").lower()
def close(self):
super().close()
if self.temp_credentials:
os.unlink(self.temp_credentials)
| []
| []
| [
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | python | 1 | 0 | |
server/controller/helpers.go | package controller
import (
"bytes"
"encoding/json"
"errors"
"net/http"
"os"
"github.com/gin-gonic/gin"
)
// ValidationErrors holds user input validation errors
var ValidationErrors = []string{}
// HandleErr is a generic error handler: it attaches the error to the gin context and returns it
func HandleErr(c *gin.Context, err error) error {
if err != nil {
c.Error(err)
}
return err
}
// send the image URL to the PyTorch API server and read back the prediction
func getPrediction(fileURL string, c chan Prediction) error {
var data Prediction
baseURL := os.Getenv("FLASK_API_BASE_URL")
targetURL := baseURL + "/predict"
postBody, _ := json.Marshal(map[string]string{
"value": fileURL,
})
response, err := http.Post(targetURL, "application/json", bytes.NewBuffer(postBody))
if err != nil {
close(c)
return err
}
defer response.Body.Close()
err = json.NewDecoder(response.Body).Decode(&data)
c <- data
return nil
}
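// Illustrative sketch (not part of the original source): how a handler could consume
// getPrediction through its result channel. The query key and response shape are
// hypothetical examples, not the project's actual API.
func examplePredictHandler(c *gin.Context) {
	results := make(chan Prediction)
	go getPrediction(c.Query("url"), results)
	if prediction, ok := <-results; ok {
		c.JSON(http.StatusOK, prediction)
		return
	}
	// getPrediction closes the channel without sending when the upstream call fails
	c.Status(http.StatusBadGateway)
}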
func getQuery(name string) NatureServeParams {
return NatureServeParams{
CriteriaType: "species",
TextCriteria: []TextCriteria{{
ParamType: "textSearch",
SearchToken: name,
MatchAgainst: "allNames",
Operator: "equals",
}},
}
}
// send bird name and get bird details from NatureServe api
func getBirdDetails(name string, c chan NatureServeAPIResponse) error {
var data NatureServeAPIResponse
targetURL := "https://explorer.natureserve.org/api/data/speciesSearch"
requestQuery := getQuery(name)
var postBody []byte
postBody, err := json.Marshal(requestQuery)
response, err := http.Post(targetURL, "application/json", bytes.NewBuffer(postBody))
if err != nil {
close(c)
return err
} else if response.StatusCode == 404 {
close(c)
err := errors.New("Data not found")
return err
}
defer response.Body.Close()
err = json.NewDecoder(response.Body).Decode(&data)
c <- data
return nil
}
| [
"\"FLASK_API_BASE_URL\""
]
| []
| [
"FLASK_API_BASE_URL"
]
| [] | ["FLASK_API_BASE_URL"] | go | 1 | 0 | |
tfx/components/example_gen/driver_test.py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.example_gen.driver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tfx.components.example_gen import driver
from tfx.components.example_gen import utils
from tfx.dsl.components.base import base_driver
from tfx.dsl.io import fileio
from tfx.orchestration import data_types
from tfx.orchestration.portable import data_types as portable_data_types
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.types import artifact_utils
from tfx.types import channel_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
from google.protobuf import json_format
class DriverTest(tf.test.TestCase):
def setUp(self):
super(DriverTest, self).setUp()
self._test_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
# Mock metadata and create driver.
self._mock_metadata = tf.compat.v1.test.mock.Mock()
self._example_gen_driver = driver.Driver(self._mock_metadata)
def testResolveExecProperties(self):
# Create input dir.
self._input_base_path = os.path.join(self._test_dir, 'input_base')
fileio.makedirs(self._input_base_path)
# Create exec properties.
self._exec_properties = {
utils.INPUT_BASE_KEY:
self._input_base_path,
utils.INPUT_CONFIG_KEY:
json_format.MessageToJson(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(
name='s1',
pattern='span{SPAN}/version{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2',
pattern='span{SPAN}/version{VERSION}/split2/*')
]),
preserving_proto_field_name=True),
utils.RANGE_CONFIG_KEY:
None,
}
# Test align of span number.
span1_v1_split1 = os.path.join(self._input_base_path, 'span01', 'version01',
'split1', 'data')
io_utils.write_string_file(span1_v1_split1, 'testing11')
span1_v1_split2 = os.path.join(self._input_base_path, 'span01', 'version01',
'split2', 'data')
io_utils.write_string_file(span1_v1_split2, 'testing12')
span2_v1_split1 = os.path.join(self._input_base_path, 'span02', 'version01',
'split1', 'data')
io_utils.write_string_file(span2_v1_split1, 'testing21')
# Check that error raised when span does not match.
with self.assertRaisesRegexp(
ValueError, 'Latest span should be the same for each split'):
self._example_gen_driver.resolve_exec_properties(self._exec_properties,
None, None)
span2_v1_split2 = os.path.join(self._input_base_path, 'span02', 'version01',
'split2', 'data')
io_utils.write_string_file(span2_v1_split2, 'testing22')
span2_v2_split1 = os.path.join(self._input_base_path, 'span02', 'version02',
'split1', 'data')
io_utils.write_string_file(span2_v2_split1, 'testing21')
# Check that error raised when span matches, but version does not match.
with self.assertRaisesRegexp(
ValueError, 'Latest version should be the same for each split'):
self._example_gen_driver.resolve_exec_properties(self._exec_properties,
None, None)
span2_v2_split2 = os.path.join(self._input_base_path, 'span02', 'version02',
'split2', 'data')
io_utils.write_string_file(span2_v2_split2, 'testing22')
# Test if latest span and version selected when span and version aligns
# for each split.
self._example_gen_driver.resolve_exec_properties(self._exec_properties,
None, None)
self.assertEqual(self._exec_properties[utils.SPAN_PROPERTY_NAME], 2)
self.assertEqual(self._exec_properties[utils.VERSION_PROPERTY_NAME], 2)
self.assertRegex(
self._exec_properties[utils.FINGERPRINT_PROPERTY_NAME],
r'split:s1,num_files:1,total_bytes:9,xor_checksum:.*,sum_checksum:.*\nsplit:s2,num_files:1,total_bytes:9,xor_checksum:.*,sum_checksum:.*'
)
updated_input_config = example_gen_pb2.Input()
json_format.Parse(self._exec_properties[utils.INPUT_CONFIG_KEY],
updated_input_config)
# Check if latest span is selected.
self.assertProtoEquals(
"""
splits {
name: "s1"
pattern: "span02/version02/split1/*"
}
splits {
name: "s2"
pattern: "span02/version02/split2/*"
}""", updated_input_config)
# Test driver behavior using RangeConfig with static range.
self._exec_properties[utils.INPUT_CONFIG_KEY] = json_format.MessageToJson(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN:2}/version{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='span{SPAN:2}/version{VERSION}/split2/*')
]),
preserving_proto_field_name=True)
self._exec_properties[utils.RANGE_CONFIG_KEY] = json_format.MessageToJson(
range_config_pb2.RangeConfig(
static_range=range_config_pb2.StaticRange(
start_span_number=1, end_span_number=2)),
preserving_proto_field_name=True)
with self.assertRaisesRegexp(
ValueError, 'Start and end span numbers for RangeConfig.static_range'):
self._example_gen_driver.resolve_exec_properties(self._exec_properties,
None, None)
self._exec_properties[utils.RANGE_CONFIG_KEY] = json_format.MessageToJson(
range_config_pb2.RangeConfig(
static_range=range_config_pb2.StaticRange(
start_span_number=1, end_span_number=1)),
preserving_proto_field_name=True)
self._example_gen_driver.resolve_exec_properties(self._exec_properties,
None, None)
self.assertEqual(self._exec_properties[utils.SPAN_PROPERTY_NAME], 1)
self.assertEqual(self._exec_properties[utils.VERSION_PROPERTY_NAME], 1)
self.assertRegex(
self._exec_properties[utils.FINGERPRINT_PROPERTY_NAME],
r'split:s1,num_files:1,total_bytes:9,xor_checksum:.*,sum_checksum:.*\nsplit:s2,num_files:1,total_bytes:9,xor_checksum:.*,sum_checksum:.*'
)
updated_input_config = example_gen_pb2.Input()
json_format.Parse(self._exec_properties[utils.INPUT_CONFIG_KEY],
updated_input_config)
# Check if correct span inside static range is selected.
self.assertProtoEquals(
"""
splits {
name: "s1"
pattern: "span01/version01/split1/*"
}
splits {
name: "s2"
pattern: "span01/version01/split2/*"
}""", updated_input_config)
def testPrepareOutputArtifacts(self):
examples = standard_artifacts.Examples()
output_dict = {utils.EXAMPLES_KEY: channel_utils.as_channel([examples])}
exec_properties = {
utils.SPAN_PROPERTY_NAME: 2,
utils.VERSION_PROPERTY_NAME: 1,
utils.FINGERPRINT_PROPERTY_NAME: 'fp'
}
pipeline_info = data_types.PipelineInfo(
pipeline_name='name', pipeline_root=self._test_dir, run_id='rid')
component_info = data_types.ComponentInfo(
component_type='type', component_id='cid', pipeline_info=pipeline_info)
input_artifacts = {}
output_artifacts = self._example_gen_driver._prepare_output_artifacts( # pylint: disable=protected-access
input_artifacts, output_dict, exec_properties, 1, pipeline_info,
component_info)
examples = artifact_utils.get_single_instance(
output_artifacts[utils.EXAMPLES_KEY])
base_output_dir = os.path.join(self._test_dir, component_info.component_id)
expected_uri = base_driver._generate_output_uri( # pylint: disable=protected-access
base_output_dir, 'examples', 1)
self.assertEqual(examples.uri, expected_uri)
self.assertEqual(
examples.get_string_custom_property(utils.FINGERPRINT_PROPERTY_NAME),
'fp')
self.assertEqual(
examples.get_string_custom_property(utils.SPAN_PROPERTY_NAME), '2')
self.assertEqual(
examples.get_string_custom_property(utils.VERSION_PROPERTY_NAME), '1')
def testDriverRunFn(self):
# Create input dir.
self._input_base_path = os.path.join(self._test_dir, 'input_base')
fileio.makedirs(self._input_base_path)
# Fake previous outputs
span1_v1_split1 = os.path.join(self._input_base_path, 'span01', 'split1',
'data')
io_utils.write_string_file(span1_v1_split1, 'testing11')
span1_v1_split2 = os.path.join(self._input_base_path, 'span01', 'split2',
'data')
io_utils.write_string_file(span1_v1_split2, 'testing12')
ir_driver = driver.Driver(self._mock_metadata)
example = standard_artifacts.Examples()
# Prepare output_dic
example.uri = 'my_uri' # Will verify that this uri is not changed.
output_dic = {utils.EXAMPLES_KEY: [example]}
# Prepare exec_properties.
exec_properties = {
utils.INPUT_BASE_KEY:
self._input_base_path,
utils.INPUT_CONFIG_KEY:
json_format.MessageToJson(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='span{SPAN}/split2/*')
]),
preserving_proto_field_name=True),
}
result = ir_driver.run(
portable_data_types.ExecutionInfo(
output_dict=output_dic, exec_properties=exec_properties))
# Assert exec_properties' values
exec_properties = result.exec_properties
self.assertEqual(exec_properties[utils.SPAN_PROPERTY_NAME].int_value, 1)
updated_input_config = example_gen_pb2.Input()
json_format.Parse(exec_properties[utils.INPUT_CONFIG_KEY].string_value,
updated_input_config)
self.assertProtoEquals(
"""
splits {
name: "s1"
pattern: "span01/split1/*"
}
splits {
name: "s2"
pattern: "span01/split2/*"
}""", updated_input_config)
self.assertRegex(
exec_properties[utils.FINGERPRINT_PROPERTY_NAME].string_value,
r'split:s1,num_files:1,total_bytes:9,xor_checksum:.*,sum_checksum:.*\nsplit:s2,num_files:1,total_bytes:9,xor_checksum:.*,sum_checksum:.*'
)
# Assert output_artifacts' values
self.assertLen(result.output_artifacts[utils.EXAMPLES_KEY].artifacts, 1)
output_example = result.output_artifacts[utils.EXAMPLES_KEY].artifacts[0]
self.assertEqual(output_example.uri, example.uri)
self.assertEqual(
output_example.custom_properties[utils.SPAN_PROPERTY_NAME].string_value,
'1')
self.assertRegex(
output_example.custom_properties[
utils.FINGERPRINT_PROPERTY_NAME].string_value,
r'split:s1,num_files:1,total_bytes:9,xor_checksum:.*,sum_checksum:.*\nsplit:s2,num_files:1,total_bytes:9,xor_checksum:.*,sum_checksum:.*'
)
if __name__ == '__main__':
tf.test.main()
| []
| []
| [
"TEST_UNDECLARED_OUTPUTS_DIR"
]
| [] | ["TEST_UNDECLARED_OUTPUTS_DIR"] | python | 1 | 0 | |
db/Credentials.go | package db
import (
"fmt"
"os"
)
// if you run locally and do not have environment variables set on your machine, uncomment this block
//func init() {
// err := gotenv.Load(".env.sample")
// if err != nil {
// panic(err)
// }
//}
func getSqlInfo() string {
dbname := os.Getenv("POSTGRES_DB")
user := os.Getenv("POSTGRES_DB_USER")
password := os.Getenv("POSTGRES_DB_PASS")
host := os.Getenv("POSTGRES_DB_HOST")
port := os.Getenv("POSTGRES_DB_PORT")
return fmt.Sprintf("postgres://%v:%v@%v:%v/%v?sslmode=disable",
user, password, host, port, dbname)
}
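// Illustrative sketch (not part of the original source): the DSN built above is normally
// handed to database/sql together with a registered Postgres driver. Hypothetical usage,
// assuming an additional `database/sql` import and a "postgres" driver such as lib/pq:
//
//	db, err := sql.Open("postgres", getSqlInfo())
//	if err != nil {
//		panic(err)
//	}
//	defer db.Close()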
func GetApiKey() string {
return os.Getenv("API_KEY")
}
func GetTargetGrpcServer() string {
return os.Getenv("TARGET_GRPC_SERVER")
}
func GetMyGrpcServer() string {
return os.Getenv("MY_GRPC_SERVER")
}
| [
"\"POSTGRES_DB\"",
"\"POSTGRES_DB_USER\"",
"\"POSTGRES_DB_PASS\"",
"\"POSTGRES_DB_HOST\"",
"\"POSTGRES_DB_PORT\"",
"\"API_KEY\"",
"\"TARGET_GRPC_SERVER\"",
"\"MY_GRPC_SERVER\""
]
| []
| [
"POSTGRES_DB_HOST",
"API_KEY",
"TARGET_GRPC_SERVER",
"POSTGRES_DB_PASS",
"MY_GRPC_SERVER",
"POSTGRES_DB",
"POSTGRES_DB_USER",
"POSTGRES_DB_PORT"
]
| [] | ["POSTGRES_DB_HOST", "API_KEY", "TARGET_GRPC_SERVER", "POSTGRES_DB_PASS", "MY_GRPC_SERVER", "POSTGRES_DB", "POSTGRES_DB_USER", "POSTGRES_DB_PORT"] | go | 8 | 0 | |
internal/modules/deviantart/api/api_test.go | package api
import (
"io/ioutil"
"net/url"
"os"
"testing"
"time"
cloudflarebp "github.com/DaRealFreak/cloudflare-bp-go"
"github.com/DaRealFreak/watcher-go/internal/models"
implicitoauth2 "github.com/DaRealFreak/watcher-go/pkg/oauth2"
"github.com/stretchr/testify/assert"
)
// nolint: gochecknoglobals
var daAPI *DeviantartAPI
// TestMain is the constructor for the test functions to use a shared API instance
// to prevent multiple logins for every test
func TestMain(m *testing.M) {
testAccount := &models.Account{
Username: os.Getenv("DEVIANTART_USER"),
Password: os.Getenv("DEVIANTART_PASS"),
}
// initialize the shared API instance
daAPI = NewDeviantartAPI("deviantart API", testAccount)
daAPI.AddRoundTrippers()
// run the unit tests
os.Exit(m.Run())
}
func TestNewDeviantartAPI(t *testing.T) {
daAPI.useConsoleExploit = false
res, err := daAPI.request("GET", "/placebo", url.Values{})
assert.New(t).NoError(err)
assert.New(t).Equal(200, res.StatusCode)
contentAPI, err := ioutil.ReadAll(res.Body)
assert.New(t).NoError(err)
// toggle console exploit, we also require the first OAuth2 process to have succeeded
// since we require the user information cookie which is set on a successful login
daAPI.useConsoleExploit = true
res, err = daAPI.request("GET", "/placebo", url.Values{})
assert.New(t).NoError(err)
assert.New(t).Equal(200, res.StatusCode)
contentConsoleExploit, err := ioutil.ReadAll(res.Body)
assert.New(t).NoError(err)
assert.New(t).Equal(contentAPI, contentConsoleExploit)
}
func TestNewDeviantartAPIExpiredToken(t *testing.T) {
testAccount := &models.Account{
Username: os.Getenv("DEVIANTART_USER"),
Password: os.Getenv("DEVIANTART_PASS"),
}
// initialize the shared API instance
api := NewDeviantartAPI("token expiration test", testAccount)
client := api.Session.GetClient()
// apply CloudFlare bypass
client.Transport = cloudflarebp.AddCloudFlareByPass(client.Transport)
ts := &implicitoauth2.ImplicitGrantTokenSource{
Grant: NewImplicitGrantDeviantart(api.OAuth2Config, client, api.account),
}
token, err := ts.Token()
assert.New(t).NoError(err)
assert.New(t).Equal("bearer", token.TokenType)
// expire token to force a refresh
token.Expiry = time.Now().Add(-1 * time.Minute)
token, err = ts.Token()
assert.New(t).NoError(err)
assert.New(t).Equal("bearer", token.TokenType)
}
| [
"\"DEVIANTART_USER\"",
"\"DEVIANTART_PASS\"",
"\"DEVIANTART_USER\"",
"\"DEVIANTART_PASS\""
]
| []
| [
"DEVIANTART_USER",
"DEVIANTART_PASS"
]
| [] | ["DEVIANTART_USER", "DEVIANTART_PASS"] | go | 2 | 0 | |
pyppeteer/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Meta data for pyppeteer."""
import logging
import os
from appdirs import AppDirs
__author__ = """Hiroyuki Takagi"""
__email__ = '[email protected]'
__version__ = '0.0.25'
__chromium_revision__ = '674921'
__base_puppeteer_version__ = 'v1.6.0'
__pyppeteer_home__ = os.environ.get(
'PYPPETEER_HOME', AppDirs('pyppeteer').user_data_dir) # type: str
DEBUG = False
# Setup root logger
_logger = logging.getLogger('pyppeteer')
_log_handler = logging.StreamHandler()
_fmt = '[{levelname[0]}:{name}] {msg}'
_formatter = logging.Formatter(fmt=_fmt, style='{')
_log_handler.setFormatter(_formatter)
_log_handler.setLevel(logging.DEBUG)
_logger.addHandler(_log_handler)
_logger.propagate = False
from pyppeteer.launcher import connect, launch, executablePath # noqa: E402
from pyppeteer.launcher import defaultArgs # noqa: E402
version = __version__
version_info = tuple(int(i) for i in version.split('.'))
__all__ = [
'connect',
'launch',
'executablePath',
'defaultArgs',
'version',
'version_info',
]
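# Illustrative sketch (not part of the original module): minimal use of the re-exported
# launch() helper; the URL and output path are hypothetical examples and an asyncio
# event loop (e.g. asyncio.get_event_loop().run_until_complete) is assumed.
async def _example_screenshot():
    browser = await launch()
    page = await browser.newPage()
    await page.goto('https://example.com')
    await page.screenshot({'path': 'example.png'})
    await browser.close()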
| []
| []
| [
"PYPPETEER_HOME"
]
| [] | ["PYPPETEER_HOME"] | python | 1 | 0 | |
service/hook/parser/parse.go | // Copyright 2019 Drone IO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"errors"
"fmt"
"net/http"
"net/http/httputil"
"os"
"strconv"
"strings"
"time"
"github.com/drone/drone/core"
"github.com/drone/go-scm/scm"
)
// TODO(bradrydzewski): stash, push hook missing link
// TODO(bradrydzewski): stash, tag hook missing timestamp
// TODO(bradrydzewski): stash, tag hook missing commit message
// TODO(bradrydzewski): stash, tag hook missing link
// TODO(bradrydzewski): stash, pull request hook missing link
// TODO(bradrydzewski): stash, hooks missing repository clone http url
// TODO(bradrydzewski): stash, hooks missing repository clone ssh url
// TODO(bradrydzewski): stash, hooks missing repository html link
// TODO(bradrydzewski): gogs, push hook missing author avatar, using sender instead.
// TODO(bradrydzewski): gogs, pull request hook missing commit sha.
// TODO(bradrydzewski): gogs, tag hook missing commit sha.
// TODO(bradrydzewski): gogs, sender missing Name field.
// TODO(bradrydzewski): gogs, push hook missing repository html url
// TODO(bradrydzewski): gitea, push hook missing author avatar, using sender instead.
// TODO(bradrydzewski): gitea, tag hook missing commit sha.
// TODO(bradrydzewski): gitea, sender missing Name field.
// TODO(bradrydzewski): gitea, push hook missing repository html url
// TODO(bradrydzewski): bitbucket, pull request hook missing author email.
// TODO(bradrydzewski): bitbucket, hooks missing default repository branch.
// TODO(bradrydzewski): github, push hook timestamp is negative value.
// TODO(bradrydzewski): github, pull request message is empty
// represents a deleted ref in the github webhook.
const emptyCommit = "0000000000000000000000000000000000000000"
// this is intended for local testing and instructs the handler
// to print the contents of the hook to stdout.
var debugPrintHook = false
func init() {
debugPrintHook, _ = strconv.ParseBool(
os.Getenv("DRONE_DEBUG_DUMP_HOOK"),
)
}
// New returns a new HookParser.
func New(client *scm.Client) core.HookParser {
return &parser{client}
}
type parser struct {
client *scm.Client
}
func (p *parser) Parse(req *http.Request, secretFunc func(string) string) (*core.Hook, *core.Repository, error) {
if debugPrintHook {
// if DRONE_DEBUG_DUMP_HOOK=true print the http.Request
// headers and body to stdout.
out, _ := httputil.DumpRequest(req, true)
os.Stderr.Write(out)
}
// callback function provides the webhook parser with
// a per-repository secret key used to verify the webhook
// payload signature for authenticity.
fn := func(webhook scm.Webhook) (string, error) {
if webhook == nil {
// HACK(bradrydzewski) if the incoming webhook is nil
// we assume it is an unknown event or action. A more
// permanent fix is to update go-scm to return an
// scm.ErrUnknownAction error.
return "", scm.ErrUnknownEvent
}
repo := webhook.Repository()
slug := scm.Join(repo.Namespace, repo.Name)
secret := secretFunc(slug)
if secret == "" {
return secret, errors.New("Cannot find repository")
}
return secret, nil
}
payload, err := p.client.Webhooks.Parse(req, fn)
if err == scm.ErrUnknownEvent {
return nil, nil, nil
}
if err != nil {
return nil, nil, err
}
var repo *core.Repository
var hook *core.Hook
switch v := payload.(type) {
case *scm.PushHook:
// github sends push hooks when tags and branches are
// deleted. These hooks should be ignored.
if v.Commit.Sha == emptyCommit {
return nil, nil, nil
}
// github sends push hooks when tags are created. The
// push hook contains more information than the tag hook,
// so we choose to use the push hook for tags.
if strings.HasPrefix(v.Ref, "refs/tags/") {
hook = &core.Hook{
Trigger: core.TriggerHook, // core.TriggerHook
Event: core.EventTag,
Action: core.ActionCreate,
Link: v.Commit.Link,
Timestamp: v.Commit.Author.Date.Unix(),
Message: v.Commit.Message,
Before: v.Before,
After: v.Commit.Sha,
Source: scm.TrimRef(v.BaseRef),
Target: scm.TrimRef(v.BaseRef),
Ref: v.Ref,
Author: v.Commit.Author.Login,
AuthorName: v.Commit.Author.Name,
AuthorEmail: v.Commit.Author.Email,
AuthorAvatar: v.Commit.Author.Avatar,
Sender: v.Sender.Login,
}
} else {
hook = &core.Hook{
Trigger: core.TriggerHook, //core.TriggerHook,
Event: core.EventPush,
Link: v.Commit.Link,
Timestamp: v.Commit.Author.Date.Unix(),
Message: v.Commit.Message,
Before: v.Before,
After: v.Commit.Sha,
Ref: v.Ref,
Source: strings.TrimPrefix(v.Ref, "refs/heads/"),
Target: strings.TrimPrefix(v.Ref, "refs/heads/"),
Author: v.Commit.Author.Login,
AuthorName: v.Commit.Author.Name,
AuthorEmail: v.Commit.Author.Email,
AuthorAvatar: v.Commit.Author.Avatar,
Sender: v.Sender.Login,
}
}
repo = &core.Repository{
UID: v.Repo.ID,
Namespace: v.Repo.Namespace,
Name: v.Repo.Name,
Slug: scm.Join(v.Repo.Namespace, v.Repo.Name),
Link: v.Repo.Link,
Branch: v.Repo.Branch,
Private: v.Repo.Private,
HTTPURL: v.Repo.Clone,
SSHURL: v.Repo.CloneSSH,
}
// gogs and gitea do not include the author avatar in
// the webhook, but they do include the sender avatar.
// use the sender avatar when necessary.
if hook.AuthorAvatar == "" {
hook.AuthorAvatar = v.Sender.Avatar
}
return hook, repo, nil
case *scm.TagHook:
if v.Action != scm.ActionCreate {
return nil, nil, nil
}
// when a tag is created github sends both a push hook
// and a tag create hook. The push hook contains more
// information, so we choose to use the push hook and
// ignore the native tag hook.
if p.client.Driver == scm.DriverGithub ||
p.client.Driver == scm.DriverGitea ||
p.client.Driver == scm.DriverGitlab {
return nil, nil, nil
}
// the tag hook does not include the commit link, message
// or timestamp. In some cases it does not event include
// the sha (gogs). Note that we may need to fetch additional
// details to augment the webhook.
hook = &core.Hook{
Trigger: core.TriggerHook, // core.TriggerHook,
Event: core.EventTag,
Action: core.ActionCreate,
Link: "",
Timestamp: 0,
Message: "",
After: v.Ref.Sha,
Ref: v.Ref.Name,
Source: v.Ref.Name,
Target: v.Ref.Name,
Author: v.Sender.Login,
AuthorName: v.Sender.Name,
AuthorEmail: v.Sender.Email,
AuthorAvatar: v.Sender.Avatar,
Sender: v.Sender.Login,
}
repo = &core.Repository{
UID: v.Repo.ID,
Namespace: v.Repo.Namespace,
Name: v.Repo.Name,
Slug: scm.Join(v.Repo.Namespace, v.Repo.Name),
Link: v.Repo.Link,
Branch: v.Repo.Branch,
Private: v.Repo.Private,
HTTPURL: v.Repo.Clone,
SSHURL: v.Repo.CloneSSH,
}
// TODO(bradrydzewski) can we use scm.ExpandRef here?
if !strings.HasPrefix(hook.Ref, "refs/tags/") {
hook.Ref = fmt.Sprintf("refs/tags/%s", hook.Ref)
}
if hook.AuthorAvatar == "" {
hook.AuthorAvatar = v.Sender.Avatar
}
return hook, repo, nil
case *scm.PullRequestHook:
// TODO(bradrydzewski) cleanup the pr close hook code.
if v.Action == scm.ActionClose {
return &core.Hook{
Trigger: core.TriggerHook,
Event: core.EventPullRequest,
Action: core.ActionClose,
After: v.PullRequest.Sha,
Ref: v.PullRequest.Ref,
}, &core.Repository{
UID: v.Repo.ID,
Namespace: v.Repo.Namespace,
Name: v.Repo.Name,
Slug: scm.Join(v.Repo.Namespace, v.Repo.Name),
}, nil
}
if v.Action != scm.ActionOpen && v.Action != scm.ActionSync {
return nil, nil, nil
}
// Pull Requests are not supported for Bitbucket due
// to lack of refs (e.g. refs/pull-requests/42/from).
// Please contact Bitbucket Support if you would like to
// see this feature enabled:
// https://bitbucket.org/site/master/issues/5814/repository-refs-for-pull-requests
if p.client.Driver == scm.DriverBitbucket {
return nil, nil, nil
}
hook = &core.Hook{
Trigger: core.TriggerHook, // core.TriggerHook,
Event: core.EventPullRequest,
Action: v.Action.String(),
Link: v.PullRequest.Link,
Timestamp: v.PullRequest.Created.Unix(),
Title: v.PullRequest.Title,
Message: v.PullRequest.Body,
Before: v.PullRequest.Base.Sha,
After: v.PullRequest.Sha,
Ref: v.PullRequest.Ref,
Fork: v.PullRequest.Fork,
Source: v.PullRequest.Source,
Target: v.PullRequest.Target,
Author: v.PullRequest.Author.Login,
AuthorName: v.PullRequest.Author.Name,
AuthorEmail: v.PullRequest.Author.Email,
AuthorAvatar: v.PullRequest.Author.Avatar,
Sender: v.Sender.Login,
}
// HACK this is a workaround for github. The pull
// request title is populated, but not the message.
if hook.Message == "" {
hook.Message = hook.Title
}
repo = &core.Repository{
UID: v.Repo.ID,
Namespace: v.Repo.Namespace,
Name: v.Repo.Name,
Slug: scm.Join(v.Repo.Namespace, v.Repo.Name),
Link: v.Repo.Link,
Branch: v.Repo.Branch,
Private: v.Repo.Private,
HTTPURL: v.Repo.Clone,
SSHURL: v.Repo.CloneSSH,
}
if hook.AuthorAvatar == "" {
hook.AuthorAvatar = v.Sender.Avatar
}
return hook, repo, nil
case *scm.BranchHook:
// TODO(bradrydzewski) cleanup the branch hook code.
if v.Action == scm.ActionDelete {
return &core.Hook{
Trigger: core.TriggerHook,
Event: core.EventPush,
After: v.Ref.Sha,
Action: core.ActionDelete,
Target: scm.TrimRef(v.Ref.Name),
}, &core.Repository{
UID: v.Repo.ID,
Namespace: v.Repo.Namespace,
Name: v.Repo.Name,
Slug: scm.Join(v.Repo.Namespace, v.Repo.Name),
}, nil
}
if v.Action != scm.ActionCreate {
return nil, nil, nil
}
if p.client.Driver != scm.DriverStash {
return nil, nil, nil
}
hook = &core.Hook{
Trigger: core.TriggerHook, // core.TriggerHook,
Event: core.EventPush,
Link: "",
Timestamp: 0,
Message: "",
After: v.Ref.Sha,
Ref: v.Ref.Name,
Source: v.Ref.Name,
Target: v.Ref.Name,
Author: v.Sender.Login,
AuthorName: v.Sender.Name,
AuthorEmail: v.Sender.Email,
AuthorAvatar: v.Sender.Avatar,
Sender: v.Sender.Login,
}
repo = &core.Repository{
UID: v.Repo.ID,
Namespace: v.Repo.Namespace,
Name: v.Repo.Name,
Slug: scm.Join(v.Repo.Namespace, v.Repo.Name),
Link: v.Repo.Link,
Branch: v.Repo.Branch,
Private: v.Repo.Private,
HTTPURL: v.Repo.Clone,
SSHURL: v.Repo.CloneSSH,
}
return hook, repo, nil
case *scm.DeployHook:
hook = &core.Hook{
Trigger: core.TriggerHook,
Event: core.EventPromote,
Link: v.TargetURL,
Timestamp: time.Now().Unix(),
Message: v.Desc,
After: v.Ref.Sha,
Ref: v.Ref.Path,
Source: v.Ref.Name,
Target: v.Ref.Name,
Author: v.Sender.Login,
AuthorName: v.Sender.Name,
AuthorEmail: v.Sender.Email,
AuthorAvatar: v.Sender.Avatar,
Sender: v.Sender.Login,
Deployment: v.Target,
DeploymentID: v.Number,
Params: toMap(v.Data),
}
repo = &core.Repository{
UID: v.Repo.ID,
Namespace: v.Repo.Namespace,
Name: v.Repo.Name,
Slug: scm.Join(v.Repo.Namespace, v.Repo.Name),
Link: v.Repo.Link,
Branch: v.Repo.Branch,
Private: v.Repo.Private,
HTTPURL: v.Repo.Clone,
SSHURL: v.Repo.CloneSSH,
}
return hook, repo, nil
default:
return nil, nil, nil
}
}
func toMap(src interface{}) map[string]string {
set, ok := src.(map[string]interface{})
if !ok {
return nil
}
dst := map[string]string{}
for k, v := range set {
dst[k] = fmt.Sprint(v)
}
return dst
}
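// Illustrative sketch (not part of the original source): toMap flattens the loosely
// typed deployment payload into string parameters; the input below is a hypothetical
// example:
//
//	params := toMap(map[string]interface{}{"region": "us-east-1", "replicas": 3})
//	// params == map[string]string{"region": "us-east-1", "replicas": "3"}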
| [
"\"DRONE_DEBUG_DUMP_HOOK\""
]
| []
| [
"DRONE_DEBUG_DUMP_HOOK"
]
| [] | ["DRONE_DEBUG_DUMP_HOOK"] | go | 1 | 0 |