filename (stringlengths 4–198) | content (stringlengths 25–939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses 1 value) | constargjson (stringlengths 2–3.9k) | lang (stringclasses 3 values) | constargcount (float64 0–129 ⌀) | variableargcount (float64 0–0 ⌀) | sentence (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
backend/src/api/api.go
|
package api
import (
"errors"
"os"
"github.com/jmoiron/sqlx"
"github.com/rubenv/sql-migrate"
"gopkg.in/mgutz/dat.v1"
"gopkg.in/mgutz/dat.v1/sqlx-runner"
// Postgresql driver
_ "github.com/lib/pq"
)
//go:generate go-bindata -ignore=\.swp -pkg api db db/migrations
const (
defaultDbURL = "postgres://[email protected]:5432/coreroller?sslmode=disable&connect_timeout=10"
nowUTC = dat.UnsafeString("now() at time zone 'utc'")
)
var (
// ErrNoRowsAffected indicates that no rows were affected in an update or
// delete database operation.
ErrNoRowsAffected = errors.New("coreroller: no rows affected")
// ErrInvalidSemver indicates that the provided semver version is not valid.
ErrInvalidSemver = errors.New("coreroller: invalid semver")
)
// API represents an api instance used to interact with CoreRoller entities.
type API struct {
db *sqlx.DB
dbR *runner.DB
dbDriver string
dbURL string
}
// New creates a new API instance, creating the underlying db connection and
// applying any available db migrations.
func New(options ...func(*API) error) (*API, error) {
api := &API{
dbDriver: "postgres",
dbURL: os.Getenv("COREROLLER_DB_URL"),
}
if api.dbURL == "" {
api.dbURL = defaultDbURL
}
var err error
api.db, err = sqlx.Open(api.dbDriver, api.dbURL)
if err != nil {
return nil, err
}
if err := api.db.Ping(); err != nil {
return nil, err
}
dat.EnableInterpolation = true
api.dbR = runner.NewDBFromSqlx(api.db)
for _, option := range options {
err := option(api)
if err != nil {
return nil, err
}
}
migrate.SetTable("database_migrations")
migrations := &migrate.AssetMigrationSource{
Asset: Asset,
AssetDir: AssetDir,
Dir: "db/migrations",
}
if _, err := migrate.Exec(api.db.DB, "postgres", migrations, migrate.Up); err != nil {
return nil, err
}
return api, nil
}
// OptionInitDB will initialize the database during the API instance creation,
// dropping all existing tables, which will force all migration scripts to be
// re-executed. Use with caution, this will DESTROY ALL YOUR DATA.
func OptionInitDB(api *API) error {
sqlFile, err := Asset("db/drop_all_tables.sql")
if err != nil {
return err
}
if _, err := api.db.Exec(string(sqlFile)); err != nil {
return err
}
return nil
}
// Close releases the connections to the database.
func (api *API) Close() {
_ = api.db.DB.Close()
}
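// Usage sketch (illustrative, not part of the original file): a caller could
// create an API instance with the functional options above; the package alias
// "api" and the use of log.Fatal are assumptions.
//
//	a, err := api.New(api.OptionInitDB)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer a.Close()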
| ["\"COREROLLER_DB_URL\""] | [] | ["COREROLLER_DB_URL"] | [] | ["COREROLLER_DB_URL"] | go | 1 | 0 | |
backend/backend/settings/production.py
|
import os
from .base import *
SECRET_KEY = os.environ.get("SECRET_KEY")
DEBUG = False
ALLOWED_HOSTS = ['0.0.0.0', os.environ.get("PRODUCTION_HOST")]
# White Noise configuration - http://whitenoise.evans.io/en/stable/django.html
INSTALLED_APPS.extend(["whitenoise.runserver_nostatic"])
# Must insert after SecurityMiddleware, which is first in settings/common.py
MIDDLEWARE.insert(1, "whitenoise.middleware.WhiteNoiseMiddleware")
TEMPLATES[0]["DIRS"] = [os.path.join(BASE_DIR, "../", "frontend", "build")]
STATICFILES_DIRS = [os.path.join(BASE_DIR, "../", "frontend", "build", "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATIC_URL = "/static/"
WHITENOISE_ROOT = os.path.join(BASE_DIR, "../", "frontend", "build", "root")
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
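# Deployment note (illustrative, not part of the original file): these settings
# read SECRET_KEY and PRODUCTION_HOST from the environment. A typical (assumed)
# invocation might look like:
#   SECRET_KEY=change-me PRODUCTION_HOST=example.com \
#   DJANGO_SETTINGS_MODULE=backend.settings.production gunicorn backend.wsgi
# The settings module path and the choice of gunicorn are assumptions.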
| [] | [] | ["SECRET_KEY", "PRODUCTION_HOST"] | [] | ["SECRET_KEY", "PRODUCTION_HOST"] | python | 2 | 0 | |
python/pyspark/rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
def portable_hash(x):
"""
This function returns a consistent hash code for builtin types, especially
for None and tuples containing None.
The algorithm is similar to the one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return h
return hash(x)
class BoundedFloat(float):
"""
A bounded value generated by an approximate job, with a confidence level
and low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = None
# Support for both IPv4 and IPv6.
# On most of IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = socket.socket(af, socktype, proto)
try:
sock.settimeout(3)
sock.connect(sa)
except socket.error:
sock.close()
sock = None
continue
break
if not sock:
raise Exception("could not open socket")
try:
rf = sock.makefile("rb", 65536)
for item in serializer.load_stream(rf):
yield item
finally:
sock.close()
def ignore_unicode_prefix(f):
"""
Ignore the 'u' prefix of strings in doc tests, to make them work
in both Python 2 and 3.
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
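# Illustrative note (not in the original): applied to a docstring containing
# ">>> [u'foo']", the regex above rewrites it to ">>> ['foo']" so the doctest
# output matches under both Python 2 and Python 3.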
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
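# Illustrative sketch (not in the original): a Partitioner simply maps a key
# to partitionFunc(key) % numPartitions, e.g.
#   p = Partitioner(2, portable_hash)
#   p("a")                               # 0 or 1
#   p == Partitioner(2, portable_hash)   # True: same count and same function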
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY_SER}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY_SER)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY_SER}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
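# Illustrative sketch (assumes an active SparkContext `sc`; the checkpoint
# directory path is hypothetical):
#   sc.setCheckpointDir("/tmp/checkpoints")
#   rdd = sc.parallelize(range(10)).cache()
#   rdd.checkpoint()      # must be called before any job runs on this RDD
#   rdd.count()           # first job also writes the checkpoint
#   rdd.isCheckpointed()  # True once the checkpoint has been materialized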
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
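# Worked example (illustrative): sampling 5 out of 100 elements without
# replacement gives fraction = 0.05 and gamma = -log(0.00005)/100 ~= 0.099,
# so the returned rate is ~0.05 + 0.099 + sqrt(0.099**2 + 2*0.099*0.05) ~= 0.29,
# well above the naive 0.05, making a sample of at least 5 near-certain.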
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower() == "true")
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = self._can_spill()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
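# Illustrative note (not in the original): with numPartitions=3 and sorted
# sample keys, bounds holds the keys at roughly 1/3 and 2/3 of the sample;
# rangePartitioner then sends keys <= bounds[0] to partition 0, keys in
# (bounds[0], bounds[1]] to partition 1, and the rest to partition 2
# (reversed when ascending is False).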
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
@ignore_unicode_prefix
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
"""
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = str(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip(b'\n').decode('utf-8') for x in iter(pipe.stdout.readline, b''))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative and commutative function and
a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
return self.mapPartitions(func).fold(zeroValue, combOp)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
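# Illustrative note (not in the original): sc.parallelize([1, 2, 3]).stats()
# yields a StatCounter with count() == 3 and mean() == 2.0; mean(), variance()
# and stdev() below are thin wrappers around this single computation.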
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n = # buckets).
Buckets must be sorted, must not contain any duplicates, and must
have at least two elements.
If `buckets` is a number, it generates buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given buckets
as 2, the resulting buckets will be [0,50) [50,100]. buckets must
be at least 1. If the RDD contains infinity or NaN, it throws an
exception. If the elements in the RDD do not vary (max == min),
a single bucket is always returned.
It returns a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p, True)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all. Note that an RDD
may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
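# Illustrative sketch (output path is hypothetical; class names are taken
# from the parameter documentation above):
#   rdd = sc.parallelize([(1, "a"), (2, "b")])
#   rdd.saveAsNewAPIHadoopFile(
#       "hdfs:///tmp/seq-out",
#       "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
#       keyClass="org.apache.hadoop.io.IntWritable",
#       valueClass="org.apache.hadoop.io.Text")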
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as the default because the builtin hash of None
# differs across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def f(x): return x
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
spill = self._can_spill()
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
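# Illustrative sketch (added note, not part of the original source): a
# common combineByKey pattern is a per-key average, carrying a (sum, count)
# pair as the combined type C:
#
#   >>> data = sc.parallelize([("a", 1), ("a", 3), ("b", 5)])
#   >>> sum_count = data.combineByKey(
#   ...     lambda v: (v, 1),
#   ...     lambda c, v: (c[0] + v, c[1] + 1),
#   ...     lambda c1, c2: (c1[0] + c2[0], c1[1] + c2[1]))
#   >>> sorted(sum_count.mapValues(lambda c: c[0] / float(c[1])).collect())
#   [('a', 2.0), ('b', 5.0)]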
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions)
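# Illustrative sketch (added note, not part of the original source):
# aggregateByKey lets the result type U differ from the value type V, e.g.
# collecting the distinct values of each key into a set:
#
#   >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 2), ("a", 1)])
#   >>> by_key = rdd.aggregateByKey(set(),
#   ...                             lambda s, v: s | {v},
#   ...                             lambda s1, s2: s1 | s2)
#   >>> sorted(by_key.mapValues(sorted).collect())
#   [('a', [1, 2]), ('b', [1])]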
def foldByKey(self, zeroValue, func, numPartitions=None):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions)
def _can_spill(self):
return self.ctx._conf.get("spark.shuffle.spill", "True").lower() == "true"
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
spill = self._can_spill()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)\
if spill else InMemoryMerger(agg)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
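# val1 and val2 are iterables of the values cogrouped from self and
# other; keep the pair only when self has values and other has none.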
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD paired together, the second element in each
RDD paired together, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
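# Worked example for the doctest above (added note): with 3 partitions the
# data splits as [['a'], ['b', 'c'], ['d', 'e']], so partition k assigns
# ids i * n + k with n = 3, giving a->0, b->1, c->4, d->2, e->5.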
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)], False)
return values.collect()
def _to_java_object_rdd(self):
""" Return an JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available
<a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
for partition in range(self.getNumPartitions()):
rows = self.context.runJob(self, lambda x: x, [partition])
for row in rows:
yield row
def _prepare_for_python_RDD(sc, command, obj=None):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
# The broadcast will have same life cycle as created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
# There is a bug in py4j.java_gateway.JavaClass with auto_convert
# https://github.com/bartdag/py4j/issues/161
# TODO: use auto_convert once py4j fix the bug
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in sc._pickled_broadcast_vars],
sc._gateway._gateway_client)
sc._pickled_broadcast_vars.clear()
env = MapConverter().convert(sc.environment, sc._gateway._gateway_client)
includes = ListConverter().convert(sc._python_includes, sc._gateway._gateway_client)
return pickled_command, broadcast_vars, env, includes
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
command = (self.func, profiler, self._prev_jrdd_deserializer,
self._jrdd_deserializer)
pickled_cmd, bvars, env, includes = _prepare_for_python_RDD(self.ctx, command, self)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_cmd),
env, includes, self.preservesPartitioning,
self.ctx.pythonExec, self.ctx.pythonVer,
bvars, self.ctx._javaAccumulator)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
bizhi/apps/usercenter/views.py
|
from django.shortcuts import render
from django.views.generic import View
from django.contrib.auth.mixins import LoginRequiredMixin
import logging
logger = logging.getLogger("account")
class Edit_account(LoginRequiredMixin, View):
def get(self, request):
return render(request, "edit-account.html")
def post(self, request):
ret_info = {"code": 200, "msg": "修改成功"}
try:
if request.POST.get("username"):
request.user.username = request.POST.get("username")
if request.POST.get("mobile"):
print('change mobile')
request.user.mobile = request.POST.get("mobile")
if request.POST.get("qq"):
request.user.qq = request.POST.get("qq")
if request.POST.get("email"):
request.user.email = request.POST.get("email")
request.user.save()
except Exception as ex:
logger.exception("failed to update account: %s", ex)
ret_info = {"code": 500, "msg": "修改失败"}
print(ret_info)
return render(request, "edit-account.html", {"ret_info":ret_info})
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
vendor/github.com/buildpack/lifecycle/launcher.go
|
package lifecycle
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
)
type Launcher struct {
DefaultProcessType string
DefaultLaunchDir string
Processes []Process
Buildpacks []string
Exec func(argv0 string, argv []string, envv []string) error
}
func (l *Launcher) Launch(executable, startCommand string) error {
env := &Env{
Getenv: os.Getenv,
Setenv: os.Setenv,
Environ: os.Environ,
Map: POSIXLaunchEnv,
}
if err := l.eachDir(l.DefaultLaunchDir, func(bp string) error {
if bp == "app" {
return nil
}
bpPath := filepath.Join(l.DefaultLaunchDir, bp)
return l.eachDir(bpPath, func(layer string) error {
return env.AddRootDir(filepath.Join(bpPath, layer))
})
}); err != nil {
return errors.Wrap(err, "modify env")
}
if err := os.Chdir(filepath.Join(l.DefaultLaunchDir, "app")); err != nil {
return errors.Wrap(err, "change to app directory")
}
startCommand, err := l.processFor(startCommand)
if err != nil {
return errors.Wrap(err, "determine start command")
}
launcher, err := l.profileD()
if err != nil {
return errors.Wrap(err, "determine profile")
}
if err := l.Exec("/bin/bash", []string{
"bash", "-c",
launcher, executable,
startCommand,
}, os.Environ()); err != nil {
return errors.Wrap(err, "exec")
}
return nil
}
func (l *Launcher) profileD() (string, error) {
var out []string
appendIfFile := func(path string) error {
fi, err := os.Stat(path)
if os.IsNotExist(err) {
return nil
}
if err != nil {
return err
}
if !fi.IsDir() {
out = append(out, fmt.Sprintf(`source "%s"`, path))
}
return nil
}
for _, bp := range l.Buildpacks {
scripts, err := filepath.Glob(filepath.Join(l.DefaultLaunchDir, bp, "*", "profile.d", "*"))
if err != nil {
return "", err
}
for _, script := range scripts {
if err := appendIfFile(script); err != nil {
return "", err
}
}
}
if err := appendIfFile(filepath.Join(l.DefaultLaunchDir, "app", ".profile")); err != nil {
return "", err
}
out = append(out, `exec bash -c "$@"`)
return strings.Join(out, "\n"), nil
}
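// Illustrative note (added, not part of the original source; paths are
// hypothetical): for an app with one buildpack profile.d script and an
// app-level .profile, profileD returns a script roughly like
//
//	source "/launch/some.buildpack/layer/profile.d/env.sh"
//	source "/launch/app/.profile"
//	exec bash -c "$@"
//
// which Launch passes to `bash -c` with the start command as "$@".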
func (l *Launcher) processFor(cmd string) (string, error) {
if cmd == "" {
if process, ok := l.findProcessType(l.DefaultProcessType); ok {
return process, nil
}
return "", fmt.Errorf("process type %s was not found", l.DefaultProcessType)
}
if process, ok := l.findProcessType(cmd); ok {
return process, nil
}
return cmd, nil
}
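// Example (added note, hypothetical process list): with Processes set to
// [{Type: "web", Command: "bundle exec rackup"}] and DefaultProcessType
// "web", processFor("") and processFor("web") both return
// "bundle exec rackup", while processFor("ruby app.rb") falls through and
// returns the literal command unchanged.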
func (l *Launcher) findProcessType(kind string) (string, bool) {
for _, p := range l.Processes {
if p.Type == kind {
return p.Command, true
}
}
return "", false
}
func (*Launcher) eachDir(dir string, fn func(file string) error) error {
files, err := ioutil.ReadDir(dir)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return err
}
for _, f := range files {
if !f.IsDir() {
continue
}
if err := fn(f.Name()); err != nil {
return err
}
}
return nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| 0 | 0 | |
app/main.py
|
#!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main request handler. All dynamic requests except for remote_api are
handled by this handler, which dispatches to all other dynamic handlers."""
import django_setup # always keep this first
import mimetypes
import re
import os
import urlparse
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import webapp
import config
import const
import django.utils.html
import logging
import model
import pfif
import resources
import utils
import user_agents
import setup_pf
# When no action or repo is specified, redirect to this action.
HOME_ACTION = 'home.html'
# Map of URL actions to Python module and class names.
# TODO(kpy): Remove the need for this configuration information, either by
# regularizing the module and class names or adding a URL attribute to handlers.
HANDLER_CLASSES = dict((x, x.replace('/', '_') + '.Handler') for x in [
'start',
'amp_start',
'query',
'results',
'create',
'view',
'multiview',
'reveal',
'photo',
'embed',
'extend',
'gadget',
'delete',
'flag_note',
'restore',
'subscribe',
'unsubscribe',
'disable_notes',
'confirm_disable_notes',
'enable_notes',
'confirm_enable_notes',
'post_flagged_note',
'confirm_post_flagged_note',
'third_party_search',
'admin',
'admin/create_repo',
'admin/dashboard',
'admin/resources',
'admin/review',
'css',
'add_note',
'tos',
])
# Exceptional cases where the module name doesn't match the URL.
HANDLER_CLASSES[''] = 'start.Handler'
HANDLER_CLASSES['admin/api_keys'] = 'admin_api_keys.CreateOrUpdateApiKey'
HANDLER_CLASSES['admin/api_keys/list'] = 'admin_api_keys.ListApiKeys'
HANDLER_CLASSES['api/import'] = 'api.Import'
HANDLER_CLASSES['api/import/notes'] = 'api.Import'
HANDLER_CLASSES['api/import/persons'] = 'api.Import'
HANDLER_CLASSES['api/read'] = 'api.Read'
HANDLER_CLASSES['api/write'] = 'api.Write'
HANDLER_CLASSES['api/search'] = 'api.Search'
HANDLER_CLASSES['api/subscribe'] = 'api.Subscribe'
HANDLER_CLASSES['api/unsubscribe'] = 'api.Unsubscribe'
HANDLER_CLASSES['api/stats'] = 'api.Stats'
HANDLER_CLASSES['api/handle_sms'] = 'api.HandleSMS'
HANDLER_CLASSES['api/photo_upload'] = 'api.PhotoUpload'
HANDLER_CLASSES['feeds/repo'] = 'feeds.Repo'
HANDLER_CLASSES['feeds/note'] = 'feeds.Note'
HANDLER_CLASSES['feeds/person'] = 'feeds.Person'
HANDLER_CLASSES['tasks/count/note'] = 'tasks.CountNote'
HANDLER_CLASSES['tasks/count/person'] = 'tasks.CountPerson'
HANDLER_CLASSES['tasks/count/reindex'] = 'tasks.Reindex'
HANDLER_CLASSES['tasks/count/update_dead_status'] = 'tasks.UpdateDeadStatus'
HANDLER_CLASSES['tasks/count/update_status'] = 'tasks.UpdateStatus'
HANDLER_CLASSES['tasks/delete_expired'] = 'tasks.DeleteExpired'
HANDLER_CLASSES['tasks/delete_old'] = 'tasks.DeleteOld'
HANDLER_CLASSES['tasks/dump_csv'] = 'tasks.DumpCSV'
HANDLER_CLASSES['tasks/clean_up_in_test_mode'] = 'tasks.CleanUpInTestMode'
HANDLER_CLASSES['tasks/notify_many_unreviewed_notes'] = 'tasks.NotifyManyUnreviewedNotes'
HANDLER_CLASSES['tasks/thumbnail_preparer'] = 'tasks.ThumbnailPreparer'
if config.get('enable_react_ui'):
HANDLER_CLASSES['d/create'] = 'frontend_api.Create'
HANDLER_CLASSES['d/person'] = 'frontend_api.Person'
HANDLER_CLASSES['d/repo'] = 'frontend_api.Repo'
HANDLER_CLASSES['d/results'] = 'frontend_api.Results'
NON_REACT_UI_PATHS = ['api/', 'admin/', 'feeds/', 'sitemap', 'tasks/', 'd/']
def is_development_server():
"""Returns True if the app is running in development."""
server = os.environ.get('SERVER_SOFTWARE', '')
return 'Development' in server
def is_cron_task(request):
"""Returns True if the request is from appengine cron."""
return 'X-AppEngine-Cron' in request.headers
def is_task_queue_task(request):
"""Returns True if the request is from the appengine task queue."""
return 'X-AppEngine-TaskName' in request.headers
def get_repo_and_action(request):
"""Determines the repo and action for a request. The action is the part
of the URL path after the repo, with no leading or trailing slashes."""
scheme, netloc, path, _, _ = urlparse.urlsplit(request.url)
parts = path.lstrip('/').split('/')
# Depending on whether we're serving from appspot directly or
# google.org/personfinder we could have /global or /personfinder/global
# as the 'global' prefix.
if parts[0] == 'personfinder':
parts.pop(0)
repo = parts and parts.pop(0) or None
action = '/'.join(parts)
if repo == 'global':
repo = None
return repo, action
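# Illustrative examples (added note, repo names are hypothetical):
#   /haiti/results        -> repo='haiti', action='results'
#   /global/admin/review  -> repo=None,    action='admin/review'
#   /personfinder/haiti   -> repo='haiti', action=''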
def select_charset(request):
"""Given a request, chooses a charset for encoding the response.
If the selected charset is UTF-8, it always returns
'utf-8' (const.CHARSET_UTF8), not 'utf8', 'UTF-8', etc.
For now, we always use UTF-8, because supporting anything else with WebOB
1.2.3 and webapp2 is impractical. We might revisit this once we migrate to
Django, with which it shouldn't be so difficult to support other character
sets.
"""
return const.CHARSET_UTF8
def select_lang(request, config=None):
"""Selects the best language to use for a given request. The 'lang' query
parameter has priority, then the django_language cookie, then
'Accept-Language' HTTP header, then the first language in the language menu,
then the default setting."""
default_lang = (
(config and
config.language_menu_options and
config.language_menu_options[0]) or
const.DEFAULT_LANGUAGE_CODE)
lang = (request.get('lang') or
request.cookies.get('django_language', None) or
select_lang_from_header(request, default_lang=default_lang))
lang = re.sub('[^A-Za-z0-9-]', '', lang)
lang = const.LANGUAGE_SYNONYMS.get(lang, lang)
if lang in const.LANGUAGE_ENDONYMS.keys():
return lang
else:
return default_lang
def select_lang_from_header(request, default_lang):
"""Selects the best language matching 'Accept-Language' HTTP header."""
# Either the first item in the first argument or the default_match
# argument is used as the default, depending on the situation. So we need
# to put the default language in both. See:
# https://docs.pylonsproject.org/projects/webob/en/stable/api/webob.html#webob.acceptparse.AcceptLanguageValidHeader.best_match
# https://docs.pylonsproject.org/projects/webob/en/stable/api/webob.html#webob.acceptparse.AcceptLanguageNoHeader.best_match
return request.accept_language.best_match(
[default_lang] + const.LANGUAGE_ENDONYMS.keys(),
default_match=default_lang)
def get_repo_options(request, lang):
"""Returns a list of the names and titles of the launched repositories."""
options = []
for repo in model.Repo.list_launched():
titles = config.get_for_repo(repo, 'repo_titles', {})
default_title = (titles.values() or ['?'])[0]
title = titles.get(lang, titles.get('en', default_title))
url = utils.get_repo_url(request, repo)
test_mode = config.get_for_repo(repo, 'test_mode')
options.append(utils.Struct(repo=repo, title=title, url=url,
test_mode=test_mode))
return options
def get_language_options(request, config, current_lang):
"""Returns a list of information needed to generate the language menu."""
primary_langs = (config and config.language_menu_options) or ['en']
all_langs = sorted(
const.LANGUAGE_ENDONYMS.keys(),
key=lambda s: const.LANGUAGE_ENDONYMS[s])
return {
'primary':
[get_language_option(request, lang, lang == current_lang)
for lang in primary_langs],
'all':
# We put both 'primary' and 'all' languages into a single <select>
# box (See app/resources/language-menu.html.template).
# If current_lang is in the primary languages, we mark the
# language as is_selected in 'primary', not in 'all', to make sure
# a single option is selected in the <select> box.
[get_language_option(
request, lang,
lang == current_lang and lang not in primary_langs)
for lang in all_langs],
}
def get_language_option(request, lang, is_selected):
return {
'lang': lang,
'endonym': const.LANGUAGE_ENDONYMS.get(lang, '?'),
'url': utils.set_url_param(request.url, 'lang', lang),
'is_selected': is_selected,
}
def get_localized_message(localized_messages, lang, default):
"""Gets the localized message for lang from a dictionary that maps language
codes to localized messages. Falls back to English if language 'lang' is
not available, or to a default message if English is not available."""
if not isinstance(localized_messages, dict):
return default
return localized_messages.get(lang, localized_messages.get('en', default))
def get_hidden_input_tags_for_preserved_query_params(request):
"""Gets HTML with <input type="hidden"> tags to preserve query parameters
listed in utils.PRESERVED_QUERY_PARAM_NAMES e.g. "ui"."""
tags_str = ''
for name in utils.PRESERVED_QUERY_PARAM_NAMES:
value = request.get(name)
if value:
tags_str += '<input type="hidden" name="%s" value="%s">\n' % (
django.utils.html.escape(name),
django.utils.html.escape(value))
return tags_str
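# Example (added note): assuming 'ui' is in PRESERVED_QUERY_PARAM_NAMES, a
# request with ?ui=small yields:
#   <input type="hidden" name="ui" value="small">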
def setup_env(request):
"""Constructs the 'env' object, which contains various template variables
that are commonly used by most handlers."""
env = utils.Struct()
env.repo, env.action = get_repo_and_action(request)
env.config = config.Configuration(env.repo or '*')
env.analytics_id = env.config.get('analytics_id')
env.amp_gtm_id = env.config.get('amp_gtm_id')
env.maps_api_key = env.config.get('maps_api_key')
# Internationalization-related stuff.
env.charset = select_charset(request)
env.lang = select_lang(request, env.config)
env.rtl = env.lang in const.LANGUAGES_BIDI
# Determine the resource bundle to use.
env.default_resource_bundle = env.config.get('default_resource_bundle', '1')
env.resource_bundle = (request.cookies.get('resource_bundle', '') or
env.default_resource_bundle)
# Information about the request.
env.url = utils.set_url_param(request.url, 'lang', env.lang)
env.scheme, env.netloc, env.path, _, _ = urlparse.urlsplit(request.url)
env.force_https = True
env.domain = env.netloc.split(':')[0]
env.global_url = utils.get_repo_url(request, 'global')
# Commonly used information that's rendered or localized for templates.
env.language_options = get_language_options(request, env.config, env.lang)
env.repo_options = get_repo_options(request, env.lang)
env.expiry_options = [
utils.Struct(value=value, text=const.PERSON_EXPIRY_TEXT[value])
for value in sorted(const.PERSON_EXPIRY_TEXT.keys(), key=int)
]
env.status_options = [
utils.Struct(value=value, text=const.NOTE_STATUS_TEXT[value])
for value in pfif.NOTE_STATUS_VALUES
if (value != 'believed_dead' or
not env.config or env.config.allow_believed_dead_via_ui)
]
env.hidden_input_tags_for_preserved_query_params = (
get_hidden_input_tags_for_preserved_query_params(request))
ui_param = request.get('ui', '').strip().lower()
# Interprets "small" and "style" parameters for backward compatibility.
# TODO(ichikawa): Delete these in near future when we decide to drop
# support of these parameters.
small_param = request.get('small', '').strip().lower()
style_param = request.get('style', '').strip().lower()
if not ui_param and small_param == 'yes':
ui_param = 'small'
elif not ui_param and style_param:
ui_param = style_param
if ui_param:
env.ui = ui_param
elif user_agents.is_jp_tier2_mobile_phone(request):
env.ui = 'light'
else:
env.ui = 'default'
# UI configurations.
#
# Enables features which require JavaScript.
env.enable_javascript = True
# Enables operations which require Captcha.
env.enable_captcha = True
# Enables photo upload.
env.enable_photo_upload = True
# Enables flagging/unflagging notes as spam, and revealing spam notes.
env.enable_spam_ops = True
# Enables duplicate marking mode.
env.enable_dup_mode = True
# Shows a logo on top of the page.
env.show_logo = True
# Shows language menu.
env.show_language_menu = True
# Uses short labels for buttons.
env.use_short_buttons = False
# Optional "target" attribute for links to non-small pages.
env.target_attr = ''
# Shows record IDs in the results page.
env.show_record_ids_in_results = True
# Shows non AMP HTML pages by default.
env.amp = False
if env.ui == 'small':
env.show_logo = False
env.target_attr = ' target="_blank" '
elif env.ui == 'light':
# Disables features which require JavaScript. Some feature phones
# don't support JavaScript.
env.enable_javascript = False
# Disables operations which require Captcha because Captcha requires
# JavaScript.
env.enable_captcha = False
# Uploading is often not supported in feature phones.
env.enable_photo_upload = False
# Disables spam operations because they require JavaScript and would
# require supporting more pages on ui=light.
env.enable_spam_ops = False
# Disables duplicate marking mode because it doesn't support
# small screens and it requires JavaScript.
env.enable_dup_mode = False
# Hides the logo on the top to save the space. Also, the logo links
# to the global page which doesn't support small screens.
env.show_logo = False
# Hides language menu because the menu in the current position is
# annoying in feature phones.
# TODO(ichikawa): Consider layout of the language menu.
env.show_language_menu = False
# Buttons that are too long are not fully shown on some feature phones.
env.use_short_buttons = True
# To make it simple.
env.show_record_ids_in_results = False
env.back_chevron = u'\xab'
back_chevron_in_charset = True
try:
env.back_chevron.encode(env.charset)
except UnicodeEncodeError:
# u'\xab' is not in the charset (e.g. Shift_JIS).
back_chevron_in_charset = False
if not back_chevron_in_charset or env.ui == 'light':
# Use ASCII characters on ui=light too because some feature phones
# support UTF-8 but don't render UTF-8 symbols such as u'\xab'.
env.back_chevron = u'<<'
env.enable_maps = (
env.enable_javascript
and not env.config.zero_rating_mode
and env.maps_api_key)
env.enable_analytics = (
env.enable_javascript
and not env.config.zero_rating_mode
and env.analytics_id)
env.enable_translate = (
env.enable_javascript
and not env.config.zero_rating_mode
and env.config.translate_api_key)
# Repo-specific information.
if env.repo:
# repo_url is the root URL for the repository.
env.repo_url = utils.get_repo_url(request, env.repo)
# start_url is like repo_url but preserves parameters such as 'ui'.
env.start_url = utils.get_url(request, env.repo, '')
# URL of the link in the heading. The link on ui=small links to the
# normal UI.
env.repo_title_url = (
env.repo_url if env.ui == 'small' else env.start_url)
# URL to force default UI. Note that we show ui=light version in some
# user agents when ui parameter is not specified.
env.default_ui_url = utils.get_url(request, env.repo, '', ui='default')
env.repo_path = urlparse.urlsplit(env.repo_url)[2]
env.repo_title = get_localized_message(
env.config.repo_titles, env.lang, '?')
env.start_page_custom_html = get_localized_message(
env.config.start_page_custom_htmls, env.lang, '')
env.results_page_custom_html = get_localized_message(
env.config.results_page_custom_htmls, env.lang, '')
env.view_page_custom_html = get_localized_message(
env.config.view_page_custom_htmls, env.lang, '')
env.seek_query_form_custom_html = get_localized_message(
env.config.seek_query_form_custom_htmls, env.lang, '')
env.footer_custom_html = get_localized_message(
env.config.footer_custom_htmls, env.lang, '')
# If the repository is deactivated, we should not show test mode
# notification.
env.repo_test_mode = (
env.config.test_mode and not env.config.deactivated)
env.force_https = env.config.force_https
env.params_full_name = request.get('full_name', '').strip()
if not env.params_full_name:
# Preformat the name from 'given_name' and 'family_name' parameters.
given_name = request.get('given_name', '').strip()
family_name = request.get('family_name', '').strip()
env.params_full_name = utils.get_full_name(
given_name, family_name, env.config)
return env
def flush_caches(*keywords):
"""Flushes the specified set of caches. Pass '*' to flush everything."""
if '*' in keywords or 'resource' in keywords:
resources.clear_caches()
if '*' in keywords or 'memcache' in keywords:
memcache.flush_all()
if '*' in keywords or 'config' in keywords:
config.cache.flush()
for keyword in keywords:
if keyword.startswith('config/'):
config.cache.delete(keyword[7:])
class Main(webapp.RequestHandler):
"""The main request handler. All dynamic requests except for remote_api are
handled by this handler, which dispatches to all other dynamic handlers."""
def initialize(self, request, response):
webapp.RequestHandler.initialize(self, request, response)
# If requested, set the clock before doing anything clock-related.
# Only works on localhost for testing. Specify ?utcnow=1293840000 to
# set the clock to 2011-01-01, or ?utcnow=real to revert to real time.
utcnow = request.get('utcnow')
if request.remote_addr == '127.0.0.1' and utcnow:
if utcnow == 'real':
utils.set_utcnow_for_test(None)
else:
utils.set_utcnow_for_test(float(utcnow))
# If requested, flush caches before we touch anything that uses them.
# This is used for certain tests.
if utils.is_dev_app_server():
flush_caches(*request.get('flush', '').split(','))
# Gather commonly used information into self.env.
self.env = setup_env(request)
# Force a redirect if requested, except where https is not supported:
# - for cron jobs
# - for task queue jobs
# - in development
if (self.env.force_https and self.env.scheme == 'http'
and not is_cron_task(self.request)
and not is_task_queue_task(self.request)
and not is_development_server()):
self.redirect(self.env.url.replace('http:', 'https:'))
# Activate the selected language.
response.headers['Content-Language'] = self.env.lang
response.headers['Set-Cookie'] = \
'django_language=%s; path=/' % self.env.lang
django_setup.activate(self.env.lang)
# Activate the appropriate resource bundle.
resources.set_active_bundle_name(self.env.resource_bundle)
def should_serve_react_ui(self):
for path_prefix in NON_REACT_UI_PATHS:
if self.env.action.startswith(path_prefix):
return False
return True
def set_content_security_policy(self):
"""Sets the CSP in the headers. Returns the nonce to use for scripts."""
csp_nonce = utils.generate_random_key(20)
csp_value = (
'object-src \'none\'; '
'script-src \'nonce-%s\' \'unsafe-inline\' '
'\'strict-dynamic\' https: http:; '
'base-uri \'none\';'
) % csp_nonce
self.response.headers['Content-Security-Policy'] = csp_value
return csp_nonce
def serve(self):
request, response, env = self.request, self.response, self.env
# If the Person Finder instance has not been initialized yet,
# prepend to any served page a warning and a link to the admin
# page where the datastore can be initialized.
if not env.config.get('initialized'):
if request.get('operation') == 'setup_datastore':
setup_pf.setup_datastore()
self.redirect(env.global_url + '/')
return
else:
get_vars = lambda: {'env': env}
content = resources.get_rendered('setup_datastore.html', env.lang,
(env.repo, env.charset), get_vars)
response.out.write(content)
if env.config.get('enable_react_ui'):
# TODO(nworden): serve static files from /global/static
if env.repo == 'static':
self.serve_static_content(self.env.action)
elif self.should_serve_react_ui():
csp_nonce = self.set_content_security_policy()
response.out.write(
resources.get_rendered(
'react_index.html', env.lang,
get_vars=lambda: {'env': env, 'csp_nonce': csp_nonce}))
return
if not env.action and not env.repo:
# A request for the root path ('/'). Renders the home page.
self.serve_static_content(HOME_ACTION)
elif env.action in HANDLER_CLASSES:
# Dispatch to the handler for the specified action.
module_name, class_name = HANDLER_CLASSES[env.action].split('.')
handler = getattr(__import__(module_name), class_name)(
request, response, env)
getattr(handler, request.method.lower())() # get() or post()
elif env.action.endswith('.template'):
# Don't serve template source code.
response.set_status(404)
response.out.write('Not found')
else:
self.serve_static_content(self.env.action)
def serve_static_content(self, resource_name):
"""Serve a static page or file in app/resources/static directory."""
response, env = self.response, self.env
env.robots_ok = True
get_vars = lambda: {'env': env, 'config': env.config}
content = resources.get_rendered(
'static/%s' % resource_name,
env.lang,
(env.repo, env.charset),
get_vars)
if content is None:
response.set_status(404)
response.out.write('Not found')
else:
content_type, encoding = mimetypes.guess_type(resource_name)
response.headers['Content-Type'] = (
(content_type or 'text/plain') +
('; charset=%s' % encoding if encoding else ''))
response.out.write(content)
def get(self):
self.serve()
def post(self):
self.serve()
def head(self):
self.request.method = 'GET'
self.serve()
self.response.clear()
if __name__ == '__main__':
webapp.util.run_wsgi_app(webapp.WSGIApplication([('.*', Main)]))
|
[] |
[] |
[
"SERVER_SOFTWARE"
] |
[]
|
["SERVER_SOFTWARE"]
|
python
| 1 | 0 | |
secretmanager/secretmanager_test.go
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package secretmanager
import (
"bytes"
"context"
"fmt"
"os"
"reflect"
"strings"
"testing"
secretmanager "cloud.google.com/go/secretmanager/apiv1"
"github.com/GoogleCloudPlatform/golang-samples/internal/testutil"
"github.com/gofrs/uuid"
secretmanagerpb "google.golang.org/genproto/googleapis/cloud/secretmanager/v1"
grpccodes "google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
)
func testClient(tb testing.TB) (*secretmanager.Client, context.Context) {
tb.Helper()
ctx := context.Background()
client, err := secretmanager.NewClient(ctx)
if err != nil {
tb.Fatalf("testClient: failed to create client: %v", err)
}
return client, ctx
}
func testName(tb testing.TB) string {
tb.Helper()
u, err := uuid.NewV4()
if err != nil {
tb.Fatalf("testName: failed to generate uuid: %v", err)
}
return u.String()
}
func testSecret(tb testing.TB, projectID string) *secretmanagerpb.Secret {
tb.Helper()
secretID := testName(tb)
client, ctx := testClient(tb)
secret, err := client.CreateSecret(ctx, &secretmanagerpb.CreateSecretRequest{
Parent: fmt.Sprintf("projects/%s", projectID),
SecretId: secretID,
Secret: &secretmanagerpb.Secret{
Replication: &secretmanagerpb.Replication{
Replication: &secretmanagerpb.Replication_Automatic_{
Automatic: &secretmanagerpb.Replication_Automatic{},
},
},
},
})
if err != nil {
tb.Fatalf("testSecret: failed to create secret: %v", err)
}
return secret
}
func testSecretVersion(tb testing.TB, parent string, payload []byte) *secretmanagerpb.SecretVersion {
tb.Helper()
client, ctx := testClient(tb)
version, err := client.AddSecretVersion(ctx, &secretmanagerpb.AddSecretVersionRequest{
Parent: parent,
Payload: &secretmanagerpb.SecretPayload{
Data: payload,
},
})
if err != nil {
tb.Fatalf("testSecretVersion: failed to create secret version: %v", err)
}
return version
}
func testCleanupSecret(tb testing.TB, name string) {
tb.Helper()
client, ctx := testClient(tb)
if err := client.DeleteSecret(ctx, &secretmanagerpb.DeleteSecretRequest{
Name: name,
}); err != nil {
if terr, ok := grpcstatus.FromError(err); !ok || terr.Code() != grpccodes.NotFound {
tb.Fatalf("testCleanupSecret: failed to delete secret: %v", err)
}
}
}
func testIamUser(tb testing.TB) string {
tb.Helper()
v := os.Getenv("GOLANG_SAMPLES_SERVICE_ACCOUNT_EMAIL")
if v == "" {
tb.Skip("testIamUser: missing GOLANG_SAMPLES_SERVICE_ACCOUNT_EMAIL")
}
return fmt.Sprintf("serviceAccount:%s", v)
}
func TestAccessSecretVersion(t *testing.T) {
tc := testutil.SystemTest(t)
payload := []byte("my-secret")
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
version := testSecretVersion(t, secret.Name, payload)
var b bytes.Buffer
if err := accessSecretVersion(&b, version.Name); err != nil {
t.Fatal(err)
}
if got, want := b.String(), string(payload); !strings.Contains(got, want) {
t.Errorf("accessSecretVersion: expected %q to contain %q", got, want)
}
}
func TestAddSecretVersion(t *testing.T) {
tc := testutil.SystemTest(t)
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
var b bytes.Buffer
if err := addSecretVersion(&b, secret.Name); err != nil {
t.Fatal(err)
}
if got, want := b.String(), "Added secret version:"; !strings.Contains(got, want) {
t.Errorf("addSecretVersion: expected %q to contain %q", got, want)
}
}
func TestConsumeEventNotification(t *testing.T) {
v, err := ConsumeEventNotification(context.Background(), PubSubMessage{
Attributes: PubSubAttributes{
SecretId: "projects/p/secrets/s",
EventType: "SECRET_UPDATE",
},
Data: []byte("hello!"),
})
if err != nil {
t.Fatal(err)
}
if got, want := v, `Received SECRET_UPDATE for projects/p/secrets/s. New metadata: "hello!".`; !strings.Contains(got, want) {
t.Errorf("consumeEventNotification: expected %q to contain %q", got, want)
}
}
func TestCreateSecret(t *testing.T) {
tc := testutil.SystemTest(t)
secretID := "createSecret"
parent := fmt.Sprintf("projects/%s", tc.ProjectID)
defer testCleanupSecret(t, fmt.Sprintf("projects/%s/secrets/%s", tc.ProjectID, secretID))
var b bytes.Buffer
if err := createSecret(&b, parent, secretID); err != nil {
t.Fatal(err)
}
if got, want := b.String(), "Created secret:"; !strings.Contains(got, want) {
t.Errorf("createSecret: expected %q to contain %q", got, want)
}
}
func TestDeleteSecret(t *testing.T) {
tc := testutil.SystemTest(t)
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
if err := deleteSecret(secret.Name); err != nil {
t.Fatal(err)
}
client, ctx := testClient(t)
_, err := client.GetSecret(ctx, &secretmanagerpb.GetSecretRequest{
Name: secret.Name,
})
if terr, ok := grpcstatus.FromError(err); !ok || terr.Code() != grpccodes.NotFound {
t.Errorf("deleteSecret: expected %v to be not found", err)
}
}
func TestDeleteSecretWithEtag(t *testing.T) {
tc := testutil.SystemTest(t)
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
if err := deleteSecretWithEtag(secret.Name, secret.Etag); err != nil {
t.Fatal(err)
}
client, ctx := testClient(t)
_, err := client.GetSecret(ctx, &secretmanagerpb.GetSecretRequest{
Name: secret.Name,
})
if terr, ok := grpcstatus.FromError(err); !ok || terr.Code() != grpccodes.NotFound {
t.Errorf("deleteSecret: expected %v to be not found", err)
}
}
func TestDestroySecretVersion(t *testing.T) {
tc := testutil.SystemTest(t)
payload := []byte("my-secret")
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
version := testSecretVersion(t, secret.Name, payload)
if err := destroySecretVersion(version.Name); err != nil {
t.Fatal(err)
}
client, ctx := testClient(t)
v, err := client.GetSecretVersion(ctx, &secretmanagerpb.GetSecretVersionRequest{
Name: version.Name,
})
if err != nil {
t.Fatal(err)
}
if got, want := v.State, secretmanagerpb.SecretVersion_DESTROYED; got != want {
t.Errorf("testSecretVersion: expected %v to be %v", got, want)
}
}
func TestDestroySecretVersionWithEtag(t *testing.T) {
tc := testutil.SystemTest(t)
payload := []byte("my-secret")
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
version := testSecretVersion(t, secret.Name, payload)
if err := destroySecretVersionWithEtag(version.Name, version.Etag); err != nil {
t.Fatal(err)
}
client, ctx := testClient(t)
v, err := client.GetSecretVersion(ctx, &secretmanagerpb.GetSecretVersionRequest{
Name: version.Name,
})
if err != nil {
t.Fatal(err)
}
if got, want := v.State, secretmanagerpb.SecretVersion_DESTROYED; got != want {
t.Errorf("testSecretVersion: expected %v to be %v", got, want)
}
}
func TestDisableEnableSecretVersion(t *testing.T) {
tc := testutil.SystemTest(t)
payload := []byte("my-secret")
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
version := testSecretVersion(t, secret.Name, payload)
if err := disableSecretVersion(version.Name); err != nil {
t.Fatal(err)
}
client, ctx := testClient(t)
v, err := client.GetSecretVersion(ctx, &secretmanagerpb.GetSecretVersionRequest{
Name: version.Name,
})
if err != nil {
t.Fatal(err)
}
if got, want := v.State, secretmanagerpb.SecretVersion_DISABLED; got != want {
t.Errorf("testSecretVersion: expected %v to be %v", got, want)
}
if err := enableSecretVersion(version.Name); err != nil {
t.Fatal(err)
}
v, err = client.GetSecretVersion(ctx, &secretmanagerpb.GetSecretVersionRequest{
Name: version.Name,
})
if err != nil {
t.Fatal(err)
}
if got, want := v.State, secretmanagerpb.SecretVersion_ENABLED; got != want {
t.Errorf("testSecretVersion: expected %v to be %v", got, want)
}
}
func TestDisableEnableSecretVersionWithEtag(t *testing.T) {
tc := testutil.SystemTest(t)
payload := []byte("my-secret")
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
version := testSecretVersion(t, secret.Name, payload)
if err := disableSecretVersionWithEtag(version.Name, version.Etag); err != nil {
t.Fatal(err)
}
client, ctx := testClient(t)
v, err := client.GetSecretVersion(ctx, &secretmanagerpb.GetSecretVersionRequest{
Name: version.Name,
})
if err != nil {
t.Fatal(err)
}
if got, want := v.State, secretmanagerpb.SecretVersion_DISABLED; got != want {
t.Errorf("testSecretVersion: expected %v to be %v", got, want)
}
if err := enableSecretVersionWithEtag(version.Name, v.Etag); err != nil {
t.Fatal(err)
}
v, err = client.GetSecretVersion(ctx, &secretmanagerpb.GetSecretVersionRequest{
Name: version.Name,
})
if err != nil {
t.Fatal(err)
}
if got, want := v.State, secretmanagerpb.SecretVersion_ENABLED; got != want {
t.Errorf("testSecretVersion: expected %v to be %v", got, want)
}
}
func TestGetSecretVersion(t *testing.T) {
tc := testutil.SystemTest(t)
payload := []byte("my-secret")
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
version := testSecretVersion(t, secret.Name, payload)
var b bytes.Buffer
if err := getSecretVersion(&b, version.Name); err != nil {
t.Fatal(err)
}
if got, want := b.String(), "Found secret version"; !strings.Contains(got, want) {
t.Errorf("testSecretVersion: expected %q to contain %q", got, want)
}
}
func TestGetSecret(t *testing.T) {
tc := testutil.SystemTest(t)
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
var b bytes.Buffer
if err := getSecret(&b, secret.Name); err != nil {
t.Fatal(err)
}
if got, want := b.String(), "Found secret"; !strings.Contains(got, want) {
t.Errorf("getSecret: expected %q to contain %q", got, want)
}
}
func TestIamGrantAccess(t *testing.T) {
tc := testutil.SystemTest(t)
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
iamUser := testIamUser(t)
var b bytes.Buffer
if err := iamGrantAccess(&b, secret.Name, iamUser); err != nil {
t.Fatal(err)
}
if got, want := b.String(), "Updated IAM policy"; !strings.Contains(got, want) {
t.Errorf("getSecret: expected %q to contain %q", got, want)
}
client, ctx := testClient(t)
policy, err := client.IAM(secret.Name).Policy(ctx)
if err != nil {
t.Fatal(err)
}
found := false
members := policy.Members("roles/secretmanager.secretAccessor")
for _, m := range members {
if m == iamUser {
found = true
}
}
if !found {
t.Errorf("expected %q to include %q", members, iamUser)
}
}
func TestIamRevokeAccess(t *testing.T) {
tc := testutil.SystemTest(t)
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
iamUser := testIamUser(t)
var b bytes.Buffer
if err := iamRevokeAccess(&b, secret.Name, iamUser); err != nil {
t.Fatal(err)
}
if got, want := b.String(), "Updated IAM policy"; !strings.Contains(got, want) {
t.Errorf("getSecret: expected %q to contain %q", got, want)
}
client, ctx := testClient(t)
policy, err := client.IAM(secret.Name).Policy(ctx)
if err != nil {
t.Fatal(err)
}
members := policy.Members("roles/secretmanager.secretAccessor")
for _, m := range members {
if m == iamUser {
t.Errorf("expected %q to not include %q", members, iamUser)
}
}
}
func TestListSecretVersions(t *testing.T) {
tc := testutil.SystemTest(t)
payload := []byte("my-secret")
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
version1 := testSecretVersion(t, secret.Name, payload)
version2 := testSecretVersion(t, secret.Name, payload)
var b bytes.Buffer
if err := listSecretVersions(&b, secret.Name); err != nil {
t.Fatal(err)
}
if got, want := b.String(), fmt.Sprintf("%s with state ENABLED", version1.Name); !strings.Contains(got, want) {
t.Errorf("listSecretVersions: expected %q to contain %q", got, want)
}
if got, want := b.String(), fmt.Sprintf("%s with state ENABLED", version2.Name); !strings.Contains(got, want) {
t.Errorf("listSecretVersions: expected %q to contain %q", got, want)
}
}
func TestListSecretVersionsWithFilter(t *testing.T) {
tc := testutil.SystemTest(t)
payload := []byte("my-secret")
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
version1 := testSecretVersion(t, secret.Name, payload)
version2 := testSecretVersion(t, secret.Name, payload)
var b bytes.Buffer
if err := listSecretVersionsWithFilter(&b, secret.Name, fmt.Sprintf("name:%s", version1.Name)); err != nil {
t.Fatal(err)
}
if got, want := b.String(), fmt.Sprintf("%s with state ENABLED", version1.Name); !strings.Contains(got, want) {
t.Errorf("listSecretVersions: expected %q to contain %q", got, want)
}
if got, lacked := b.String(), fmt.Sprintf("%s with state ENABLED", version2.Name); strings.Contains(got, lacked) {
t.Errorf("listSecretVersions: expected %q to not contain %q", got, lacked)
}
}
func TestListSecrets(t *testing.T) {
tc := testutil.SystemTest(t)
secret1 := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret1.Name)
secret2 := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret2.Name)
var b bytes.Buffer
if err := listSecrets(&b, fmt.Sprintf("projects/%s", tc.ProjectID)); err != nil {
t.Fatal(err)
}
if got, want := b.String(), secret1.Name; !strings.Contains(got, want) {
t.Errorf("listSecrets: expected %q to contain %q", got, want)
}
if got, want := b.String(), secret2.Name; !strings.Contains(got, want) {
t.Errorf("listSecrets: expected %q to contain %q", got, want)
}
}
func TestListSecretsWithFilter(t *testing.T) {
tc := testutil.SystemTest(t)
secret1 := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret1.Name)
secret2 := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret2.Name)
var b bytes.Buffer
if err := listSecretsWithFilter(&b, fmt.Sprintf("projects/%s", tc.ProjectID), fmt.Sprintf("name:%s", secret1.Name)); err != nil {
t.Fatal(err)
}
if got, want := b.String(), secret1.Name; !strings.Contains(got, want) {
t.Errorf("listSecrets: expected %q to contain %q", got, want)
}
if got, lacked := b.String(), secret2.Name; strings.Contains(got, lacked) {
t.Errorf("listSecrets: expected %q to not contain %q", got, lacked)
}
}
func TestUpdateSecret(t *testing.T) {
tc := testutil.SystemTest(t)
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
var b bytes.Buffer
if err := updateSecret(&b, secret.Name); err != nil {
t.Fatal(err)
}
if got, want := b.String(), "Updated secret"; !strings.Contains(got, want) {
t.Errorf("updateSecret: expected %q to contain %q", got, want)
}
client, ctx := testClient(t)
s, err := client.GetSecret(ctx, &secretmanagerpb.GetSecretRequest{
Name: secret.Name,
})
if err != nil {
t.Fatal(err)
}
if got, want := s.Labels, map[string]string{"secretmanager": "rocks"}; !reflect.DeepEqual(got, want) {
t.Errorf("updateSecret: expected %q to be %q", got, want)
}
}
func TestUpdateSecretWithEtag(t *testing.T) {
tc := testutil.SystemTest(t)
secret := testSecret(t, tc.ProjectID)
defer testCleanupSecret(t, secret.Name)
var b bytes.Buffer
if err := updateSecretWithEtag(&b, secret.Name, secret.Etag); err != nil {
t.Fatal(err)
}
if got, want := b.String(), "Updated secret"; !strings.Contains(got, want) {
t.Errorf("updateSecret: expected %q to contain %q", got, want)
}
client, ctx := testClient(t)
s, err := client.GetSecret(ctx, &secretmanagerpb.GetSecretRequest{
Name: secret.Name,
})
if err != nil {
t.Fatal(err)
}
if got, want := s.Labels, map[string]string{"secretmanager": "rocks"}; !reflect.DeepEqual(got, want) {
t.Errorf("updateSecret: expected %q to be %q", got, want)
}
}
|
[
"\"GOLANG_SAMPLES_SERVICE_ACCOUNT_EMAIL\""
] |
[] |
[
"GOLANG_SAMPLES_SERVICE_ACCOUNT_EMAIL"
] |
[]
|
["GOLANG_SAMPLES_SERVICE_ACCOUNT_EMAIL"]
|
go
| 1 | 0 | |
queue_services/business-events-listener/src/business_events_listener/worker.py
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The unique worker functionality for this service is contained here.
The entry-point is the **cb_nr_subscription_handler**
The design and flow leverage a few constraints that are placed upon it
by NATS Streaming and using AWAIT on the default loop.
- NATS streaming queues require one message to be processed at a time.
- AWAIT on the default loop effectively runs synchronously
If these constraints change, the use of Flask-SQLAlchemy would need to change as well:
either swap out the Flask-SQLAlchemy base model, or rework the models to use
standalone SQLAlchemy with an async engine.
"""
import json
import os
from typing import Dict
import nats
from auth_api.models import Affiliation as AffiliationModel
from auth_api.models import Entity as EntityModel
from auth_api.models import Org as OrgModel
from auth_api.models import db
from auth_api.services.rest_service import RestService
from auth_api.utils.enums import CorpType
from dateutil import parser
from entity_queue_common.service import QueueServiceManager
from entity_queue_common.service_utils import QueueException, logger
from flask import Flask # pylint: disable=wrong-import-order
from business_events_listener import config
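# The QueueServiceManager, APP_CONFIG and FLASK_APP referenced below are created at
# module level at the bottom of this file; cb_nr_subscription_handler is the
# per-message entry point described in the module docstring.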
async def cb_nr_subscription_handler(msg: nats.aio.client.Msg):
"""Use Callback to process Queue Msg objects."""
event_message = None
try:
logger.info('Received raw message seq:%s, data= %s', msg.sequence, msg.data.decode())
event_message = json.loads(msg.data.decode('utf-8'))
logger.debug('Event Message Received: %s', event_message)
await process_event(event_message, FLASK_APP)
except Exception: # noqa pylint: disable=broad-except
# Catch Exception so that any error is still caught and the message is removed from the queue
logger.error('Queue Error: %s', json.dumps(event_message), exc_info=True)
async def process_event(event_message, flask_app):
"""Render the org status."""
if not flask_app:
raise QueueException('Flask App not available.')
with flask_app.app_context():
message_type = event_message.get('type', None)
if message_type == 'bc.registry.names.events':
await process_name_events(event_message)
async def process_name_events(event_message: Dict[str, any]):
"""Process name events.
1. Check if the NR already exists in entities table, if yes apply changes. If not create entity record.
2. Check if new status is DRAFT, if yes call pay-api and get the account details for the payments against the NR.
3. If an account is found, affiliate to that account.
Args:
event_message (object): cloud event message, sample below.
{
'specversion': '1.0.1',
'type': 'bc.registry.names.events',
'source': '/requests/6724165',
'id': id,
'time': '',
'datacontenttype': 'application/json',
'identifier': '781020202',
'data': {
'request': {
'nrNum': 'NR 5659951',
'newState': 'APPROVED',
'previousState': 'DRAFT'
}
}
}
"""
logger.debug('>>>>>>>process_name_events>>>>>')
request_data = event_message.get('data').get('request')
nr_number = request_data['nrNum']
nr_status = request_data['newState']
nr_entity = EntityModel.find_by_business_identifier(nr_number)
if nr_entity is None:
logger.info("Entity doesn't exist, creating a new entity.")
nr_entity = EntityModel(
business_identifier=nr_number,
corp_type_code=CorpType.NR.value
)
nr_entity.status = nr_status
nr_entity.name = request_data.get('name', '')  # it's not part of the event now; this handles the case where they include it.
nr_entity.last_modified_by = None # TODO not present in event message.
nr_entity.last_modified = parser.parse(event_message.get('time'))
if nr_status == 'DRAFT' and AffiliationModel.find_affiliations_by_business_identifier(nr_number) is None:
logger.info('Status is DRAFT, getting invoices for account')
# Find account details for the NR.
invoices = RestService.get(
f'{APP_CONFIG.PAY_API_URL}/payment-requests?businessIdentifier={nr_number}',
token=RestService.get_service_account_token()
).json()
# Ideally there should be only one or two (priority fees) payment request for the NR.
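# Only proceed when the first invoice carries a numeric auth account id;
# non-numeric ids (e.g. service account values) are skipped.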
if invoices and (auth_account_id := invoices['invoices'][0].get('paymentAccount').get('accountId')) \
and str(auth_account_id).isnumeric():
logger.info('Account ID received : %s', auth_account_id)
# Auth account id can be service account value too, so doing a query lookup than find_by_id
org: OrgModel = db.session.query(OrgModel).filter(OrgModel.id == auth_account_id).one_or_none()
if org:
nr_entity.pass_code_claimed = True
# Create an affiliation.
logger.info('Creating affiliation between Entity : %s and Org : %s', nr_entity, org)
affiliation: AffiliationModel = AffiliationModel(entity=nr_entity, org=org)
affiliation.flush()
nr_entity.save()
logger.debug('<<<<<<<process_name_events<<<<<<<<<<')
qsm = QueueServiceManager() # pylint: disable=invalid-name
APP_CONFIG = config.get_named_config(os.getenv('DEPLOYMENT_ENV', 'production'))
FLASK_APP = Flask(__name__)
FLASK_APP.config.from_object(APP_CONFIG)
db.init_app(FLASK_APP)
|
[] |
[] |
[
"DEPLOYMENT_ENV"
] |
[]
|
["DEPLOYMENT_ENV"]
|
python
| 1 | 0 | |
qa/rpc-tests/maxblocksinflight.py
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit a 0.9 client would
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
# set up NodeConnCB callbacks, overriding base class
def on_getdata(self, conn, message):
self.log.debug("got getdata %s" % repr(message))
# Log the requests
for inv in message.inv:
if inv.hash not in self.blockReqCounts:
self.blockReqCounts[inv.hash] = 0
self.blockReqCounts[inv.hash] += 1
def on_close(self, conn):
if not self.disconnectOkay:
raise EarlyDisconnectError(0)
def __init__(self):
NodeConnCB.__init__(self)
self.log = logging.getLogger("BlockRelayTest")
def add_new_connection(self, connection):
self.connection = connection
self.blockReqCounts = {}
self.disconnectOkay = False
def run(self):
self.connection.rpc.generate(1) # Leave IBD
numBlocksToGenerate = [8, 16, 128, 1024]
for count in range(len(numBlocksToGenerate)):
current_invs = []
for i in range(numBlocksToGenerate[count]):
current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
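# Flush in chunks: send at most 50000 entries per inv message.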
if len(current_invs) >= 50000:
self.connection.send_message(msg_inv(current_invs))
current_invs = []
if len(current_invs) > 0:
self.connection.send_message(msg_inv(current_invs))
# Wait and see how many blocks were requested
time.sleep(2)
total_requests = 0
with mininode_lock:
for key in self.blockReqCounts:
total_requests += self.blockReqCounts[key]
if self.blockReqCounts[key] > 1:
raise AssertionError("Error, test failed: block %064x requested more than once" % key)
if total_requests > MAX_REQUESTS:
raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
print "Round %d: success (total requests: %d)" % (count, total_requests)
self.disconnectOkay = True
self.connection.disconnect_node()
class MaxBlocksInFlightTest(SyscoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("SYSD", "syscoind"),
help="Binary to test max block requests behavior")
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager()
test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
NetworkThread().start() # Start up network handling in another thread
test.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
|
[] |
[] |
[
"SYSD"
] |
[]
|
["SYSD"]
|
python
| 1 | 0 | |
test/e2e/main_test.go
|
// Copyright 2016 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"context"
"flag"
"log"
"os"
"testing"
operatorFramework "github.com/coreos/prometheus-operator/test/framework"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
)
var (
framework *operatorFramework.Framework
opImage *string
)
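// The EXCLUDE_* environment variables checked below let a CI run skip an entire
// test group without code changes.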
func skipPrometheusTests(t *testing.T) {
if os.Getenv("EXCLUDE_PROMETHEUS_TESTS") != "" {
t.Skip("Skipping Prometheus tests")
}
}
func skipAlertmanagerTests(t *testing.T) {
if os.Getenv("EXCLUDE_ALERTMANAGER_TESTS") != "" {
t.Skip("Skipping Alertmanager tests")
}
}
func skipThanosRulerTests(t *testing.T) {
if os.Getenv("EXCLUDE_THANOSRULER_TESTS") != "" {
t.Skip("Skipping ThanosRuler tests")
}
}
func TestMain(m *testing.M) {
kubeconfig := flag.String(
"kubeconfig",
"",
"kube config path, e.g. $HOME/.kube/config",
)
opImage = flag.String(
"operator-image",
"",
"operator image, e.g. quay.io/coreos/prometheus-operator",
)
flag.Parse()
var (
err error
exitCode int
)
if framework, err = operatorFramework.New(*kubeconfig, *opImage); err != nil {
log.Printf("failed to setup framework: %v\n", err)
os.Exit(1)
}
exitCode = m.Run()
os.Exit(exitCode)
}
// TestAllNS tests the Prometheus Operator watching all namespaces in a
// Kubernetes cluster.
func TestAllNS(t *testing.T) {
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup(t)
ns := ctx.CreateNamespace(t, framework.KubeClient)
finalizers, err := framework.CreatePrometheusOperator(ns, *opImage, nil, nil, nil, nil, true)
if err != nil {
t.Fatal(err)
}
for _, f := range finalizers {
ctx.AddFinalizerFn(f)
}
t.Run("TestServerTLS", testServerTLS(t, ns))
// t.Run blocks until the function passed as the second argument (f) returns or
// calls t.Parallel to become a parallel test. Run reports whether f succeeded
// (or at least did not fail before calling t.Parallel). As all tests in
// testAllNS are parallel, the deferred ctx.Cleanup above would be run before
// all tests finished. Wrapping it in testAllNSPrometheus and testAllNSAlertmanager
// fixes this.
t.Run("x", testAllNSAlertmanager)
t.Run("y", testAllNSPrometheus)
t.Run("z", testAllNSThanosRuler)
// Check if Prometheus Operator ever restarted.
opts := metav1.ListOptions{LabelSelector: fields.SelectorFromSet(fields.Set(map[string]string{
"app.kubernetes.io/name": "prometheus-operator",
})).String()}
pl, err := framework.KubeClient.CoreV1().Pods(ns).List(context.TODO(), opts)
if err != nil {
t.Fatal(err)
}
if expected := 1; len(pl.Items) != expected {
t.Fatalf("expected %v Prometheus Operator pods, but got %v", expected, len(pl.Items))
}
restarts, err := framework.GetPodRestartCount(ns, pl.Items[0].GetName())
if err != nil {
t.Fatalf("failed to retrieve restart count of Prometheus Operator pod: %v", err)
}
if len(restarts) != 1 {
t.Fatalf("expected to have 1 container but got %d", len(restarts))
}
for _, restart := range restarts {
if restart != 0 {
t.Fatalf(
"expected Prometheus Operator to never restart during entire test execution but got %d restarts",
restart,
)
}
}
}
func testAllNSAlertmanager(t *testing.T) {
skipAlertmanagerTests(t)
testFuncs := map[string]func(t *testing.T){
"AMCreateDeleteCluster": testAMCreateDeleteCluster,
"AMScaling": testAMScaling,
"AMVersionMigration": testAMVersionMigration,
"AMStorageUpdate": testAMStorageUpdate,
"AMExposingWithKubernetesAPI": testAMExposingWithKubernetesAPI,
"AMClusterInitialization": testAMClusterInitialization,
"AMClusterAfterRollingUpdate": testAMClusterAfterRollingUpdate,
"AMClusterGossipSilences": testAMClusterGossipSilences,
"AMReloadConfig": testAMReloadConfig,
"AMZeroDowntimeRollingDeployment": testAMZeroDowntimeRollingDeployment,
}
for name, f := range testFuncs {
t.Run(name, f)
}
}
func testAllNSPrometheus(t *testing.T) {
skipPrometheusTests(t)
testFuncs := map[string]func(t *testing.T){
"PromCreateDeleteCluster": testPromCreateDeleteCluster,
"PromScaleUpDownCluster": testPromScaleUpDownCluster,
"PromNoServiceMonitorSelector": testPromNoServiceMonitorSelector,
"PromVersionMigration": testPromVersionMigration,
"PromResourceUpdate": testPromResourceUpdate,
"PromStorageUpdate": testPromStorageUpdate,
"PromReloadConfig": testPromReloadConfig,
"PromAdditionalScrapeConfig": testPromAdditionalScrapeConfig,
"PromAdditionalAlertManagerConfig": testPromAdditionalAlertManagerConfig,
"PromReloadRules": testPromReloadRules,
"PromMultiplePrometheusRulesSameNS": testPromMultiplePrometheusRulesSameNS,
"PromMultiplePrometheusRulesDifferentNS": testPromMultiplePrometheusRulesDifferentNS,
"PromRulesExceedingConfigMapLimit": testPromRulesExceedingConfigMapLimit,
"PromRulesMustBeAnnotated": testPromRulesMustBeAnnotated,
"PromtestInvalidRulesAreRejected": testInvalidRulesAreRejected,
"PromOnlyUpdatedOnRelevantChanges": testPromOnlyUpdatedOnRelevantChanges,
"PromWhenDeleteCRDCleanUpViaOwnerRef": testPromWhenDeleteCRDCleanUpViaOwnerRef,
"PromDiscovery": testPromDiscovery,
"PromAlertmanagerDiscovery": testPromAlertmanagerDiscovery,
"PromExposingWithKubernetesAPI": testPromExposingWithKubernetesAPI,
"PromDiscoverTargetPort": testPromDiscoverTargetPort,
"PromOpMatchPromAndServMonInDiffNSs": testPromOpMatchPromAndServMonInDiffNSs,
"PromGetAuthSecret": testPromGetAuthSecret,
"PromArbitraryFSAcc": testPromArbitraryFSAcc,
"PromTLSConfigViaSecret": testPromTLSConfigViaSecret,
"Thanos": testThanos,
}
for name, f := range testFuncs {
t.Run(name, f)
}
}
func testAllNSThanosRuler(t *testing.T) {
skipThanosRulerTests(t)
testFuncs := map[string]func(t *testing.T){
"ThanosRulerCreateDeleteCluster": testTRCreateDeleteCluster,
}
for name, f := range testFuncs {
t.Run(name, f)
}
}
// TestMultiNS tests the Prometheus Operator configured to watch specific
// namespaces.
func TestMultiNS(t *testing.T) {
testFuncs := map[string]func(t *testing.T){
"OperatorNSScope": testOperatorNSScope,
}
for name, f := range testFuncs {
t.Run(name, f)
}
}
// TestDenylist tests the Prometheus Operator configured not to watch specific namespaces.
func TestDenylist(t *testing.T) {
skipPrometheusTests(t)
testFuncs := map[string]func(t *testing.T){
"Prometheus": testDenyPrometheus,
"ServiceMonitor": testDenyServiceMonitor,
}
for name, f := range testFuncs {
t.Run(name, f)
}
}
// TestPromInstanceNs tests prometheus operator in different scenarios when --prometheus-instance-namespace is given
func TestPromInstanceNs(t *testing.T) {
skipPrometheusTests(t)
testFuncs := map[string]func(t *testing.T){
"AllNs": testPrometheusInstanceNamespaces_AllNs,
"AllowList": testPrometheusInstanceNamespaces_AllowList,
"DenyList": testPrometheusInstanceNamespaces_DenyList,
}
for name, f := range testFuncs {
t.Run(name, f)
}
}
// TestAlertmanagerInstanceNs tests prometheus operator in different scenarios when --alertmanager-instance-namespace is given
func TestAlertmanagerInstanceNs(t *testing.T) {
skipAlertmanagerTests(t)
testFuncs := map[string]func(t *testing.T){
"AllNs": testAlertmanagerInstanceNamespaces_AllNs,
"DenyNs": testAlertmanagerInstanceNamespaces_DenyNs,
}
for name, f := range testFuncs {
t.Run(name, f)
}
}
const (
prometheusOperatorServiceName = "prometheus-operator"
)
func testServerTLS(t *testing.T, namespace string) func(t *testing.T) {
return func(t *testing.T) {
if err := operatorFramework.WaitForServiceReady(framework.KubeClient, namespace, prometheusOperatorServiceName); err != nil {
t.Fatal("waiting for prometheus operator service: ", err)
}
operatorService := framework.KubeClient.CoreV1().Services(namespace)
request := operatorService.ProxyGet("https", prometheusOperatorServiceName, "https", "/healthz", make(map[string]string))
_, err := request.DoRaw(context.TODO())
if err != nil {
t.Fatal(err)
}
}
}
|
[
"\"EXCLUDE_PROMETHEUS_TESTS\"",
"\"EXCLUDE_ALERTMANAGER_TESTS\"",
"\"EXCLUDE_THANOSRULER_TESTS\""
] |
[] |
[
"EXCLUDE_ALERTMANAGER_TESTS",
"EXCLUDE_THANOSRULER_TESTS",
"EXCLUDE_PROMETHEUS_TESTS"
] |
[]
|
["EXCLUDE_ALERTMANAGER_TESTS", "EXCLUDE_THANOSRULER_TESTS", "EXCLUDE_PROMETHEUS_TESTS"]
|
go
| 3 | 0 | |
main.go
|
package main
import (
"log"
"net/http"
"os"
"github.com/99designs/gqlgen/handler"
"github.com/Go-GraphQL-Group/GraphQL-Service/graphql"
"github.com/Go-GraphQL-Group/GraphQL-Service/resolver"
"github.com/Go-GraphQL-Group/GraphQL-Service/service"
"github.com/codegangsta/negroni"
"github.com/gorilla/mux"
)
const defaultPort = "9090"
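// NewServer wires the mux router into a negroni stack that also serves static
// assets from ./static.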
func NewServer() *negroni.Negroni {
router := mux.NewRouter()
initRoutes(router)
n := negroni.Classic()
n.Use(negroni.NewStatic(http.Dir("./static")))
n.UseHandler(router)
return n
}
func initRoutes(router *mux.Router) {
router.HandleFunc("/api/login", service.LoginHandler).Methods("POST")
router.Use(service.TokenMiddleware)
router.HandleFunc("/api", service.ApiHandler).Methods("GET")
// router.HandleFunc("/", handler.Playground("GraphQL playground", "/api/query"))
router.HandleFunc("/api/query", handler.GraphQL(graphql.NewExecutableSchema(graphql.Config{Resolvers: &resolver.Resolver{}})))
router.HandleFunc("/api/logout", service.LogoutHandler).Methods("POST", "GET")
}
func main() {
port := os.Getenv("PORT")
if port == "" {
port = defaultPort
}
server := NewServer()
server.Run(":" + port)
log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
tests/models/test_tpu.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
from unittest import mock
import pytest
import torch
from torch.utils.data import DataLoader
import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators import TPUAccelerator
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import TPUSpawnPlugin
from pytorch_lightning.utilities import _TPU_AVAILABLE
from pytorch_lightning.utilities.distributed import ReduceOp
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
from tests.helpers.utils import pl_multi_process_test
if _TPU_AVAILABLE:
import torch_xla
import torch_xla.distributed.xla_multiprocessing as xmp
SERIAL_EXEC = xmp.MpSerialExecutor()
_LARGER_DATASET = RandomDataset(32, 2000)
# 8 cores needs a big dataset
def _serial_train_loader():
return DataLoader(_LARGER_DATASET, batch_size=32)
class SerialLoaderBoringModel(BoringModel):
def train_dataloader(self):
return DataLoader(RandomDataset(32, 2000), batch_size=32)
def val_dataloader(self):
return DataLoader(RandomDataset(32, 2000), batch_size=32)
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_tpu_cores_1(tmpdir):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
tpu_cores=1,
limit_train_batches=4,
limit_val_batches=4,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
@pytest.mark.parametrize('tpu_core', [1, 5])
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_tpu_index(tmpdir, tpu_core):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
tpu_cores=[tpu_core],
limit_train_batches=4,
limit_val_batches=4,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
assert torch_xla._XLAC._xla_get_default_device() == f'xla:{tpu_core}'
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_tpu_cores_8(tmpdir):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
tpu_cores=8,
limit_train_batches=4,
limit_val_batches=4,
)
# 8 cores needs a big dataset
model = SerialLoaderBoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False, min_acc=0.05)
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_16bit_tpu_cores_1(tmpdir):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
precision=16,
progress_bar_refresh_rate=0,
max_epochs=2,
tpu_cores=1,
limit_train_batches=8,
limit_val_batches=2,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
assert os.environ.get('XLA_USE_BF16') == str(1), "XLA_USE_BF16 was not set in environment variables"
@pytest.mark.parametrize('tpu_core', [1, 5])
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_16bit_tpu_index(tmpdir, tpu_core):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
precision=16,
progress_bar_refresh_rate=0,
max_epochs=2,
tpu_cores=[tpu_core],
limit_train_batches=4,
limit_val_batches=2,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
assert torch_xla._XLAC._xla_get_default_device() == f'xla:{tpu_core}'
assert os.environ.get('XLA_USE_BF16') == str(1), "XLA_USE_BF16 was not set in environment variables"
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_16bit_tpu_cores_8(tmpdir):
"""Make sure model trains on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
precision=16,
progress_bar_refresh_rate=0,
max_epochs=1,
tpu_cores=8,
limit_train_batches=4,
limit_val_batches=4,
)
# 8 cores needs a big dataset
model = SerialLoaderBoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False, min_acc=0.05)
@RunIf(tpu=True)
@pl_multi_process_test
def test_model_tpu_early_stop(tmpdir):
"""Test if single TPU core training works"""
class CustomBoringModel(BoringModel):
def validation_step(self, *args, **kwargs):
out = super().validation_step(*args, **kwargs)
self.log('val_loss', out['x'])
return out
tutils.reset_seed()
model = CustomBoringModel()
trainer = Trainer(
callbacks=[EarlyStopping(monitor='val_loss')],
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=2,
limit_val_batches=2,
tpu_cores=8,
)
trainer.fit(model)
trainer.test(test_dataloaders=DataLoader(RandomDataset(32, 2000), batch_size=32))
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_grad_norm(tmpdir):
"""Test if grad_norm works on TPU."""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=4,
tpu_cores=1,
limit_train_batches=0.4,
limit_val_batches=0.4,
gradient_clip_val=0.5,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_clip_grad_by_value(tmpdir):
"""Test if clip_gradients by value works on TPU"""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=4,
tpu_cores=1,
limit_train_batches=10,
limit_val_batches=10,
gradient_clip_val=0.5,
gradient_clip_algorithm='value'
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
@RunIf(tpu=True)
@pl_multi_process_test
def test_dataloaders_passed_to_fit(tmpdir):
"""Test if dataloaders passed to trainer works on TPU"""
tutils.reset_seed()
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
tpu_cores=8,
)
trainer.fit(
model,
train_dataloader=model.train_dataloader(),
val_dataloaders=model.val_dataloader(),
)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@pytest.mark.parametrize(
['tpu_cores', 'expected_tpu_id'],
[pytest.param(1, None), pytest.param(8, None),
pytest.param([1], 1), pytest.param([8], 8)],
)
@RunIf(tpu=True)
def test_tpu_id_to_be_as_expected(tpu_cores, expected_tpu_id):
"""Test if trainer.tpu_id is set as expected"""
assert Trainer(tpu_cores=tpu_cores).accelerator_connector.tpu_id == expected_tpu_id
def test_tpu_misconfiguration():
"""Test if trainer.tpu_id is set as expected"""
with pytest.raises(MisconfigurationException, match="`tpu_cores` can only be"):
Trainer(tpu_cores=[1, 8])
@pytest.mark.skipif(_TPU_AVAILABLE, reason="test requires missing TPU")
def test_exception_when_no_tpu_found(tmpdir):
"""Test if exception is thrown when xla devices are not available"""
with pytest.raises(MisconfigurationException, match='No TPU devices were found.'):
Trainer(tpu_cores=8)
@pytest.mark.parametrize('tpu_cores', [1, 8, [1]])
@RunIf(tpu=True)
def test_distributed_backend_set_when_using_tpu(tmpdir, tpu_cores):
"""Test if distributed_backend is set to `tpu` when tpu_cores is not None"""
assert Trainer(tpu_cores=tpu_cores).distributed_backend == "tpu"
@RunIf(tpu=True)
@pl_multi_process_test
def test_broadcast_on_tpu():
""" Checks if an object from the master process is broadcasted to other processes correctly"""
def test_broadcast(rank):
trainer = Trainer(tpu_cores=8)
assert isinstance(trainer.accelerator, TPUAccelerator)
assert isinstance(trainer.training_type_plugin, TPUSpawnPlugin)
obj = ("ver_0.5", "logger_name", rank)
result = trainer.training_type_plugin.broadcast(obj)
assert result == ("ver_0.5", "logger_name", 0)
xmp.spawn(test_broadcast, nprocs=8, start_method='fork')
@pytest.mark.parametrize(
["tpu_cores", "expected_tpu_id", "error_expected"],
[
pytest.param(1, None, False),
pytest.param(8, None, False),
pytest.param([1], 1, False),
pytest.param([8], 8, False),
pytest.param("1,", 1, False),
pytest.param("1", None, False),
pytest.param("9, ", 9, True),
pytest.param([9], 9, True),
pytest.param([0], 0, True),
pytest.param(2, None, True),
pytest.param(10, None, True),
],
)
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_choice(tmpdir, tpu_cores, expected_tpu_id, error_expected):
if error_expected:
with pytest.raises(MisconfigurationException, match=r".*tpu_cores` can only be 1, 8 or [<1-8>]*"):
Trainer(default_root_dir=tmpdir, tpu_cores=tpu_cores)
else:
trainer = Trainer(default_root_dir=tmpdir, tpu_cores=tpu_cores)
assert trainer.accelerator_connector.tpu_id == expected_tpu_id
@pytest.mark.parametrize(
['cli_args', 'expected'],
[pytest.param('--tpu_cores=8', {'tpu_cores': 8}),
pytest.param("--tpu_cores=1,", {'tpu_cores': '1,'})]
)
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_cores_with_argparse(cli_args, expected):
"""Test passing tpu_cores in command line"""
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
parser = ArgumentParser(add_help=False)
parser = Trainer.add_argparse_args(parent_parser=parser)
args = Trainer.parse_argparser(parser)
for k, v in expected.items():
assert getattr(args, k) == v
assert Trainer.from_argparse_args(args)
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_reduce():
"""Test tpu spawn reduce operation """
def test_reduce(rank):
trainer = Trainer(tpu_cores=8)
# faster this way
reduce_ops = ["mean", "AVG", "undefined", "sum", ReduceOp.SUM, ReduceOp.MAX]
for reduce_op in reduce_ops:
if reduce_op == "undefined" or reduce_op == ReduceOp.MAX:
with pytest.raises(MisconfigurationException, match="TPUSpawn TrainingTypePlugin only support"):
result = trainer.training_type_plugin.reduce(1, reduce_op)
else:
result = trainer.training_type_plugin.reduce(1, reduce_op)
if isinstance(reduce_op, str) and reduce_op.lower() in ("mean", "avg"):
assert result.item() == 1
else:
assert result.item() == 8
xmp.spawn(test_reduce, nprocs=8, start_method='fork')
@RunIf(tpu=True)
@pl_multi_process_test
@pytest.mark.parametrize("clip_val", [10])
@mock.patch("torch.nn.utils.clip_grad_norm_")
def test_tpu_precision_16_clip_gradients(mock_clip_grad_norm, clip_val, tmpdir):
"""
Ensure that clip gradients is only called if the value is greater than 0.
TODO: Fix (test fails with parametrize)
"""
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
tpu_cores=1,
precision=16,
limit_train_batches=4,
limit_val_batches=4,
gradient_clip_val=clip_val,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
if clip_val > 0:
mock_clip_grad_norm.assert_called()
else:
mock_clip_grad_norm.assert_not_called()
@RunIf(tpu=True)
@pl_multi_process_test
def test_if_test_works_with_checkpoint_false(tmpdir):
"""Ensure that model trains properly when `checkpoint_callback` is set to False."""
# Train a model on TPU
model = BoringModel()
trainer = Trainer(max_epochs=1, tpu_cores=8, default_root_dir=tmpdir, fast_dev_run=True, checkpoint_callback=False)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_sync_dist():
"""Test tpu spawn sync dist operation """
def test_sync_dist(rank):
tensor = torch.tensor([1.0])
training_type_plugin = TPUSpawnPlugin()
res = Result()
res.log(
"test_tensor",
tensor,
sync_fn=training_type_plugin.reduce,
sync_dist=True,
sync_dist_op=torch.distributed.ReduceOp.SUM
)
assert res["test_tensor"].item() == 8, "Result-Log does not work properly with TPU Spawn and Tensors"
xmp.spawn(test_sync_dist, nprocs=8, start_method='fork')
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_debug_mode(tmpdir):
"""Test if debug mode works on TPU."""
class DebugModel(BoringModel):
def on_train_start(self):
assert os.environ.get("PT_XLA_DEBUG") == str(1), "PT_XLA_DEBUG was not set in environment variables"
def teardown(self, stage):
assert "PT_XLA_DEBUG" not in os.environ
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=4,
tpu_cores=8,
limit_train_batches=0.4,
limit_val_batches=0.4,
plugins=TPUSpawnPlugin(debug=True),
)
model = DebugModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
@RunIf(tpu=True)
@pl_multi_process_test
def test_tpu_host_world_size(tmpdir):
"""Test Host World size env setup on TPU."""
class DebugModel(BoringModel):
def on_train_start(self):
assert os.environ.get("XRT_HOST_WORLD_SIZE") == str(1)
def teardown(self, stage):
assert "XRT_HOST_WORLD_SIZE" not in os.environ
tutils.reset_seed()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=4,
tpu_cores=8,
limit_train_batches=0.4,
limit_val_batches=0.4,
)
model = DebugModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)
|
[] |
[] |
[
"PT_XLA_DEBUG",
"XRT_HOST_WORLD_SIZE",
"XLA_USE_BF16"
] |
[]
|
["PT_XLA_DEBUG", "XRT_HOST_WORLD_SIZE", "XLA_USE_BF16"]
|
python
| 3 | 0 | |
test/integration/test_compare_comply_v1.py
|
# coding: utf-8
import pytest
import watson_developer_cloud
import os
from os.path import abspath
from unittest import TestCase
@pytest.mark.skipif(
os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES')
class IntegrationTestCompareComplyV1(TestCase):
compare_comply = None
@classmethod
def setup_class(cls):
cls.compare_comply = watson_developer_cloud.CompareComplyV1(
'2018-10-15',
iam_apikey='YOUR IAM API KEY')
cls.compare_comply.set_default_headers({
'X-Watson-Learning-Opt-Out':
'1',
'X-Watson-Test':
'1'
})
def test_convert_to_html(self):
contract = abspath('resources/contract_A.pdf')
with open(contract, 'rb') as file:
result = self.compare_comply.convert_to_html(file).get_result()
assert result is not None
def test_classify_elements(self):
contract = abspath('resources/contract_A.pdf')
with open(contract, 'rb') as file:
result = self.compare_comply.classify_elements(file).get_result()
assert result is not None
def test_extract_tables(self):
table = abspath('resources/contract_A.pdf')
with open(table, 'rb') as file:
result = self.compare_comply.extract_tables(file).get_result()
assert result is not None
def test_compare_documents(self):
with open(os.path.join(os.path.dirname(__file__), '../../resources/contract_A.pdf'), 'rb') as file1, \
open(os.path.join(os.path.dirname(__file__), '../../resources/contract_B.pdf'), 'rb') as file2:
result = self.compare_comply.compare_documents(file1, file2).get_result()
assert result is not None
def test_feedback(self):
feedback_data = {
'feedback_type': 'element_classification',
'document': {
'hash': '',
'title': 'doc title'
},
'model_id': 'contracts',
'model_version': '11.00',
'location': {
'begin': '214',
'end': '237'
},
'text': '1. IBM will provide a Senior Managing Consultant / expert resource, for up to 80 hours, to assist Florida Power & Light (FPL) with the creation of an IT infrastructure unit cost model for existing infrastructure.',
'original_labels': {
'types': [
{
'label': {
'nature': 'Obligation',
'party': 'IBM'
},
'provenance_ids': [
'85f5981a-ba91-44f5-9efa-0bd22e64b7bc',
'ce0480a1-5ef1-4c3e-9861-3743b5610795'
]
},
{
'label': {
'nature': 'End User',
'party': 'Exclusion'
},
'provenance_ids': [
'85f5981a-ba91-44f5-9efa-0bd22e64b7bc',
'ce0480a1-5ef1-4c3e-9861-3743b5610795'
]
}
],
'categories': [
{
'label': 'Responsibilities',
'provenance_ids': []
},
{
'label': 'Amendments',
'provenance_ids': []
}
]
},
'updated_labels': {
'types': [
{
'label': {
'nature': 'Obligation',
'party': 'IBM'
}
},
{
'label': {
'nature': 'Disclaimer',
'party': 'Buyer'
}
}
],
'categories': [
{
'label': 'Responsibilities'
},
{
'label': 'Audits'
}
]
}
}
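# Exercise the full feedback lifecycle: add, get, list, then delete.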
add_feedback = self.compare_comply.add_feedback(
feedback_data,
'wonder woman',
'test comment').get_result()
assert add_feedback is not None
assert add_feedback['feedback_id'] is not None
feedback_id = add_feedback['feedback_id']
self.compare_comply.set_default_headers({'x-watson-metadata': 'customer_id=sdk-test-customer-id'})
get_feedback = self.compare_comply.get_feedback(feedback_id).get_result()
assert get_feedback is not None
list_feedback = self.compare_comply.list_feedback(
feedback_type='element_classification').get_result()
assert list_feedback is not None
delete_feedback = self.compare_comply.delete_feedback(feedback_id).get_result()
assert delete_feedback is not None
@pytest.mark.skip(reason="Temporarily skip")
def test_batches(self):
list_batches = self.compare_comply.list_batches().get_result()
assert list_batches is not None
with open(os.path.join(os.path.dirname(__file__), '../../resources/cloud-object-storage-credentials-input.json'), 'rb') as input_credentials_file, \
open(os.path.join(os.path.dirname(__file__), '../../resources/cloud-object-storage-credentials-output.json'), 'rb') as output_credentials_file:
create_batch = self.compare_comply.create_batch(
'html_conversion',
input_credentials_file,
'us-south',
'compare-comply-integration-test-bucket-input',
output_credentials_file,
'us-south',
'compare-comply-integration-test-bucket-output').get_result()
assert create_batch is not None
assert create_batch['batch_id'] is not None
batch_id = create_batch['batch_id']
get_batch = self.compare_comply.get_batch(batch_id)
assert get_batch is not None
update_batch = self.compare_comply.update_batch(batch_id, 'rescan')
assert update_batch is not None
|
[] |
[] |
[
"VCAP_SERVICES"
] |
[]
|
["VCAP_SERVICES"]
|
python
| 1 | 0 | |
pkg/stash/with_azureblob_test.go
|
package stash
import (
"context"
"fmt"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/storage"
"github.com/gomods/athens/pkg/storage/mem"
"github.com/technosophos/moniker"
"golang.org/x/sync/errgroup"
)
// TestWithAzureBlob requires a real Azure Blob backend implementation
// and ensures that concurrent saves of the same module are serialized,
// so that only the first save reaches the underlying stasher.
func TestWithAzureBlob(t *testing.T) {
containerName := randomContainerName(os.Getenv("DRONE_PULL_REQUEST"))
cfg := getAzureTestConfig(containerName)
if cfg == nil {
t.SkipNow()
}
strg, err := mem.NewStorage()
if err != nil {
t.Fatal(err)
}
ms := &mockAzureBlobStasher{strg: strg}
wpr, err := WithAzureBlobLock(cfg, time.Second*10, strg)
if err != nil {
t.Fatal(err)
}
s := wpr(ms)
var eg errgroup.Group
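// Kick off five concurrent stashes of the same module/version; the Azure Blob
// lock should serialize them so that only the first call reaches the underlying
// stasher (the mock below errors on any later call).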
for i := 0; i < 5; i++ {
eg.Go(func() error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
_, err := s.Stash(ctx, "mod", "ver")
return err
})
}
err = eg.Wait()
if err != nil {
t.Fatal(err)
}
}
// mockAzureBlobStasher is like mockStasher
// but leverages in memory storage
// so that azure blob can determine
// whether to call the underlying stasher or not.
type mockAzureBlobStasher struct {
strg storage.Backend
mu sync.Mutex
num int
}
func (ms *mockAzureBlobStasher) Stash(ctx context.Context, mod, ver string) (string, error) {
time.Sleep(time.Millisecond * 100) // allow for second requests to come in.
ms.mu.Lock()
defer ms.mu.Unlock()
if ms.num == 0 {
err := ms.strg.Save(
ctx,
mod,
ver,
[]byte("mod file"),
strings.NewReader("zip file"),
[]byte("info file"),
)
if err != nil {
return "", err
}
ms.num++
return "", nil
}
return "", fmt.Errorf("second time error")
}
func getAzureTestConfig(containerName string) *config.AzureBlobConfig {
key := os.Getenv("ATHENS_AZURE_ACCOUNT_KEY")
if key == "" {
return nil
}
name := os.Getenv("ATHENS_AZURE_ACCOUNT_NAME")
if name == "" {
return nil
}
return &config.AzureBlobConfig{
AccountName: name,
AccountKey: key,
ContainerName: containerName,
}
}
func randomContainerName(prefix string) string {
// moniker is a cool library to produce mostly unique, human-readable names
// see https://github.com/technosophos/moniker for more details
namer := moniker.New()
if prefix != "" {
return fmt.Sprintf("%s_%s", prefix, namer.NameSep(""))
}
return namer.NameSep("")
}
|
[
"\"DRONE_PULL_REQUEST\"",
"\"ATHENS_AZURE_ACCOUNT_KEY\"",
"\"ATHENS_AZURE_ACCOUNT_NAME\""
] |
[] |
[
"DRONE_PULL_REQUEST",
"ATHENS_AZURE_ACCOUNT_NAME",
"ATHENS_AZURE_ACCOUNT_KEY"
] |
[]
|
["DRONE_PULL_REQUEST", "ATHENS_AZURE_ACCOUNT_NAME", "ATHENS_AZURE_ACCOUNT_KEY"]
|
go
| 3 | 0 | |
hyperglass_agent/cli/actions.py
|
"""Actions executed by commands."""
# Standard Library
import os
import shutil
from typing import Any, Iterable, Optional, Generator
from pathlib import Path
from datetime import datetime, timedelta
from ipaddress import ip_address
# Third Party
from click import echo, style, prompt, confirm
from inquirer import List as InquirerList
from inquirer import Checkbox
# Project
from hyperglass_agent.util import get_addresses
from hyperglass_agent.cli.echo import (
info,
error,
label,
status,
inquire,
success,
warning,
)
from hyperglass_agent.cli.static import CL, NL, WS, WARNING, E
def create_dir(path: Any, **kwargs: Any) -> bool:
"""Validate and attempt to create a directory, if it does not exist."""
# If input path is not a path object, try to make it one
if not isinstance(path, Path):
try:
path = Path(path)
except TypeError:
error("{p} is not a valid path", p=path)
# If path does not exist, try to create it
if not path.exists():
try:
path.mkdir(**kwargs)
except PermissionError:
error(
"{u} does not have permission to create {p}. Try running with sudo?",
u=os.getlogin(),
p=path,
)
# Verify the path was actually created
if path.exists():
success("Created {p}", p=path)
# If the path already exists, inform the user
elif path.exists():
info("{p} already exists", p=path)
return True
def generate_secret(length: int = 32) -> str:
"""Generate a secret for JWT encoding."""
import secrets
gen_secret = secrets.token_urlsafe(length)
status(
"""
This secret will be used to encrypt & decrypt the communication between
hyperglass and hyperglass-agent. Before proceeding any further, please
add the secret to the `password:` field of the device's configuration in
hyperglass's devices.yaml file, and restart hyperglass.
"""
)
label("Secret: {s}", s=gen_secret)
done = confirm(
"Press enter once complete...",
default=True,
prompt_suffix="",
show_default=False,
)
if done: # noqa: R503
return gen_secret
def migrate_config(force: bool = False, secret: Optional[str] = None) -> None:
"""Copy example config file and remove .example extensions."""
app_path = os.environ.get("hyperglass_agent_directory")
if app_path is None:
app_path = find_app_path()
else:
app_path = Path(app_path)
example = Path(__file__).parent.parent / "example_config.yaml"
target_file = app_path / "config.yaml"
def copy(secret):
shutil.copyfile(example, target_file)
if not target_file.exists():
raise FileNotFoundError(str(target_file) + " does not exist.")
with target_file.open("r") as f:
data = f.read()
if secret is None:
secret = generate_secret()
data = data.replace("secret: null", "secret: '{}'".format(secret))
with target_file.open("w") as f:
f.write(data)
success("Successfully migrated example config file to {t}", t=target_file)
try:
if target_file.exists():
if not force:
info("{f} already exists", f=str(target_file))
else:
copy(secret)
else:
copy(secret)
except Exception as e:
error("Failed to migrate '{f}': {e}", f=str(target_file), e=e)
def find_app_path() -> Path:
"""Try to find the app_path, prompt user to set one if it is not found."""
from hyperglass_agent.util import set_app_path
from hyperglass_agent.constants import APP_PATHS
try:
set_app_path(required=True)
app_path = Path(os.environ["hyperglass_agent_directory"])
except RuntimeError:
warning(
"None of the supported paths for hyperglass-agent were found.\n"
+ "Checked:\n{one}\n{two}",
one=APP_PATHS[0],
two=APP_PATHS[1],
)
create = confirm(style("Would you like to create one?", **WARNING))
if not create:
error(
"hyperglass-agent requires an application path, "
+ "but you've chosen not to create one."
)
elif create:
available_paths = [
InquirerList(
"selected",
message="Choose a directory for hyperglass-agent",
choices=APP_PATHS,
)
]
answer = inquire(available_paths)
if answer is None:
error("A directory for hyperglass-agent is required")
selected = answer["selected"]
if not selected.exists():
create_dir(selected)
app_path = selected
return app_path
def read_cert() -> Generator:
"""Read public key attributes."""
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.x509.extensions import ExtensionOID
from cryptography.hazmat.backends import default_backend
app_path = find_app_path()
cert_path = app_path / "agent_cert.pem"
cert = x509.load_pem_x509_certificate(cert_path.read_bytes(), default_backend())
for attr in cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME):
yield attr.value
for attr in cert.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_ALTERNATIVE_NAME
).value._general_names:
yield attr.value
def make_cert(
cn: str, sans: Iterable, o: str, start: datetime, end: datetime, size: int = 2048
) -> Generator:
"""Generate public & private key pair for SSL."""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
key = rsa.generate_private_key(
public_exponent=65537, key_size=size, backend=default_backend()
)
subject = issuer = x509.Name(
[
x509.NameAttribute(NameOID.COMMON_NAME, cn),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, o),
]
)
cert = (
x509.CertificateBuilder()
.subject_name(subject)
.issuer_name(issuer)
.public_key(key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(start)
.not_valid_after(end)
.add_extension(
x509.SubjectAlternativeName(
[x509.DNSName(cn), *(x509.IPAddress(i) for i in sans)]
),
critical=False,
)
.sign(key, hashes.SHA256(), default_backend())
)
yield cert.public_bytes(serialization.Encoding.PEM)
yield key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
def write_cert(name: str, org: str, duration: int, starttime: datetime, size: int, show: bool) -> None:
"""Generate SSL certificate keypair."""
app_path = find_app_path()
cert_path = app_path / "agent_cert.pem"
key_path = app_path / "agent_key.pem"
start = starttime
end = start + timedelta(days=duration * 365)
label("Hostname: {cn}", cn=name)
status(
"""
A self-signed certificate with the above hostname as the common name
attribute will be generated. This hostname must be resolvable by
hyperglass via either DNS or a host file, and must match the device's
`address:` field in hyperglass's devices.yaml."""
)
use_name = confirm("Is this the correct hostname?", default=True)
if not use_name:
name = prompt("Please enter the correct hostname", type=str)
all_ips = [f"{a} [{i}]" for i, a in get_addresses()]
status(
"""
hyperglass-agent adds any IP addresses reachable by hyperglass as
subject alternative names to the SSL certificate. Please select any IP
addresses over which hyperglass may communicate with hyperglass-agent."""
)
ips = [Checkbox("ips", message="Select IPs", choices=all_ips)]
selected = [i.split("[")[0].strip() for i in inquire(ips)["ips"]]
selected_ips = [ip_address(i) for i in selected]
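# make_cert() is a generator that yields the PEM-encoded public key and then the
# private key, so the tuple unpacking below consumes both values.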
cert, key = make_cert(
cn=name, sans=selected_ips, o=org, start=start, end=end, size=size
)
if show:
info(f'Public Key:\n{cert.decode("utf8")}')
info(f'Private Key:\n{key.decode("utf8")}')
with cert_path.open("wb") as cf:
cf.write(cert)
if not cert_path.exists():
error("Error writing public key to {f}", f=cert_path.absolute())
success("Wrote public key to: {f}", f=cert_path.absolute())
with key_path.open("wb") as kf:
kf.write(key)
if not key_path.exists():
error("Error writing private key to {f}", f=key_path.absolute())
success("Wrote private key to: {f}", f=key_path.absolute())
def send_certificate() -> None:
"""Send this device's public key to hyperglass."""
from hyperglass_agent.config import params
from hyperglass_agent.util import send_public_key
from pydantic import AnyHttpUrl, create_model, ValidationError
app_path = find_app_path()
cert_file = app_path / "agent_cert.pem"
device_name = read_cert().send(None)
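# read_cert() is a generator; send(None) advances it to its first yield, which is
# the certificate's common name attribute (used here as the device name).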
if params.ssl is not None and not params.ssl.enable:
confirm(
"SSL is disabled. Proceed with sending certificate to hyperglass?",
default=False,
abort=True,
)
if not cert_file.exists():
error("File {f} does not exist", f=cert_file)
with cert_file.open("r") as f:
cert = f.read().strip()
_hg_url = prompt("Enter hyperglass URL (e.g. https://lg.example.com)", type=str)
url_model = create_model("UrlModel", url=(AnyHttpUrl, ...))
try:
hg_url = url_model(url=_hg_url)
except ValidationError as ve:
msg = ve.errors()[0]["msg"]
warning("URL {u} is invalid: {e}", u=_hg_url, e=msg)
_hg_url = prompt("Enter hyperglass URL (e.g. https://lg.example.com)", type=str)
try:
hg_url = url_model(url=_hg_url)
except ValidationError as ve:
msg = ve.errors()[0]["msg"]
error("URL {u} is invalid: {e}", u=_hg_url, e=msg)
try:
status = send_public_key(
str(hg_url.url), device_name=device_name, certificate=cert, params=params
)
success(status)
except RuntimeError as re:
error(str(re))
def install_systemd(service_path: Path) -> bool:
"""Installs generated systemd file to system's systemd directory."""
systemd = Path("/etc/systemd/system")
installed = systemd / "hyperglass-agent.service"
if not systemd.exists():
error("{e} does not exist. Unable to install systemd service.", e=systemd)
if installed.is_symlink():
installed.unlink()
installed.symlink_to(service_path)
if not installed.exists():
error("Unable to symlink {s} to {d}", s=service_path, d=installed)
success("Symlinked {s} to {d}", s=service_path, d=installed)
return True
def make_systemd() -> bool:
"""Generate a systemd file based on the local system."""
from shutil import which
from getpass import getuser
template = """
[Unit]
Description=hyperglass-agent
After=network.target
[Service]
User={user}
Group={group}
ExecStart={bin_path} start
[Install]
WantedBy=multi-user.target
"""
app_path = find_app_path()
service_path = app_path / "hyperglass-agent.service"
cmd_path = which("hyperglass-agent")
if not cmd_path:
bin_path = "python3 -m hyperglass_agent.console"
warning("hyperglass executable not found, using {h}", h=bin_path)
else:
bin_path = cmd_path
if app_path == Path.home():
user = getuser()
else:
user = "root"
systemd = template.format(user=user, group=user, bin_path=bin_path)
info(f"Generated systemd service:\n{systemd}")
if service_path.exists():
service_path.unlink()
with service_path.open("w") as f:
f.write(systemd)
if not service_path.exists():
error("Error writing systemd file to {f}", f=service_path)
install_systemd(service_path)
return True
def start_web_server() -> None:
"""Start web server."""
find_app_path()
try:
from hyperglass_agent.config import params
from hyperglass_agent.api.web import start
msg_start = "Starting hyperglass agent web server on"
msg_uri = "http://"
msg_host = str(params.listen_address)
msg_port = str(params.port)
msg_len = len("".join([msg_start, WS[1], msg_uri, msg_host, CL[1], msg_port]))
echo(
NL[1]
+ WS[msg_len + 8]
+ E.ROCKET
+ NL[1]
+ E.CHECK
+ style(msg_start, fg="green", bold=True)
+ WS[1]
+ style(msg_uri, fg="white")
+ style(msg_host, fg="blue", bold=True)
+ style(CL[1], fg="white")
+ style(msg_port, fg="magenta", bold=True)
+ WS[1]
+ E.ROCKET
+ NL[1]
+ WS[1]
+ NL[1]
)
start()
except Exception as e:
error("Failed to start web server: {e}", e=e)
|
[] |
[] |
[
"hyperglass_agent_directory"
] |
[]
|
["hyperglass_agent_directory"]
|
python
| 1 | 0 | |
mission_control/gui.py
|
"""User interface examples."""
import sys, os, copy
#setup sdl
os.environ["PYSDL2_DLL_PATH"] = "..\env"
from sdl2 import *
import sdl2.ext
from controller import *
from timer import *
WIDTH = 800
HEIGHT = 600
# Define some global color constants
WHITE = sdl2.ext.Color(255, 255, 255)
GREY = sdl2.ext.Color(200, 200, 200)
RED = sdl2.ext.Color(255, 0, 0)
GREEN = sdl2.ext.Color(0, 255, 0)
BLACK = sdl2.ext.Color(0, 0, 0)
# A callback for the Button.motion event.
def onmotion(button, event):
#print("Mouse moves over the button!")
pass
# A callback for the Button.click event.
def onclick(button, event):
print("Button was clicked!")
# A callback for the TextEntry.input event.
def oninput(entry, event):
print("Input received with text '%s'" % event.text.text)
print("Text on the entry now is '%s'" % entry.text)
# A callback for the TextEntry.edit event.
def onedit(entry, event):
print("Edit received with text '%s', start '%d', length '%d'" %
(event.text.text, event.text.start, event.text.length))
def oncheck(button, event):
temp_file = sdl2.ext.Resources(__file__, "resources")
if button.checked:
tmpsprite = button.factory.from_image(temp_file.get_path("button_selected.png"))
button.texture, tmpsprite.texture = tmpsprite.texture, button.texture
del tmpsprite
else:
tmpsprite = button.factory.from_image(temp_file.get_path("button_unselected.png"))
button.texture, tmpsprite.texture = tmpsprite.texture, button.texture
del tmpsprite
def run():
# You know those from the helloworld.py example.
# Initialize the video subsystem, create a window and make it visible.
sdl2.ext.init()
SDL_Init(SDL_INIT_GAMECONTROLLER)
SDL_Init(SDL_RENDERER_PRESENTVSYNC)
fps_timer = Timer(60)
fps_counter = Speedometer()
window = sdl2.ext.Window("Mission Control", size=(WIDTH, HEIGHT))
# Create a resource, so we have easy access to the example images.
RESOURCES = sdl2.ext.Resources(__file__, "resources")
SDL_SetWindowIcon(window.window, sdl2.ext.image.load_image(RESOURCES.get_path('icon.png')))
elite_font = sdl2.ext.FontManager('resources/eurostile.ttf')
window.show()
if "-hardware" in sys.argv:
print("Using hardware acceleration")
renderer = sdl2.ext.Renderer(window, flags=SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC)
factory = sdl2.ext.SpriteFactory(sdl2.ext.TEXTURE, renderer=renderer,
fontmanager=elite_font)
else:
print("Using software rendering")
factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE, fontmanager=elite_font)
uifactory = sdl2.ext.UIFactory(factory)
label = factory.from_text('Mission Control', size=40)
label.position = WIDTH // 2 - label.size[0] // 2, 0
#label = uifactory.from_text(sdl2.ext.BUTTON, 'Mission Control')
button = uifactory.from_image(sdl2.ext.BUTTON, RESOURCES.get_path("button.bmp"))
button.position = 50, 50
checkbutton = uifactory.from_image(sdl2.ext.CHECKBUTTON,
RESOURCES.get_path("button_unselected.png"))
checkbutton.position = 200, 200
button.click += onclick
button.motion += onmotion
checkbutton.click += oncheck
checkbutton.factory = factory
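# Build the on-screen controller visualization (ControllerGUI presumably comes
# from the local controller module imported above); it exposes the SDL game
# controller handle and the sprites that get rendered below.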
ds4 = ControllerGUI(factory, 240, 450)
print(SDL_GameControllerName(ds4.controller))
ds4.update()
spriterenderer = factory.create_sprite_render_system(window)
uiprocessor = sdl2.ext.UIProcessor()
sprites = (label, checkbutton) + tuple(ds4.sprites)
running = True
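# Main loop: pump SDL events, let the UI processor drive widget callbacks,
# refresh the controller view, and redraw all sprites each frame.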
while running:
events = sdl2.ext.get_events()
for event in events:
if event.type == sdl2.SDL_QUIT:
running = False
break
# Pass the SDL2 events to the UIProcessor, which takes care of
# the user interface logic.
uiprocessor.dispatch([button, checkbutton], event)
# Render all user interface elements on the window.
ds4.update()
sdl2.ext.fill(spriterenderer.surface, BLACK)
spriterenderer.render(sprites)
#render(sprites, renderer)
fps_timer.tick()
sdl2.ext.quit()
return 0
def render(sprites, renderer):
r = SDL_Rect()
dorender = SDL_RenderCopy
renderer.clear(BLACK)
for sprite in sprites:
r.x = int(sprite.x)
r.y = int(sprite.y)
r.w, r.h = sprite.size
if not sprite.hidden:
dorender(renderer.renderer, sprite.texture, None, r)
renderer.present()
if __name__ == "__main__":
sys.exit(run())
|
[] |
[] |
[
"PYSDL2_DLL_PATH"
] |
[]
|
["PYSDL2_DLL_PATH"]
|
python
| 1 | 0 | |
win64-postgresql/pgAdmin 4/venv/Lib/idlelib/pyshell.py
|
#! /usr/bin/env python3
import sys
if __name__ == "__main__":
sys.modules['idlelib.pyshell'] = sys.modules['__main__']
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
raise SystemExit(1)
# Valid arguments for the ...Awareness call below are defined in the following.
# https://msdn.microsoft.com/en-us/library/windows/desktop/dn280512(v=vs.85).aspx
if sys.platform == 'win32':
try:
import ctypes
PROCESS_SYSTEM_DPI_AWARE = 1
ctypes.OleDLL('shcore').SetProcessDpiAwareness(PROCESS_SYSTEM_DPI_AWARE)
except (ImportError, AttributeError, OSError):
pass
import tkinter.messagebox as tkMessageBox
if TkVersion < 8.5:
root = Tk() # otherwise create root in main
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
tkMessageBox.showerror("Idle Cannot Start",
"Idle requires tcl/tk 8.5+, not %s." % TkVersion,
parent=root)
raise SystemExit(1)
from code import InteractiveInterpreter
import linecache
import os
import os.path
from platform import python_version
import re
import socket
import subprocess
from textwrap import TextWrapper
import threading
import time
import tokenize
import warnings
from idlelib.colorizer import ColorDelegator
from idlelib.config import idleConf
from idlelib import debugger
from idlelib import debugger_r
from idlelib.editor import EditorWindow, fixwordbreaks
from idlelib.filelist import FileList
from idlelib.outwin import OutputWindow
from idlelib import rpc
from idlelib.run import idle_formatwarning, PseudoInputFile, PseudoOutputFile
from idlelib.undo import UndoDelegator
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
which can be None, the capture of the consequence AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, OSError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(
idleConf.userdir, 'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if self.io is None:
# possible due to update in restore_file_breaks
return
if color:
theme = idleConf.CurrentTheme()
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text.
# Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except OSError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except OSError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.CurrentTheme()
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except OSError:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False, filename=''):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
debugger_r.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
tag = 'RESTART: ' + (filename if filename else 'Shell')
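# Pad the restart banner with '=' on both sides so it spans the shell's width.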
halfbar = ((int(console.width) -len(tag) - 4) // 2) * '='
console.write("\n{0} {1} {0}".format(halfbar, tag))
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
if not filename:
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
debugger_r.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.listening_sock.close()
except AttributeError: # no socket
pass
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, OSError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "pyshell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import debugobj_r
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.tree import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.CurrentTheme()
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
if use_subprocess:
source = (f"__file__ = r'''{os.path.abspath(filename)}'''\n"
+ source + "\ndel __file__")
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
#if isinstance(source, str):
# from idlelib import iomenu
# try:
# source = source.encode(iomenu.encoding)
# except UnicodeError:
# self.tkconsole.resetoutput()
# self.write("Unsupported characters in input\n")
# return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1 #mark end of offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
parent=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
return self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
parent=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Connection Error",
"IDLE's subprocess didn't make connection.\n"
"See the 'Startup failure' section of the IDLE doc, online at\n"
"https://docs.python.org/3/library/idle.html#startup-failure",
parent=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
parent=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("window", "_Window"),
("help", "_Help"),
]
# Extend right-click context menu
rmenu_specs = OutputWindow.rmenu_specs + [
("Squeeze", "<<squeeze-current-text>>"),
]
# New classes
from idlelib.history import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
OutputWindow.__init__(self, flist, None, None)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.sys_ps1 = sys.ps1 if hasattr(sys, 'ps1') else '>>> '
self.prompt_last_line = self.sys_ps1.split('\n')[-1]
self.prompt = self.sys_ps1 # Changes when debug active
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
squeezer = self.Squeezer(self)
text.bind("<<squeeze-current-text>>",
squeezer.squeeze_current_text_event)
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import iomenu
self.stdin = PseudoInputFile(self, "stdin", iomenu.encoding)
self.stdout = PseudoOutputFile(self, "stdout", iomenu.encoding)
self.stderr = PseudoOutputFile(self, "stderr", iomenu.encoding)
self.console = PseudoOutputFile(self, "console", iomenu.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use text viewer someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
parent=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
debugger_r.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
self.prompt = self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
self.prompt = "[DEBUG ON]\n" + self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"Your program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "help", "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = ("==== No Subprocess ====\n\n" +
"WARNING: Running IDLE without a Subprocess is deprecated\n" +
"and will be removed in a later version. See Help/IDLE Help\n" +
"for details.\n\n")
sys.displayhook = rpc.displayhook
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.text.focus_force()
self.showprompt()
import tkinter
tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if this before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
parent=self.text)
return
from idlelib.stackviewer import StackBrowser
StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
self.console.write(self.prompt)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def show_warning(self, msg):
width = self.interp.tkconsole.width
wrapper = TextWrapper(width=width, tabsize=8, expand_tabs=True)
wrapped_msg = '\n'.join(wrapper.wrap(msg))
if not wrapped_msg.endswith('\n'):
wrapped_msg += '\n'
self.per.bottom.insert("iomark linestart", wrapped_msg, "stderr")
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
def write(self, s, tags=()):
if isinstance(s, str) and len(s) and max(s) > '\uffff':
# Tk doesn't support outputting non-BMP characters
# Let's assume what printed string is not very long,
# find first non-BMP character and construct informative
# UnicodeEncodeError exception.
for start, char in enumerate(s):
if char > '\uffff':
break
raise UnicodeEncodeError("UCS-2", char, start, start+1,
'Non-BMP character not supported in Tk')
try:
self.text.mark_gravity("iomark", "right")
count = OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
raise ###pass # ### 11Aug07 KBK if we are expecting exceptions
# let's find out what they are and be specific.
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
return count
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super().rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert','<','iomark'):
return 'disabled'
return super().rmenu_check_paste()
def fix_x11_paste(root):
"Make paste replace selection on x11. See issue #5124."
if root._windowingsystem == 'x11':
for cls in 'Text', 'Entry', 'Spinbox':
root.bind_class(
cls,
'<<Paste>>',
'catch {%W delete sel.first sel.last}\n' +
root.bind_class(cls, '<<Paste>>'))
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
import getopt
from platform import system
from idlelib import testing # bool value
from idlelib import macosx
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
print(" Warning: running IDLE without a subprocess is deprecated.",
file=sys.stderr)
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# Setup root. Don't break user code run in IDLE process.
# Don't change environment when testing.
if use_subprocess and not testing:
NoDefaultRoot()
root = Tk(className="Idle")
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
elif not macosx.isAquaTk():
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
icons = [PhotoImage(master=root, file=iconfile)
for iconfile in iconfiles]
root.wm_iconphoto(True, *icons)
# start editor and/or shell windows:
fixwordbreaks(root)
fix_x11_paste(root)
flist = PyShellFileList(root)
macosx.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
# filename is actually a directory; disregard it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosx.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic issues and print warning message(s) in
# the IDLE shell window; this is less intrusive than always
# opening a separate window.
# Warn if using a problematic OS X Tk version.
tkversionwarning = macosx.tkVersionWarning(root)
if tkversionwarning:
shell.show_warning(tkversionwarning)
# Warn if the "Prefer tabs when opening documents" system
# preference is set to "Always".
prefer_tabs_preference_warning = macosx.preferTabsPreferenceWarning()
if prefer_tabs_preference_warning:
shell.show_warning(prefer_tabs_preference_warning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
[] |
[] |
[
"PYTHONSTARTUP",
"IDLESTARTUP"
] |
[]
|
["PYTHONSTARTUP", "IDLESTARTUP"]
|
python
| 2 | 0 | |
tensorboard/compat/tensorflow_stub/io/gfile_s3_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import boto3
import os
import unittest
from moto import mock_s3
from tensorboard.compat.tensorflow_stub import errors
from tensorboard.compat.tensorflow_stub.io import gfile
# Placeholder values to make sure any local keys are overridden
# and moto mock is being called
os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key")
os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret")
class GFileTest(unittest.TestCase):
@mock_s3
def testExists(self):
temp_dir = self._CreateDeepS3Structure()
ckpt_path = self._PathJoin(temp_dir, "model.ckpt")
self.assertTrue(gfile.exists(temp_dir))
self.assertTrue(gfile.exists(ckpt_path))
@mock_s3
def testGlob(self):
temp_dir = self._CreateDeepS3Structure()
# S3 glob includes subdirectory content, which standard
# filesystem does not. However, this is good for perf.
expected = [
"a.tfevents.1",
"bar/b.tfevents.1",
"bar/baz/c.tfevents.1",
"bar/baz/d.tfevents.1",
"bar/quux/some_flume_output.txt",
"bar/quux/some_more_flume_output.txt",
"bar/red_herring.txt",
"model.ckpt",
"quuz/e.tfevents.1",
"quuz/garply/corge/g.tfevents.1",
"quuz/garply/f.tfevents.1",
"quuz/garply/grault/h.tfevents.1",
"waldo/fred/i.tfevents.1",
]
expected_listing = [self._PathJoin(temp_dir, f) for f in expected]
gotten_listing = gfile.glob(self._PathJoin(temp_dir, "*"))
self.assertCountEqual(
expected_listing,
gotten_listing,
"Files must match. Expected %r. Got %r."
% (expected_listing, gotten_listing),
)
@mock_s3
def testIsdir(self):
temp_dir = self._CreateDeepS3Structure()
self.assertTrue(gfile.isdir(temp_dir))
@mock_s3
def testListdir(self):
temp_dir = self._CreateDeepS3Structure()
self._CreateDeepS3Structure(temp_dir)
expected_files = [
# Empty directory not returned
# 'foo',
"bar",
"quuz",
"a.tfevents.1",
"model.ckpt",
"waldo",
]
gotten_files = gfile.listdir(temp_dir)
self.assertCountEqual(expected_files, gotten_files)
@mock_s3
def testMakeDirs(self):
temp_dir = self._CreateDeepS3Structure()
new_dir = self._PathJoin(temp_dir, "newdir", "subdir", "subsubdir")
gfile.makedirs(new_dir)
self.assertTrue(gfile.isdir(new_dir))
@mock_s3
def testMakeDirsAlreadyExists(self):
temp_dir = self._CreateDeepS3Structure()
new_dir = self._PathJoin(temp_dir, "bar", "baz")
gfile.makedirs(new_dir)
@mock_s3
def testWalk(self):
temp_dir = self._CreateDeepS3Structure()
self._CreateDeepS3Structure(temp_dir)
expected = [
[
"",
[
"a.tfevents.1",
"model.ckpt",
],
],
# Empty directory not returned
# ['foo', []],
[
"bar",
[
"b.tfevents.1",
"red_herring.txt",
],
],
[
"bar/baz",
[
"c.tfevents.1",
"d.tfevents.1",
],
],
[
"bar/quux",
[
"some_flume_output.txt",
"some_more_flume_output.txt",
],
],
[
"quuz",
[
"e.tfevents.1",
],
],
[
"quuz/garply",
[
"f.tfevents.1",
],
],
[
"quuz/garply/corge",
[
"g.tfevents.1",
],
],
[
"quuz/garply/grault",
[
"h.tfevents.1",
],
],
["waldo", []],
[
"waldo/fred",
[
"i.tfevents.1",
],
],
]
for pair in expected:
# If this is not the top-level directory, prepend the high-level
# directory.
pair[0] = self._PathJoin(temp_dir, pair[0]) if pair[0] else temp_dir
gotten = gfile.walk(temp_dir)
self._CompareFilesPerSubdirectory(expected, gotten)
@mock_s3
def testStat(self):
ckpt_content = "asdfasdfasdffoobarbuzz"
temp_dir = self._CreateDeepS3Structure(ckpt_content=ckpt_content)
ckpt_path = self._PathJoin(temp_dir, "model.ckpt")
ckpt_stat = gfile.stat(ckpt_path)
self.assertEqual(ckpt_stat.length, len(ckpt_content))
bad_ckpt_path = self._PathJoin(temp_dir, "bad_model.ckpt")
with self.assertRaises(errors.NotFoundError):
gfile.stat(bad_ckpt_path)
@mock_s3
def testRead(self):
ckpt_content = "asdfasdfasdffoobarbuzz"
temp_dir = self._CreateDeepS3Structure(ckpt_content=ckpt_content)
ckpt_path = self._PathJoin(temp_dir, "model.ckpt")
with gfile.GFile(ckpt_path, "r") as f:
f.buff_chunk_size = 4 # Test buffering by reducing chunk size
ckpt_read = f.read()
self.assertEqual(ckpt_content, ckpt_read)
@mock_s3
def testReadLines(self):
ckpt_lines = ["\n"] + ["line {}\n".format(i) for i in range(10)] + [" "]
ckpt_content = "".join(ckpt_lines)
temp_dir = self._CreateDeepS3Structure(ckpt_content=ckpt_content)
ckpt_path = self._PathJoin(temp_dir, "model.ckpt")
with gfile.GFile(ckpt_path, "r") as f:
f.buff_chunk_size = 4 # Test buffering by reducing chunk size
ckpt_read_lines = list(f)
self.assertEqual(ckpt_lines, ckpt_read_lines)
@mock_s3
def testReadWithOffset(self):
ckpt_content = "asdfasdfasdffoobarbuzz"
ckpt_b_content = b"asdfasdfasdffoobarbuzz"
temp_dir = self._CreateDeepS3Structure(ckpt_content=ckpt_content)
ckpt_path = self._PathJoin(temp_dir, "model.ckpt")
with gfile.GFile(ckpt_path, "r") as f:
f.buff_chunk_size = 4 # Test buffering by reducing chunk size
ckpt_read = f.read(12)
self.assertEqual("asdfasdfasdf", ckpt_read)
ckpt_read = f.read(6)
self.assertEqual("foobar", ckpt_read)
ckpt_read = f.read(1)
self.assertEqual("b", ckpt_read)
ckpt_read = f.read()
self.assertEqual("uzz", ckpt_read)
ckpt_read = f.read(1000)
self.assertEqual("", ckpt_read)
with gfile.GFile(ckpt_path, "rb") as f:
ckpt_read = f.read()
self.assertEqual(ckpt_b_content, ckpt_read)
@mock_s3
def testWrite(self):
temp_dir = self._CreateDeepS3Structure()
ckpt_path = os.path.join(temp_dir, "model2.ckpt")
ckpt_content = "asdfasdfasdffoobarbuzz"
with gfile.GFile(ckpt_path, "w") as f:
f.write(ckpt_content)
with gfile.GFile(ckpt_path, "r") as f:
ckpt_read = f.read()
self.assertEqual(ckpt_content, ckpt_read)
@mock_s3
def testOverwrite(self):
temp_dir = self._CreateDeepS3Structure()
ckpt_path = os.path.join(temp_dir, "model2.ckpt")
ckpt_content = "asdfasdfasdffoobarbuzz"
with gfile.GFile(ckpt_path, "w") as f:
f.write("original")
with gfile.GFile(ckpt_path, "w") as f:
f.write(ckpt_content)
with gfile.GFile(ckpt_path, "r") as f:
ckpt_read = f.read()
self.assertEqual(ckpt_content, ckpt_read)
@mock_s3
def testWriteMultiple(self):
temp_dir = self._CreateDeepS3Structure()
ckpt_path = os.path.join(temp_dir, "model2.ckpt")
ckpt_content = "asdfasdfasdffoobarbuzz" * 5
with gfile.GFile(ckpt_path, "w") as f:
for i in range(0, len(ckpt_content), 3):
f.write(ckpt_content[i : i + 3])
# Test periodic flushing of the file
if i % 9 == 0:
f.flush()
with gfile.GFile(ckpt_path, "r") as f:
ckpt_read = f.read()
self.assertEqual(ckpt_content, ckpt_read)
@mock_s3
def testWriteEmpty(self):
temp_dir = self._CreateDeepS3Structure()
ckpt_path = os.path.join(temp_dir, "model2.ckpt")
ckpt_content = ""
with gfile.GFile(ckpt_path, "w") as f:
f.write(ckpt_content)
with gfile.GFile(ckpt_path, "r") as f:
ckpt_read = f.read()
self.assertEqual(ckpt_content, ckpt_read)
@mock_s3
def testWriteBinary(self):
temp_dir = self._CreateDeepS3Structure()
ckpt_path = os.path.join(temp_dir, "model.ckpt")
ckpt_content = b"asdfasdfasdffoobarbuzz"
with gfile.GFile(ckpt_path, "wb") as f:
f.write(ckpt_content)
with gfile.GFile(ckpt_path, "rb") as f:
ckpt_read = f.read()
self.assertEqual(ckpt_content, ckpt_read)
@mock_s3
def testWriteMultipleBinary(self):
temp_dir = self._CreateDeepS3Structure()
ckpt_path = os.path.join(temp_dir, "model2.ckpt")
ckpt_content = b"asdfasdfasdffoobarbuzz" * 5
with gfile.GFile(ckpt_path, "wb") as f:
for i in range(0, len(ckpt_content), 3):
f.write(ckpt_content[i : i + 3])
# Test periodic flushing of the file
if i % 9 == 0:
f.flush()
with gfile.GFile(ckpt_path, "rb") as f:
ckpt_read = f.read()
self.assertEqual(ckpt_content, ckpt_read)
def _PathJoin(self, *args):
"""Join directory and path with slash and not local separator."""
return "/".join(args)
def _CreateDeepS3Structure(
self,
top_directory="top_dir",
ckpt_content="",
region_name="us-east-1",
bucket_name="test",
):
"""Creates a reasonable deep structure of S3 subdirectories with files.
Args:
top_directory: The path of the top level S3 directory in which
to create the directory structure. Defaults to 'top_dir'.
ckpt_content: The content to put into model.ckpt. Default to ''.
region_name: The S3 region name. Defaults to 'us-east-1'.
bucket_name: The S3 bucket name. Defaults to 'test'.
Returns:
S3 URL of the top directory in the form 's3://bucket/path'
"""
s3_top_url = "s3://{}/{}".format(bucket_name, top_directory)
# Add a few subdirectories.
directory_names = (
# An empty directory.
"foo",
# A directory with an events file (and a text file).
"bar",
# A deeper directory with events files.
"bar/baz",
# A non-empty subdir that lacks event files (should be ignored).
"bar/quux",
# This 3-level deep set of subdirectories tests logic that replaces
# the full glob string with an absolute path prefix if there is
# only 1 subdirectory in the final mapping.
"quuz/garply",
"quuz/garply/corge",
"quuz/garply/grault",
# A directory that lacks events files, but contains a subdirectory
# with events files (first level should be ignored, second level
# should be included).
"waldo",
"waldo/fred",
)
client = boto3.client("s3", region_name=region_name)
client.create_bucket(Bucket=bucket_name)
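# S3 has no real directories, so zero-byte placeholder objects are written to
# make each prefix exist.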
client.put_object(Body="", Bucket=bucket_name, Key=top_directory)
for directory_name in directory_names:
# Add an end slash
path = top_directory + "/" + directory_name + "/"
# Create an empty object so the location exists
client.put_object(Body="", Bucket=bucket_name, Key=directory_name)
# Add a few files to the directory.
file_names = (
"a.tfevents.1",
"model.ckpt",
"bar/b.tfevents.1",
"bar/red_herring.txt",
"bar/baz/c.tfevents.1",
"bar/baz/d.tfevents.1",
"bar/quux/some_flume_output.txt",
"bar/quux/some_more_flume_output.txt",
"quuz/e.tfevents.1",
"quuz/garply/f.tfevents.1",
"quuz/garply/corge/g.tfevents.1",
"quuz/garply/grault/h.tfevents.1",
"waldo/fred/i.tfevents.1",
)
for file_name in file_names:
# Build the object key for the file under the top directory
path = top_directory + "/" + file_name
if file_name == "model.ckpt":
content = ckpt_content
else:
content = ""
client.put_object(Body=content, Bucket=bucket_name, Key=path)
return s3_top_url
def _CompareFilesPerSubdirectory(self, expected, gotten):
"""Compares iterables of (subdirectory path, list of absolute paths)
Args:
expected: The expected iterable of 2-tuples.
gotten: The gotten iterable of 2-tuples.
"""
expected_directory_to_files = {
result[0]: list(result[1]) for result in expected
}
gotten_directory_to_files = {
# Note we ignore subdirectories and just compare files
result[0]: list(result[2])
for result in gotten
}
self.assertCountEqual(
expected_directory_to_files.keys(),
gotten_directory_to_files.keys(),
)
for subdir, expected_listing in expected_directory_to_files.items():
gotten_listing = gotten_directory_to_files[subdir]
self.assertCountEqual(
expected_listing,
gotten_listing,
"Files for subdir %r must match. Expected %r. Got %r."
% (subdir, expected_listing, gotten_listing),
)
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
ambari-server/src/main/python/ambari_server/setupSecurity.py
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import \
ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
import base64
import fileinput
import getpass
import logging
import os
import re
import shutil
import stat
import sys
import tempfile
import time
import urllib2
from ambari_commons.exceptions import FatalException, NonFatalException
from ambari_commons.logging_utils import print_warning_msg, print_error_msg, print_info_msg, get_verbose
from ambari_commons.os_check import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons.os_utils import is_root, set_file_permissions, \
run_os_command, search_file, is_valid_filepath, change_owner, get_ambari_repo_file_full_name, get_file_owner
from ambari_server.dbConfiguration import ensure_jdbc_driver_is_installed
from ambari_server.serverClassPath import ServerClassPath
from ambari_server.serverConfiguration import configDefaults, parse_properties_file, \
encrypt_password, find_jdk, find_properties_file, get_alias_string, get_ambari_properties, get_conf_dir, \
get_credential_store_location, get_is_persisted, get_is_secure, get_master_key_location, get_db_type, write_property, \
get_original_master_key, get_value_from_properties, get_java_exe_path, is_alias_string, read_ambari_user, \
read_passwd_for_alias, remove_password_file, save_passwd_for_alias, store_password_file, update_properties_2, \
BLIND_PASSWORD, BOOTSTRAP_DIR_PROPERTY, JDBC_PASSWORD_FILENAME, JDBC_PASSWORD_PROPERTY, \
JDBC_RCA_PASSWORD_ALIAS, JDBC_RCA_PASSWORD_FILE_PROPERTY, JDBC_USE_INTEGRATED_AUTH_PROPERTY, \
LDAP_MGR_PASSWORD_ALIAS, LDAP_MGR_PASSWORD_PROPERTY, CLIENT_SECURITY, \
SECURITY_IS_ENCRYPTION_ENABLED, SECURITY_KEY_ENV_VAR_NAME, SECURITY_KERBEROS_JASS_FILENAME, \
SECURITY_PROVIDER_KEY_CMD, SECURITY_MASTER_KEY_FILENAME, SSL_TRUSTSTORE_PASSWORD_ALIAS, \
SSL_TRUSTSTORE_PASSWORD_PROPERTY, SSL_TRUSTSTORE_PATH_PROPERTY, SSL_TRUSTSTORE_TYPE_PROPERTY, \
JDK_NAME_PROPERTY, JCE_NAME_PROPERTY, JAVA_HOME_PROPERTY, \
get_resources_location, SECURITY_MASTER_KEY_LOCATION, SETUP_OR_UPGRADE_MSG, \
CHECK_AMBARI_KRB_JAAS_CONFIGURATION_PROPERTY
from ambari_server.serverUtils import is_server_runing, get_ambari_server_api_base, \
get_ambari_admin_username_password_pair, perform_changes_via_rest_api, get_ssl_context, get_cluster_name, \
get_eligible_services, get_boolean_from_dictionary, get_value_from_dictionary
from ambari_server.setupActions import SETUP_ACTION, LDAP_SETUP_ACTION
from ambari_server.userInput import get_validated_string_input, get_prompt_default, read_password, get_YN_input, \
quit_if_has_answer
from contextlib import closing
from urllib2 import HTTPError
logger = logging.getLogger(__name__)
LDAP_AD="AD"
LDAP_IPA="IPA"
LDAP_GENERIC="Generic"
LDAP_TYPES = [LDAP_AD, LDAP_IPA, LDAP_GENERIC]
REGEX_IP_ADDRESS = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
REGEX_HOSTNAME = "^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
REGEX_PORT = "^([0-9]{1,5}$)"
REGEX_HOSTNAME_PORT = "^(.*:[0-9]{1,5}$)"
REGEX_TRUE_FALSE = "^(true|false)?$"
REGEX_SKIP_CONVERT = "^(skip|convert)?$"
REGEX_REFERRAL = "^(follow|ignore)?$"
REGEX_LDAP_TYPE = "^({})?$".format("|".join(LDAP_TYPES))
REGEX_ANYTHING = ".*"
LDAP_TO_PAM_MIGRATION_HELPER_CMD = "{0} -cp {1} " + \
"org.apache.ambari.server.security.authentication.LdapToPamMigrationHelper" + \
" >> " + configDefaults.SERVER_OUT_FILE + " 2>&1"
AUTO_GROUP_CREATION = "auto.group.creation"
SERVER_API_LDAP_URL = 'ldap_sync_events'
SETUP_LDAP_CONFIG_URL = 'services/AMBARI/components/AMBARI_SERVER/configurations/ldap-configuration'
PAM_CONFIG_FILE = 'pam.configuration'
LDAP_MGR_USERNAME_PROPERTY = "ambari.ldap.connectivity.bind_dn"
LDAP_MGR_PASSWORD_FILENAME = "ldap-password.dat"
LDAP_ANONYMOUS_BIND="ambari.ldap.connectivity.anonymous_bind"
LDAP_USE_SSL="ambari.ldap.connectivity.use_ssl"
LDAP_DISABLE_ENDPOINT_IDENTIFICATION = "ambari.ldap.advanced.disable_endpoint_identification"
NO_AUTH_METHOD_CONFIGURED = "no auth method"
AMBARI_LDAP_AUTH_ENABLED = "ambari.ldap.authentication.enabled"
LDAP_MANAGE_SERVICES = "ambari.ldap.manage_services"
LDAP_ENABLED_SERVICES = "ambari.ldap.enabled_services"
WILDCARD_FOR_ALL_SERVICES = "*"
FETCH_SERVICES_FOR_LDAP_ENTRYPOINT = "clusters/%s/services?ServiceInfo/ldap_integration_supported=true&fields=ServiceInfo/*"
def read_master_key(isReset=False, options = None):
passwordPattern = ".*"
passwordPrompt = "Please provide master key for locking the credential store: "
passwordDescr = "Invalid characters in password. Use only alphanumeric or "\
"_ or - characters"
passwordDefault = ""
if isReset:
passwordPrompt = "Enter new Master Key: "
input = True
while(input):
masterKey = get_validated_string_input(passwordPrompt, passwordDefault, passwordPattern, passwordDescr,
True, True, answer = options.master_key)
if not masterKey:
print "Master Key cannot be empty!"
continue
masterKey2 = get_validated_string_input("Re-enter master key: ", passwordDefault, passwordPattern, passwordDescr,
True, True, answer = options.master_key)
if masterKey != masterKey2:
print "Master key did not match!"
continue
input = False
return masterKey
def save_master_key(options, master_key, key_location, persist=True):
if master_key:
jdk_path = find_jdk()
if jdk_path is None:
print_error_msg("No JDK found, please run the \"setup\" "
"command to install a JDK automatically or install any "
"JDK manually to " + configDefaults.JDK_INSTALL_DIR)
return 1
serverClassPath = ServerClassPath(get_ambari_properties(), options)
command = SECURITY_PROVIDER_KEY_CMD.format(get_java_exe_path(),
serverClassPath.get_full_ambari_classpath_escaped_for_shell(), master_key, key_location, persist)
(retcode, stdout, stderr) = run_os_command(command)
print_info_msg("Return code from credential provider save KEY: " +
str(retcode))
else:
print_error_msg("Master key cannot be None.")
def adjust_directory_permissions(ambari_user):
properties = get_ambari_properties()
bootstrap_dir = os.path.abspath(get_value_from_properties(properties, BOOTSTRAP_DIR_PROPERTY))
print_info_msg("Cleaning bootstrap directory ({0}) contents...".format(bootstrap_dir))
if os.path.exists(bootstrap_dir):
shutil.rmtree(bootstrap_dir) #Ignore the non-existent dir error
if not os.path.exists(bootstrap_dir):
try:
os.makedirs(bootstrap_dir)
except Exception, ex:
print_warning_msg("Failed recreating the bootstrap directory: {0}".format(str(ex)))
pass
else:
print_warning_msg("Bootstrap directory lingering around after 5s. Unable to complete the cleanup.")
pass
# Add the master key and credential store if they exist
keyLocation = get_master_key_location(properties)
masterKeyFile = search_file(SECURITY_MASTER_KEY_FILENAME, keyLocation)
if masterKeyFile:
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((masterKeyFile, configDefaults.MASTER_KEY_FILE_PERMISSIONS, "{0}", False))
credStoreFile = get_credential_store_location(properties)
if os.path.exists(credStoreFile):
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((credStoreFile, configDefaults.CREDENTIALS_STORE_FILE_PERMISSIONS, "{0}", False))
trust_store_location = properties[SSL_TRUSTSTORE_PATH_PROPERTY]
if trust_store_location:
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((trust_store_location, configDefaults.TRUST_STORE_LOCATION_PERMISSIONS, "{0}", False))
# Update JDK and JCE permissions
resources_dir = get_resources_location(properties)
jdk_file_name = properties.get_property(JDK_NAME_PROPERTY)
jce_file_name = properties.get_property(JCE_NAME_PROPERTY)
java_home = properties.get_property(JAVA_HOME_PROPERTY)
if jdk_file_name:
jdk_file_path = os.path.abspath(os.path.join(resources_dir, jdk_file_name))
if(os.path.exists(jdk_file_path)):
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jdk_file_path, "644", "{0}", False))
if jce_file_name:
jce_file_path = os.path.abspath(os.path.join(resources_dir, jce_file_name))
if(os.path.exists(jce_file_path)):
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jce_file_path, "644", "{0}", False))
if java_home:
jdk_security_dir = os.path.abspath(os.path.join(java_home, configDefaults.JDK_SECURITY_DIR))
if(os.path.exists(jdk_security_dir)):
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jdk_security_dir + "/*", "644", "{0}", True))
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((jdk_security_dir, "755", "{0}", False))
# Grant read permissions to all users. This is required when a non-admin user is configured to setup ambari-server.
# However, do not change ownership of the repo file to ambari user.
ambari_repo_file = get_ambari_repo_file_full_name()
if ambari_repo_file:
if (os.path.exists(ambari_repo_file)):
ambari_repo_file_owner = get_file_owner(ambari_repo_file)
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((ambari_repo_file, "644", ambari_repo_file_owner, False))
print "Adjusting ambari-server permissions and ownership..."
for pack in configDefaults.NR_ADJUST_OWNERSHIP_LIST:
file = pack[0]
mod = pack[1]
user = pack[2].format(ambari_user)
recursive = pack[3]
print_info_msg("Setting file permissions: {0} {1} {2} {3}".format(file, mod, user, recursive))
set_file_permissions(file, mod, user, recursive)
for pack in configDefaults.NR_CHANGE_OWNERSHIP_LIST:
path = pack[0]
user = pack[1].format(ambari_user)
recursive = pack[2]
print_info_msg("Changing ownership: {0} {1} {2}".format(path, user, recursive))
change_owner(path, user, recursive)
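# Note: each configDefaults.NR_ADJUST_OWNERSHIP_LIST entry consumed above is a
# 4-tuple of (path, permission mode, owner format string, recursive flag), and
# each NR_CHANGE_OWNERSHIP_LIST entry is a 3-tuple of (path, owner format
# string, recursive flag); the "{0}" placeholders are substituted with the
# Ambari user name.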
def configure_ldap_password(ldap_manager_password_option, interactive_mode):
password_default = ""
password_prompt = 'Enter Bind DN Password: '
confirm_password_prompt = 'Confirm Bind DN Password: '
password_pattern = ".*"
password_descr = "Invalid characters in password."
password = read_password(password_default, password_pattern, password_prompt, password_descr, ldap_manager_password_option, confirm_password_prompt) if interactive_mode else ldap_manager_password_option
return password
#
# Get the principal names from the given CSV file and set them on the given LDAP event specs.
#
def get_ldap_event_spec_names(file, specs, new_specs):
try:
if os.path.exists(file):
new_spec = new_specs[0]
with open(file, 'r') as names_file:
names = names_file.read()
new_spec['names'] = names.replace('\n', '').replace('\t', '')
names_file.close()
specs += new_specs
else:
err = 'Sync event creation failed. File ' + file + ' not found.'
raise FatalException(1, err)
except Exception as exception:
err = 'Caught exception reading file ' + file + ' : ' + str(exception)
raise FatalException(1, err)
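# Illustrative sketch (not part of the original module): for a hypothetical
# file /tmp/ldap-users.csv containing the single line "user1,user2", the calls
#
#   specs = []
#   new_specs = [{"principal_type": "users", "sync_type": "specific", "names": ""}]
#   get_ldap_event_spec_names("/tmp/ldap-users.csv", specs, new_specs)
#
# leave specs equal to [{"principal_type": "users", "sync_type": "specific",
# "names": "user1,user2"}]. Newlines and tabs are stripped from the file, so
# the names are expected to be comma-separated on a single line.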
class LdapSyncOptions:
def __init__(self, options):
try:
self.ldap_sync_all = options.ldap_sync_all
except AttributeError:
self.ldap_sync_all = False
try:
self.ldap_sync_existing = options.ldap_sync_existing
except AttributeError:
self.ldap_sync_existing = False
try:
self.ldap_sync_users = options.ldap_sync_users
except AttributeError:
self.ldap_sync_users = None
try:
self.ldap_sync_groups = options.ldap_sync_groups
except AttributeError:
self.ldap_sync_groups = None
try:
self.ldap_sync_admin_name = options.ldap_sync_admin_name
except AttributeError:
self.ldap_sync_admin_name = None
try:
self.ldap_sync_admin_password = options.ldap_sync_admin_password
except AttributeError:
self.ldap_sync_admin_password = None
try:
self.ldap_sync_post_process_existing_users = options.ldap_sync_post_process_existing_users
except AttributeError:
self.ldap_sync_post_process_existing_users = False
def no_ldap_sync_options_set(self):
return not self.ldap_sync_all and not self.ldap_sync_existing and self.ldap_sync_users is None and self.ldap_sync_groups is None
def get_ldap_property_from_db(properties, admin_login, admin_password, property_name):
ldap_properties_from_db = get_ldap_properties_from_db(properties, admin_login, admin_password)
return ldap_properties_from_db[property_name] if ldap_properties_from_db else None
def get_ldap_properties_from_db(properties, admin_login, admin_password):
ldap_properties = None
url = get_ambari_server_api_base(properties) + SETUP_LDAP_CONFIG_URL
admin_auth = base64.encodestring('%s:%s' % (admin_login, admin_password)).replace('\n', '')
request = urllib2.Request(url)
request.add_header('Authorization', 'Basic %s' % admin_auth)
request.add_header('X-Requested-By', 'ambari')
request.get_method = lambda: 'GET'
request_in_progress = True
sys.stdout.write('\nFetching LDAP configuration from DB')
num_of_tries = 0
while request_in_progress:
num_of_tries += 1
if num_of_tries == 60:
raise FatalException(1, "Could not fetch LDAP configuration within a minute; giving up!")
sys.stdout.write('.')
sys.stdout.flush()
try:
with closing(urllib2.urlopen(request, context=get_ssl_context(properties))) as response:
response_status_code = response.getcode()
if response_status_code != 200:
request_in_progress = False
err = 'Error while fetching LDAP configuration. Http status code - ' + str(response_status_code)
raise FatalException(1, err)
else:
response_body = json.loads(response.read())
ldap_properties = response_body['Configuration']['properties']
if not ldap_properties:
time.sleep(1)
else:
request_in_progress = False
except HTTPError as e:
if e.code == 404:
sys.stdout.write(' No configuration.')
return None
err = 'Error while fetching LDAP configuration. Error details: %s' % e
raise FatalException(1, err)
except Exception as e:
err = 'Error while fetching LDAP configuration. Error details: %s' % e
raise FatalException(1, err)
return ldap_properties
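# Behaviour summary for get_ldap_properties_from_db() above: it retries the
# ldap-configuration REST call (sleeping one second whenever the configuration
# comes back empty), gives up with a FatalException after 60 attempts, treats
# HTTP 404 as "no configuration stored" and returns None, and otherwise returns
# the Configuration/properties dictionary from the response.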
def is_ldap_enabled(properties, admin_login, admin_password):
ldap_enabled = get_ldap_property_from_db(properties, admin_login, admin_password, AMBARI_LDAP_AUTH_ENABLED)
return ldap_enabled if ldap_enabled is not None else 'false'
#
# Sync users and groups with configured LDAP
#
def sync_ldap(options):
logger.info("Sync users and groups with configured LDAP.")
properties = get_ambari_properties()
if get_value_from_properties(properties,CLIENT_SECURITY,"") == 'pam':
err = "PAM is configured. Can not sync LDAP."
raise FatalException(1, err)
server_status, pid = is_server_runing()
if not server_status:
err = 'Ambari Server is not running.'
raise FatalException(1, err)
if properties == -1:
raise FatalException(1, "Failed to read properties file.")
# set ldap sync options
ldap_sync_options = LdapSyncOptions(options)
if ldap_sync_options.no_ldap_sync_options_set():
err = 'Must specify a sync option (all, existing, users or groups). Please invoke ambari-server.py --help to print the options.'
raise FatalException(1, err)
#TODO: use serverUtils.get_ambari_admin_username_password_pair (requires changes in ambari-server.py too to modify option names)
admin_login = ldap_sync_options.ldap_sync_admin_name\
if ldap_sync_options.ldap_sync_admin_name is not None and ldap_sync_options.ldap_sync_admin_name \
else get_validated_string_input(prompt="Enter Ambari Admin login: ", default=None,
pattern=None, description=None,
is_pass=False, allowEmpty=False)
admin_password = ldap_sync_options.ldap_sync_admin_password \
if ldap_sync_options.ldap_sync_admin_password is not None and ldap_sync_options.ldap_sync_admin_password \
else get_validated_string_input(prompt="Enter Ambari Admin password: ", default=None,
pattern=None, description=None,
is_pass=True, allowEmpty=False)
if is_ldap_enabled(properties, admin_login, admin_password) != 'true':
err = "LDAP is not configured. Run 'ambari-server setup-ldap' first."
raise FatalException(1, err)
url = get_ambari_server_api_base(properties) + SERVER_API_LDAP_URL
admin_auth = base64.encodestring('%s:%s' % (admin_login, admin_password)).replace('\n', '')
request = urllib2.Request(url)
request.add_header('Authorization', 'Basic %s' % admin_auth)
request.add_header('X-Requested-By', 'ambari')
if ldap_sync_options.ldap_sync_all:
sys.stdout.write('\nSyncing all.')
bodies = [{"Event":{"specs":[{"principal_type":"users","sync_type":"all"},{"principal_type":"groups","sync_type":"all"}]}}]
elif ldap_sync_options.ldap_sync_existing:
sys.stdout.write('\nSyncing existing.')
bodies = [{"Event":{"specs":[{"principal_type":"users","sync_type":"existing"},{"principal_type":"groups","sync_type":"existing"}]}}]
else:
sys.stdout.write('\nSyncing specified users and groups.')
bodies = [{"Event":{"specs":[]}}]
body = bodies[0]
events = body['Event']
specs = events['specs']
if ldap_sync_options.ldap_sync_users is not None:
new_specs = [{"principal_type":"users","sync_type":"specific","names":""}]
get_ldap_event_spec_names(ldap_sync_options.ldap_sync_users, specs, new_specs)
if ldap_sync_options.ldap_sync_groups is not None:
new_specs = [{"principal_type":"groups","sync_type":"specific","names":""}]
get_ldap_event_spec_names(ldap_sync_options.ldap_sync_groups, specs, new_specs)
if ldap_sync_options.ldap_sync_post_process_existing_users:
for spec in bodies[0]["Event"]["specs"]:
spec["post_process_existing_users"] = "true"
if get_verbose():
sys.stdout.write('\nCalling API ' + url + ' : ' + str(bodies) + '\n')
request.add_data(json.dumps(bodies))
request.get_method = lambda: 'POST'
try:
response = urllib2.urlopen(request, context=get_ssl_context(properties))
except Exception as e:
err = 'Sync event creation failed. Error details: %s' % e
raise FatalException(1, err)
response_status_code = response.getcode()
if response_status_code != 201:
err = 'Error during syncing. Http status code - ' + str(response_status_code)
raise FatalException(1, err)
response_body = json.loads(response.read())
url = response_body['resources'][0]['href']
request = urllib2.Request(url)
request.add_header('Authorization', 'Basic %s' % admin_auth)
request.add_header('X-Requested-By', 'ambari')
body = [{"LDAP":{"synced_groups":"*","synced_users":"*"}}]
request.add_data(json.dumps(body))
request.get_method = lambda: 'GET'
request_in_progress = True
while request_in_progress:
sys.stdout.write('.')
sys.stdout.flush()
try:
response = urllib2.urlopen(request, context=get_ssl_context(properties))
except Exception as e:
request_in_progress = False
err = 'Sync event check failed. Error details: %s' % e
raise FatalException(1, err)
response_status_code = response.getcode()
if response_status_code != 200:
err = 'Error during syncing. Http status code - ' + str(response_status_code)
raise FatalException(1, err)
response_body = json.loads(response.read())
sync_info = response_body['Event']
if sync_info['status'] == 'ERROR':
raise FatalException(1, str(sync_info['status_detail']))
elif sync_info['status'] == 'COMPLETE':
print '\n\nCompleted LDAP Sync.'
print 'Summary:'
for principal_type, summary in sync_info['summary'].iteritems():
print ' {0}:'.format(principal_type)
for action, amount in summary.iteritems():
print ' {0} = {1!s}'.format(action, amount)
request_in_progress = False
else:
time.sleep(1)
sys.stdout.write('\n')
sys.stdout.flush()
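# Flow of sync_ldap() above: it POSTs a sync event (all, existing, or specific
# users and groups) to the ldap_sync_events endpoint, expects HTTP 201, then
# repeatedly GETs the returned resource href until Event/status becomes
# COMPLETE (printing the per-principal-type summary) or ERROR (raising a
# FatalException with the status_detail).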
def setup_master_key(options):
if not is_root():
warn = 'ambari-server setup-https is run as ' \
'non-root user, some sudo privileges might be required'
print warn
properties = get_ambari_properties()
if properties == -1:
raise FatalException(1, "Failed to read properties file.")
db_windows_auth_prop = properties.get_property(JDBC_USE_INTEGRATED_AUTH_PROPERTY)
db_sql_auth = False if db_windows_auth_prop and db_windows_auth_prop.lower() == 'true' else True
db_password = properties.get_property(JDBC_PASSWORD_PROPERTY)
# Encrypt passwords cannot be called before setup
if db_sql_auth and not db_password:
print 'Please call "setup" before "encrypt-passwords". Exiting...'
return 1
# Check configuration for location of master key
isSecure = get_is_secure(properties)
(isPersisted, masterKeyFile) = get_is_persisted(properties)
# Read clear text DB password from file
if db_sql_auth and not is_alias_string(db_password) and os.path.isfile(db_password):
with open(db_password, 'r') as passwdfile:
db_password = passwdfile.read()
ts_password = properties.get_property(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
resetKey = False
masterKey = None
if isSecure:
print "Password encryption is enabled."
resetKey = True if options.security_option is not None else get_YN_input("Do you want to reset Master Key? [y/n] (n): ", False)
# When only encrypting currently unencrypted passwords without resetting the
# key, ask for the master key if it is not persisted.
if isSecure and not isPersisted and not resetKey:
print "Master Key not persisted."
masterKey = get_original_master_key(properties, options)
pass
# Make sure both passwords are clear-text if master key is lost
if resetKey:
if not isPersisted:
print "Master Key not persisted."
masterKey = get_original_master_key(properties, options)
# Could not get the right master key, or the question was skipped with <enter>
if not masterKey:
print "To disable encryption, do the following:"
print "- Edit " + find_properties_file() + \
" and set " + SECURITY_IS_ENCRYPTION_ENABLED + " = " + "false."
err = "{0} is already encrypted. Please call {1} to store unencrypted" \
" password and call 'encrypt-passwords' again."
if db_sql_auth and db_password and is_alias_string(db_password):
print err.format('- Database password', "'" + SETUP_ACTION + "'")
if ts_password and is_alias_string(ts_password):
print err.format('TrustStore password', "'" + LDAP_SETUP_ACTION + "'")
return 1
pass
pass
pass
# Read back any encrypted passwords
if db_sql_auth and db_password and is_alias_string(db_password):
db_password = read_passwd_for_alias(JDBC_RCA_PASSWORD_ALIAS, masterKey)
if ts_password and is_alias_string(ts_password):
ts_password = read_passwd_for_alias(SSL_TRUSTSTORE_PASSWORD_ALIAS, masterKey)
# Read master key, if non-secure or reset is true
if resetKey or not isSecure:
masterKey = read_master_key(resetKey, options)
persist = get_YN_input("Do you want to persist master key. If you choose " \
"not to persist, you need to provide the Master " \
"Key while starting the ambari server as an env " \
"variable named " + SECURITY_KEY_ENV_VAR_NAME + \
" or the start will prompt for the master key."
" Persist [y/n] (y)? ", True, options.master_key_persist)
if persist:
save_master_key(options, masterKey, get_master_key_location(properties) + os.sep +
SECURITY_MASTER_KEY_FILENAME, persist)
elif not persist and masterKeyFile:
try:
os.remove(masterKeyFile)
print_info_msg("Deleting master key file at location: " + str(
masterKeyFile))
except Exception, e:
print 'ERROR: Could not remove master key file. %s' % e
# Blow up the credential store made with previous key, if any
store_file = get_credential_store_location(properties)
if os.path.exists(store_file):
try:
os.remove(store_file)
except:
print_warning_msg("Failed to remove credential store file.")
pass
pass
pass
propertyMap = {SECURITY_IS_ENCRYPTION_ENABLED: 'true'}
# Encrypt only un-encrypted passwords
if db_password and not is_alias_string(db_password):
retCode = save_passwd_for_alias(JDBC_RCA_PASSWORD_ALIAS, db_password, masterKey)
if retCode != 0:
print 'Failed to save secure database password.'
else:
propertyMap[JDBC_PASSWORD_PROPERTY] = get_alias_string(JDBC_RCA_PASSWORD_ALIAS)
remove_password_file(JDBC_PASSWORD_FILENAME)
if properties.get_property(JDBC_RCA_PASSWORD_FILE_PROPERTY):
propertyMap[JDBC_RCA_PASSWORD_FILE_PROPERTY] = get_alias_string(JDBC_RCA_PASSWORD_ALIAS)
pass
if ts_password and not is_alias_string(ts_password):
retCode = save_passwd_for_alias(SSL_TRUSTSTORE_PASSWORD_ALIAS, ts_password, masterKey)
if retCode != 0:
print 'Failed to save secure TrustStore password.'
else:
propertyMap[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS)
pass
update_properties_2(properties, propertyMap)
# Since files for store and master are created we need to ensure correct
# permissions
ambari_user = read_ambari_user()
if ambari_user:
adjust_directory_permissions(ambari_user)
return 0
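# setup_master_key() above works in three broad steps: (1) if encryption is
# already enabled, decrypt the stored database/truststore passwords with the
# existing master key; (2) read, and optionally persist, a new master key when
# resetting or enabling encryption; (3) re-encrypt the passwords under the
# current key and set SECURITY_IS_ENCRYPTION_ENABLED to 'true' via
# update_properties_2().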
def setup_ambari_krb5_jaas(options):
jaas_conf_file = search_file(SECURITY_KERBEROS_JASS_FILENAME, get_conf_dir())
if os.path.exists(jaas_conf_file):
print 'Setting up Ambari kerberos JAAS configuration to access ' + \
'secured Hadoop daemons...'
principal = get_validated_string_input('Enter ambari server\'s kerberos '
'principal name ([email protected]): ', '[email protected]', '.*', '', False,
False, answer = options.jaas_principal)
keytab = get_validated_string_input('Enter keytab path for ambari '
'server\'s kerberos principal: ',
'/etc/security/keytabs/ambari.keytab', '.*', False, False,
validatorFunction=is_valid_filepath, answer = options.jaas_keytab)
for line in fileinput.FileInput(jaas_conf_file, inplace=1):
line = re.sub('keyTab=.*$', 'keyTab="' + keytab + '"', line)
line = re.sub('principal=.*$', 'principal="' + principal + '"', line)
print line,
write_property(CHECK_AMBARI_KRB_JAAS_CONFIGURATION_PROPERTY, "true")
else:
raise NonFatalException('No jaas config file found at location: ' +
jaas_conf_file)
class LdapPropTemplate:
def __init__(self, properties, i_option, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_default=None):
self.prop_name = i_prop_name
self.option = i_option
stored_value = get_value_from_properties(properties, i_prop_name)
self.default_value = LdapDefault(stored_value) if stored_value else i_prop_default
self.prompt_pattern = i_prop_val_pattern
self.prompt_regex = i_prompt_regex
self.allow_empty_prompt = i_allow_empty_prompt
def get_default_value(self, ldap_type):
return self.default_value.get_default_value(ldap_type) if self.default_value else None
def get_prompt_text(self, ldap_type):
default_value = self.get_default_value(ldap_type)
return format_prop_val_prompt(self.prompt_pattern, default_value)
def get_input(self, ldap_type, interactive_mode):
default_value = self.get_default_value(ldap_type)
return get_validated_string_input(self.get_prompt_text(ldap_type),
default_value, self.prompt_regex,
"Invalid characters in the input!", False, self.allow_empty_prompt,
answer = self.option) if interactive_mode else self.option
def should_query_ldap_type(self):
return not self.allow_empty_prompt and not self.option and self.default_value and self.default_value.depends_on_ldap_type()
class LdapDefault:
def __init__(self, value):
self.default_value = value
def get_default_value(self, ldap_type):
return self.default_value
def depends_on_ldap_type(self):
return False
class LdapDefaultMap(LdapDefault):
def __init__(self, value_map):
LdapDefault.__init__(self, None)
self.default_value_map = value_map
def get_default_value(self, ldap_type):
return self.default_value_map[ldap_type] if self.default_value_map and ldap_type in self.default_value_map else None
def depends_on_ldap_type(self):
return True
def format_prop_val_prompt(prop_prompt_pattern, prop_default_value):
default_value = get_prompt_default(prop_default_value)
return prop_prompt_pattern.format((" " + default_value) if default_value is not None and default_value != "" else "")
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def init_ldap_properties_list_reqd(properties, options):
# python2.x dict is not ordered
ldap_properties = [
LdapPropTemplate(properties, options.ldap_primary_host, "ambari.ldap.connectivity.server.host", "Primary LDAP Host{0}: ", REGEX_HOSTNAME, False, LdapDefaultMap({LDAP_IPA:'ipa.ambari.apache.org', LDAP_GENERIC:'ldap.ambari.apache.org'})),
LdapPropTemplate(properties, options.ldap_primary_port, "ambari.ldap.connectivity.server.port", "Primary LDAP Port{0}: ", REGEX_PORT, False, LdapDefaultMap({LDAP_IPA:'636', LDAP_GENERIC:'389'})),
LdapPropTemplate(properties, options.ldap_secondary_host, "ambari.ldap.connectivity.secondary.server.host", "Secondary LDAP Host <Optional>{0}: ", REGEX_HOSTNAME, True),
LdapPropTemplate(properties, options.ldap_secondary_port, "ambari.ldap.connectivity.secondary.server.port", "Secondary LDAP Port <Optional>{0}: ", REGEX_PORT, True),
LdapPropTemplate(properties, options.ldap_ssl, "ambari.ldap.connectivity.use_ssl", "Use SSL [true/false]{0}: ", REGEX_TRUE_FALSE, False, LdapDefaultMap({LDAP_AD:'false', LDAP_IPA:'true', LDAP_GENERIC:'false'})),
LdapPropTemplate(properties, options.ldap_user_attr, "ambari.ldap.attributes.user.name_attr", "User ID attribute{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'sAMAccountName', LDAP_IPA:'uid', LDAP_GENERIC:'uid'})),
LdapPropTemplate(properties, options.ldap_base_dn, "ambari.ldap.attributes.user.search_base", "Search Base{0}: ", REGEX_ANYTHING, False, LdapDefault("dc=ambari,dc=apache,dc=org")),
LdapPropTemplate(properties, options.ldap_referral, "ambari.ldap.advanced.referrals", "Referral method [follow/ignore]{0}: ", REGEX_REFERRAL, True, LdapDefault("follow")),
LdapPropTemplate(properties, options.ldap_bind_anonym, "ambari.ldap.connectivity.anonymous_bind" "Bind anonymously [true/false]{0}: ", REGEX_TRUE_FALSE, False, LdapDefault("false"))
]
return ldap_properties
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def init_ldap_properties_list_reqd(properties, options):
ldap_properties = [
LdapPropTemplate(properties, options.ldap_primary_host, "ambari.ldap.connectivity.server.host", "Primary LDAP Host{0}: ", REGEX_HOSTNAME, False, LdapDefaultMap({LDAP_IPA:'ipa.ambari.apache.org', LDAP_GENERIC:'ldap.ambari.apache.org'})),
LdapPropTemplate(properties, options.ldap_primary_port, "ambari.ldap.connectivity.server.port", "Primary LDAP Port{0}: ", REGEX_PORT, False, LdapDefaultMap({LDAP_IPA:'636', LDAP_GENERIC:'389'})),
LdapPropTemplate(properties, options.ldap_secondary_host, "ambari.ldap.connectivity.secondary.server.host", "Secondary LDAP Host <Optional>{0}: ", REGEX_HOSTNAME, True),
LdapPropTemplate(properties, options.ldap_secondary_port, "ambari.ldap.connectivity.secondary.server.port", "Secondary LDAP Port <Optional>{0}: ", REGEX_PORT, True),
LdapPropTemplate(properties, options.ldap_ssl, "ambari.ldap.connectivity.use_ssl", "Use SSL [true/false]{0}: ", REGEX_TRUE_FALSE, False, LdapDefaultMap({LDAP_AD:'false', LDAP_IPA:'true', LDAP_GENERIC:'false'})),
LdapPropTemplate(properties, options.ldap_user_class, "ambari.ldap.attributes.user.object_class", "User object class{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'user', LDAP_IPA:'posixAccount', LDAP_GENERIC:'posixUser'})),
LdapPropTemplate(properties, options.ldap_user_attr, "ambari.ldap.attributes.user.name_attr", "User ID attribute{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'sAMAccountName', LDAP_IPA:'uid', LDAP_GENERIC:'uid'})),
LdapPropTemplate(properties, options.ldap_user_group_member_attr, "ambari.ldap.attributes.user.group_member_attr", "User group member attribute{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'memberof', LDAP_IPA:'member', LDAP_GENERIC:'memberof'})),
LdapPropTemplate(properties, options.ldap_group_class, "ambari.ldap.attributes.group.object_class", "Group object class{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'group', LDAP_IPA:'posixGroup', LDAP_GENERIC:'posixGroup'})),
LdapPropTemplate(properties, options.ldap_group_attr, "ambari.ldap.attributes.group.name_attr", "Group name attribute{0}: ", REGEX_ANYTHING, False, LdapDefault("cn")),
LdapPropTemplate(properties, options.ldap_member_attr, "ambari.ldap.attributes.group.member_attr", "Group member attribute{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'member', LDAP_IPA:'member', LDAP_GENERIC:'memberUid'})),
LdapPropTemplate(properties, options.ldap_dn, "ambari.ldap.attributes.dn_attr", "Distinguished name attribute{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'distinguishedName', LDAP_IPA:'dn', LDAP_GENERIC:'dn'})),
LdapPropTemplate(properties, options.ldap_base_dn, "ambari.ldap.attributes.user.search_base", "Search Base{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({LDAP_AD:'dc=ambari,dc=apache,dc=org', LDAP_IPA:'cn=accounts,dc=ambari,dc=apache,dc=org', LDAP_GENERIC:'dc=ambari,dc=apache,dc=org'})),
LdapPropTemplate(properties, options.ldap_referral, "ambari.ldap.advanced.referrals", "Referral method [follow/ignore]{0}: ", REGEX_REFERRAL, True, LdapDefault("follow")),
LdapPropTemplate(properties, options.ldap_bind_anonym, "ambari.ldap.connectivity.anonymous_bind", "Bind anonymously [true/false]{0}: ", REGEX_TRUE_FALSE, False, LdapDefault("false")),
LdapPropTemplate(properties, options.ldap_sync_username_collisions_behavior, "ambari.ldap.advanced.collision_behavior", "Handling behavior for username collisions [convert/skip] for LDAP sync{0}: ", REGEX_SKIP_CONVERT, False, LdapDefault("skip")),
LdapPropTemplate(properties, options.ldap_force_lowercase_usernames, "ambari.ldap.advanced.force_lowercase_usernames", "Force lower-case user names [true/false]{0}:", REGEX_TRUE_FALSE, True),
LdapPropTemplate(properties, options.ldap_pagination_enabled, "ambari.ldap.advanced.pagination_enabled", "Results from LDAP are paginated when requested [true/false]{0}:", REGEX_TRUE_FALSE, True)
]
return ldap_properties
def update_ldap_configuration(admin_login, admin_password, properties, ldap_property_value_map):
request_data = {
"Configuration": {
"category": "ldap-configuration",
"properties": {
}
}
}
request_data['Configuration']['properties'] = ldap_property_value_map
perform_changes_via_rest_api(properties, admin_login, admin_password, SETUP_LDAP_CONFIG_URL, 'PUT', request_data)
def should_query_ldap_type(ldap_property_list_reqd):
for ldap_prop in ldap_property_list_reqd:
if ldap_prop.should_query_ldap_type():
return True
return False
def query_ldap_type(ldap_type_option):
return get_validated_string_input("Please select the type of LDAP you want to use [{}]({}):".format("/".join(LDAP_TYPES), LDAP_GENERIC),
LDAP_GENERIC,
REGEX_LDAP_TYPE,
"Please enter one of the followings '{}'!".format("', '".join(LDAP_TYPES)),
False,
False,
answer = ldap_type_option)
def is_interactive(property_list):
for prop in property_list:
if not prop.option and not prop.allow_empty_prompt:
return True
return False
def setup_ldap(options):
logger.info("Setup LDAP.")
properties = get_ambari_properties()
server_status, pid = is_server_runing()
if not server_status:
err = 'Ambari Server is not running.'
raise FatalException(1, err)
enforce_ldap = options.ldap_force_setup if options.ldap_force_setup is not None else False
if not enforce_ldap:
current_client_security = get_value_from_properties(properties, CLIENT_SECURITY, NO_AUTH_METHOD_CONFIGURED)
if current_client_security != 'ldap':
query = "Currently '{0}' is configured, do you wish to use LDAP instead [y/n] ({1})? "
ldap_setup_default = 'y' if current_client_security == NO_AUTH_METHOD_CONFIGURED else 'n'
if get_YN_input(query.format(current_client_security, ldap_setup_default), ldap_setup_default == 'y'):
pass
else:
err = "Currently '" + current_client_security + "' configured. Can not setup LDAP."
raise FatalException(1, err)
admin_login, admin_password = get_ambari_admin_username_password_pair(options)
ldap_properties = get_ldap_properties_from_db(properties, admin_login, admin_password)
if ldap_properties:
properties.update(ldap_properties)
sys.stdout.write('\n')
isSecure = get_is_secure(properties)
if options.ldap_url:
options.ldap_primary_host = options.ldap_url.split(':')[0]
options.ldap_primary_port = options.ldap_url.split(':')[1]
if options.ldap_secondary_url:
options.ldap_secondary_host = options.ldap_secondary_url.split(':')[0]
options.ldap_secondary_port = options.ldap_secondary_url.split(':')[1]
ldap_property_list_reqd = init_ldap_properties_list_reqd(properties, options)
ldap_bind_dn_template = LdapPropTemplate(properties, options.ldap_manager_dn, LDAP_MGR_USERNAME_PROPERTY, "Bind DN{0}: ", REGEX_ANYTHING, False, LdapDefaultMap({
LDAP_AD:'cn=ldapbind,dc=ambari,dc=apache,dc=org',
LDAP_IPA:'uid=ldapbind,cn=users,cn=accounts,dc=ambari,dc=apache,dc=org',
LDAP_GENERIC:'uid=ldapbind,cn=users,dc=ambari,dc=apache,dc=org'}))
ldap_type = query_ldap_type(options.ldap_type) if options.ldap_type or should_query_ldap_type(ldap_property_list_reqd) else LDAP_GENERIC
ldap_property_list_opt = [LDAP_MGR_USERNAME_PROPERTY,
LDAP_MGR_PASSWORD_PROPERTY,
LDAP_DISABLE_ENDPOINT_IDENTIFICATION,
SSL_TRUSTSTORE_TYPE_PROPERTY,
SSL_TRUSTSTORE_PATH_PROPERTY,
SSL_TRUSTSTORE_PASSWORD_PROPERTY,
LDAP_MANAGE_SERVICES,
LDAP_ENABLED_SERVICES]
ldap_property_list_passwords=[LDAP_MGR_PASSWORD_PROPERTY, SSL_TRUSTSTORE_PASSWORD_PROPERTY]
ssl_truststore_type_default = get_value_from_properties(properties, SSL_TRUSTSTORE_TYPE_PROPERTY, "jks")
ssl_truststore_path_default = get_value_from_properties(properties, SSL_TRUSTSTORE_PATH_PROPERTY)
disable_endpoint_identification_default = get_value_from_properties(properties, LDAP_DISABLE_ENDPOINT_IDENTIFICATION, "False")
ldap_property_value_map = {}
ldap_property_values_in_ambari_properties = {}
interactive_mode = is_interactive(ldap_property_list_reqd)
for ldap_prop in ldap_property_list_reqd:
input = ldap_prop.get_input(ldap_type, interactive_mode)
if input is not None and input != "":
ldap_property_value_map[ldap_prop.prop_name] = input
if ldap_prop.prop_name == LDAP_ANONYMOUS_BIND:
anonymous = (input and input.lower() == 'true')
mgr_password = None
# Ask for manager credentials only if bindAnonymously is false
if not anonymous:
username = ldap_bind_dn_template.get_input(ldap_type, interactive_mode)
ldap_property_value_map[LDAP_MGR_USERNAME_PROPERTY] = username
mgr_password = configure_ldap_password(options.ldap_manager_password, interactive_mode)
ldap_property_value_map[LDAP_MGR_PASSWORD_PROPERTY] = mgr_password
elif ldap_prop.prop_name == LDAP_USE_SSL:
ldaps = (input and input.lower() == 'true')
ts_password = None
if ldaps:
disable_endpoint_identification = get_validated_string_input("Disable endpoint identification during SSL handshake [true/false] ({0}): ".format(disable_endpoint_identification_default),
disable_endpoint_identification_default,
REGEX_TRUE_FALSE, "Invalid characters in the input!", False, allowEmpty=True,
answer=options.ldap_sync_disable_endpoint_identification) if interactive_mode else options.ldap_sync_disable_endpoint_identification
if disable_endpoint_identification is not None:
ldap_property_value_map[LDAP_DISABLE_ENDPOINT_IDENTIFICATION] = disable_endpoint_identification
truststore_default = "n"
truststore_set = bool(ssl_truststore_path_default)
if truststore_set:
truststore_default = "y"
custom_trust_store = True if options.trust_store_path is not None and options.trust_store_path else False
if not custom_trust_store:
custom_trust_store = get_YN_input("Do you want to provide custom TrustStore for Ambari [y/n] ({0})?".
format(truststore_default),
truststore_set) if interactive_mode else None
if custom_trust_store:
ts_type = get_validated_string_input("TrustStore type [jks/jceks/pkcs12] {0}:".format(get_prompt_default(ssl_truststore_type_default)),
ssl_truststore_type_default, "^(jks|jceks|pkcs12)?$", "Wrong type", False, answer=options.trust_store_type) if interactive_mode else options.trust_store_type
ts_path = None
while True:
ts_path = get_validated_string_input(format_prop_val_prompt("Path to TrustStore file{0}: ", ssl_truststore_path_default),
ssl_truststore_path_default, ".*", False, False, answer = options.trust_store_path) if interactive_mode else options.trust_store_path
if os.path.exists(ts_path):
break
else:
print 'File not found.'
hasAnswer = options.trust_store_path is not None and options.trust_store_path
quit_if_has_answer(hasAnswer)
ts_password = read_password("", ".*", "Password for TrustStore:", "Invalid characters in password", options.trust_store_password) if interactive_mode else options.trust_store_password
ldap_property_values_in_ambari_properties[SSL_TRUSTSTORE_TYPE_PROPERTY] = ts_type
ldap_property_values_in_ambari_properties[SSL_TRUSTSTORE_PATH_PROPERTY] = ts_path
ldap_property_values_in_ambari_properties[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = ts_password
pass
elif properties.get_property(SSL_TRUSTSTORE_TYPE_PROPERTY):
print 'The TrustStore is already configured: '
print ' ' + SSL_TRUSTSTORE_TYPE_PROPERTY + ' = ' + properties.get_property(SSL_TRUSTSTORE_TYPE_PROPERTY)
print ' ' + SSL_TRUSTSTORE_PATH_PROPERTY + ' = ' + properties.get_property(SSL_TRUSTSTORE_PATH_PROPERTY)
print ' ' + SSL_TRUSTSTORE_PASSWORD_PROPERTY + ' = ' + properties.get_property(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
if get_YN_input("Do you want to remove these properties [y/n] (y)? ", True, options.trust_store_reconfigure):
properties.removeOldProp(SSL_TRUSTSTORE_TYPE_PROPERTY)
properties.removeOldProp(SSL_TRUSTSTORE_PATH_PROPERTY)
properties.removeOldProp(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
pass
pass
populate_ambari_requires_ldap(options, ldap_property_value_map)
populate_service_management(options, ldap_property_value_map, properties, admin_login, admin_password)
print '=' * 20
print 'Review Settings'
print '=' * 20
for property in ldap_property_list_reqd:
if ldap_property_value_map.has_key(property.prop_name):
print("%s %s" % (property.get_prompt_text(ldap_type), ldap_property_value_map[property.prop_name]))
for property in ldap_property_list_opt:
if ldap_property_value_map.has_key(property):
if property not in ldap_property_list_passwords:
print("%s: %s" % (property, ldap_property_value_map[property]))
else:
print("%s: %s" % (property, BLIND_PASSWORD))
for property in ldap_property_list_opt:
if ldap_property_values_in_ambari_properties.has_key(property):
if property not in ldap_property_list_passwords:
print("%s: %s" % (property, ldap_property_values_in_ambari_properties[property]))
else:
print("%s: %s" % (property, BLIND_PASSWORD))
save_settings = True if options.ldap_save_settings is not None else get_YN_input("Save settings [y/n] (y)? ", True)
if save_settings:
if isSecure:
if ts_password:
encrypted_passwd = encrypt_password(SSL_TRUSTSTORE_PASSWORD_ALIAS, ts_password, options)
if ts_password != encrypted_passwd:
ldap_property_values_in_ambari_properties[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = encrypted_passwd
print 'Saving LDAP properties...'
#Saving LDAP configuration in Ambari DB using the REST API
update_ldap_configuration(admin_login, admin_password, properties, ldap_property_value_map)
#The only properties we want to write out in Ambari.properties are the client.security type being LDAP and the custom Truststore related properties (if any)
ldap_property_values_in_ambari_properties[CLIENT_SECURITY] = 'ldap'
update_properties_2(properties, ldap_property_values_in_ambari_properties)
print 'Saving LDAP properties finished'
return 0
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def generate_env(options, ambari_user, current_user):
properties = get_ambari_properties()
isSecure = get_is_secure(properties)
(isPersisted, masterKeyFile) = get_is_persisted(properties)
environ = os.environ.copy()
# Need to handle master key not persisted scenario
if isSecure and not masterKeyFile:
prompt = False
masterKey = environ.get(SECURITY_KEY_ENV_VAR_NAME)
if masterKey is not None and masterKey != "":
pass
else:
keyLocation = environ.get(SECURITY_MASTER_KEY_LOCATION)
if keyLocation is not None:
try:
# Verify master key can be read by the java process
with open(keyLocation, 'r'):
pass
except IOError:
print_warning_msg("Cannot read Master key from path specified in "
"environemnt.")
prompt = True
else:
# Key not provided in the environment
prompt = True
if prompt:
import pwd
masterKey = get_original_master_key(properties)
environ[SECURITY_KEY_ENV_VAR_NAME] = masterKey
tempDir = tempfile.gettempdir()
tempFilePath = tempDir + os.sep + "masterkey"
save_master_key(options, masterKey, tempFilePath, True)
if ambari_user != current_user:
uid = pwd.getpwnam(ambari_user).pw_uid
gid = pwd.getpwnam(ambari_user).pw_gid
os.chown(tempFilePath, uid, gid)
else:
os.chmod(tempFilePath, stat.S_IREAD | stat.S_IWRITE)
if tempFilePath is not None:
environ[SECURITY_MASTER_KEY_LOCATION] = tempFilePath
return environ
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def generate_env(options, ambari_user, current_user):
return os.environ.copy()
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def ensure_can_start_under_current_user(ambari_user):
#Ignore the requirement to run as root. In Windows, by default the child process inherits the security context
# and the environment from the parent process.
return ""
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def ensure_can_start_under_current_user(ambari_user):
current_user = getpass.getuser()
if ambari_user is None:
err = "Unable to detect a system user for Ambari Server.\n" + SETUP_OR_UPGRADE_MSG
raise FatalException(1, err)
if current_user != ambari_user and not is_root():
err = "Unable to start Ambari Server as user {0}. Please either run \"ambari-server start\" " \
"command as root, as sudo or as user \"{1}\"".format(current_user, ambari_user)
raise FatalException(1, err)
return current_user
class PamPropTemplate:
def __init__(self, properties, i_option, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_name_default=None):
self.prop_name = i_prop_name
self.option = i_option
self.pam_prop_name = get_value_from_properties(properties, i_prop_name, i_prop_name_default)
self.pam_prop_val_prompt = i_prop_val_pattern.format(get_prompt_default(self.pam_prop_name))
self.prompt_regex = i_prompt_regex
self.allow_empty_prompt = i_allow_empty_prompt
def init_pam_properties_list_reqd(properties, options):
properties = [
PamPropTemplate(properties, options.pam_config_file, PAM_CONFIG_FILE, "PAM configuration file* {0}: ", REGEX_ANYTHING, False, "/etc/pam.d/ambari"),
PamPropTemplate(properties, options.pam_auto_create_groups, AUTO_GROUP_CREATION, "Do you want to allow automatic group creation* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false"),
]
return properties
def setup_pam(options):
if not is_root():
err = 'Ambari-server setup-pam should be run with root-level privileges'
raise FatalException(4, err)
properties = get_ambari_properties()
if get_value_from_properties(properties,CLIENT_SECURITY,"") == 'ldap':
query = "LDAP is currently configured, do you wish to use PAM instead [y/n] (n)? "
if get_YN_input(query, False):
pass
else:
err = "LDAP is configured. Can not setup PAM."
raise FatalException(1, err)
pam_property_list_reqd = init_pam_properties_list_reqd(properties, options)
pam_property_value_map = {}
pam_property_value_map[CLIENT_SECURITY] = 'pam'
for pam_prop in pam_property_list_reqd:
input = get_validated_string_input(pam_prop.pam_prop_val_prompt, pam_prop.pam_prop_name, pam_prop.prompt_regex,
"Invalid characters in the input!", False, pam_prop.allow_empty_prompt,
answer = pam_prop.option)
if input is not None and input != "":
pam_property_value_map[pam_prop.prop_name] = input
# Verify that the PAM config file exists, else show warning...
pam_config_file = pam_property_value_map[PAM_CONFIG_FILE]
if not os.path.exists(pam_config_file):
print_warning_msg("The PAM configuration file, {0} does not exist. " \
"Please create it before restarting Ambari.".format(pam_config_file))
update_properties_2(properties, pam_property_value_map)
print 'Saving...done'
return 0
#
# Migration of LDAP users & groups to PAM
#
def migrate_ldap_pam(args):
properties = get_ambari_properties()
if get_value_from_properties(properties,CLIENT_SECURITY,"") != 'pam':
err = "PAM is not configured. Please configure PAM authentication first."
raise FatalException(1, err)
db_title = get_db_type(properties).title
confirm = get_YN_input("Ambari Server configured for %s. Confirm "
"you have made a backup of the Ambari Server database [y/n] (y)? " % db_title, True)
if not confirm:
print_error_msg("Database backup is not confirmed")
return 1
jdk_path = get_java_exe_path()
if jdk_path is None:
print_error_msg("No JDK found, please run the \"setup\" "
"command to install a JDK automatically or install any "
"JDK manually to " + configDefaults.JDK_INSTALL_DIR)
return 1
# At this point, args does not yet have the Ambari database information.
# Augment args with the correct Ambari database information.
parse_properties_file(args)
ensure_jdbc_driver_is_installed(args, properties)
print 'Migrating LDAP Users & Groups to PAM'
serverClassPath = ServerClassPath(properties, args)
class_path = serverClassPath.get_full_ambari_classpath_escaped_for_shell()
command = LDAP_TO_PAM_MIGRATION_HELPER_CMD.format(jdk_path, class_path)
ambari_user = read_ambari_user()
current_user = ensure_can_start_under_current_user(ambari_user)
environ = generate_env(args, ambari_user, current_user)
(retcode, stdout, stderr) = run_os_command(command, env=environ)
print_info_msg("Return code from LDAP to PAM migration command, retcode = " + str(retcode))
if stdout:
print "Console output from LDAP to PAM migration command:"
print stdout
print
if stderr:
print "Error output from LDAP to PAM migration command:"
print stderr
print
if retcode > 0:
print_error_msg("Error executing LDAP to PAM migration, please check the server logs.")
else:
print_info_msg('LDAP to PAM migration completed')
return retcode
def populate_ambari_requires_ldap(options, properties):
if options.ldap_enabled_ambari is None:
enabled = get_boolean_from_dictionary(properties, AMBARI_LDAP_AUTH_ENABLED, False)
enabled = get_YN_input("Use LDAP authentication for Ambari [y/n] ({0})? ".format('y' if enabled else 'n'), enabled)
else:
enabled = 'true' == options.ldap_enabled_ambari
properties[AMBARI_LDAP_AUTH_ENABLED] = 'true' if enabled else 'false'
def populate_service_management(options, properties, ambari_properties, admin_login, admin_password):
services = ""
if options.ldap_enabled_services is None:
if options.ldap_manage_services is None:
manage_services = get_boolean_from_dictionary(properties, LDAP_MANAGE_SERVICES, False)
manage_services = get_YN_input("Manage LDAP configurations for eligible services [y/n] ({0})? ".format('y' if manage_services else 'n'), manage_services)
else:
manage_services = 'true' == options.ldap_manage_services
stored_manage_services = get_boolean_from_dictionary(properties, LDAP_MANAGE_SERVICES, False)
print("Manage LDAP configurations for eligible services [y/n] ({0})? {1}".format('y' if stored_manage_services else 'n', 'y' if manage_services else 'n'))
if manage_services:
enabled_services = get_value_from_dictionary(properties, LDAP_ENABLED_SERVICES, "").upper().split(',')
all = WILDCARD_FOR_ALL_SERVICES in enabled_services
configure_for_all_services = get_YN_input(" Manage LDAP for all services [y/n] ({0})? ".format('y' if all else 'n'), all)
if configure_for_all_services:
services = WILDCARD_FOR_ALL_SERVICES
else:
cluster_name = get_cluster_name(ambari_properties, admin_login, admin_password)
if cluster_name:
eligible_services = get_eligible_services(ambari_properties, admin_login, admin_password, cluster_name, FETCH_SERVICES_FOR_LDAP_ENTRYPOINT, 'LDAP')
if eligible_services and len(eligible_services) > 0:
service_list = []
for service in eligible_services:
enabled = service.upper() in enabled_services
question = " Manage LDAP for {0} [y/n] ({1})? ".format(service, 'y' if enabled else 'n')
if get_YN_input(question, enabled):
service_list.append(service)
services = ','.join(service_list)
else:
print (" There are no eligible services installed.")
else:
if options.ldap_manage_services:
manage_services = 'true' == options.ldap_manage_services
else:
manage_services = True
services = options.ldap_enabled_services.upper() if options.ldap_enabled_services else ""
properties[LDAP_MANAGE_SERVICES] = 'true' if manage_services else 'false'
properties[LDAP_ENABLED_SERVICES] = services
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/test_coord.py
|
import os
import shutil
import subprocess
import sys
import pytest
from kaggle_runner import utils
from kaggle_runner.runners import coordinator
@pytest.fixture(scope="module")
def runner_configs():
return [
{"port":23454, "size": 384, "network": "intercept", "AMQPURL": utils.AMQPURL()},
{"port":23454, "size": 384, "network": "intercept-resnet", "AMQPURL": utils.AMQPURL()},
]
class TestCoordinator:
coordinator = None
tmp_path = "."
@classmethod
def setup_class(cls):
cls.tmp_path = ".runners"
cls.coordinator = coordinator.Coordinator(cls.tmp_path, "Test Runner")
print("setup_class called once for the class")
@classmethod
def teardown_class(cls):
print("teardown_class called once for the class")
def setup_method(self, method):
if os.path.exists(self.tmp_path):
shutil.rmtree(self.tmp_path)
os.mkdir(self.tmp_path)
print("setup_method called for every method")
def teardown_method(self, method):
# shutil.rmtree(self.tmp_path) # for debug
print("teardown_method called for every method")
def test_generate_runner(self, runner_configs):
self.coordinator.create_runner(runner_configs[1], 19999, False)
# ret = self.coordinator.run_local(path)
# assert ret.returncode == 0
@pytest.mark.timeout(15)
def test_push_runner_nb(self, runner_configs):
path = self.coordinator.create_runner(runner_configs[1], 19999, False)
# ret = self.coordinator.run_local(path)
# assert ret.returncode == 0
if os.getenv("CI") != "true":
ret = self.coordinator.push(path) # just push first
assert ret.returncode == 0
def test_push_runner_cmd(self, runner_configs):
subprocess.run(f"python -m kaggle_runner "
f"{runner_configs[1]['port']} dev", shell=True, check=True)
@pytest.mark.timeout(10)
@pytest.mark.skip("runner runs in computation server, no need test local")
def test_get_mq_back(self, runner_configs):
path = self.coordinator.create_runner(runner_configs[1], 20202)
ret = self.coordinator.push(path)
assert ret.returncode == 0
# just rely on the timeout; if no result arrives within it, treat that as an error
self.coordinator._get_result(timeout=100)
@pytest.mark.skip("runner runs in computation server, no need test local")
def test_create_runners(self, runner_configs):
"""Should just use unit test setup and teardown
"""
for c in runner_configs:
r = self.coordinator.create_runner(c) # we need to let it run
assert r.AMQPURL is not None
class TestMain:
def test_call_remote_mq(self):
call_params = [
"python",
"main.py",
"amqp://drdsfaew:[email protected]/drdsfaew",
"384", # size 256+128
"123",
"intercept-resnet",
]
utils.logger.debug(" ".join(call_params))
ret = subprocess.run(call_params)
assert ret.returncode == 0
@pytest.mark.skip("test done")
def test_call_local(self):
call_params = [
"python",
"main.py",
"amqp://guest:[email protected]/",
"384", # size 256+128
"123",
"intercept-resnet",
]
utils.logger.debug(" ".join(call_params))
ret = subprocess.run(call_params)
assert ret.returncode == 0
|
[] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
python
| 1 | 0 | |
example_project/example_project/settings/base.py
|
"""
Base Django settings
====================
For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
import pathlib
from django.urls import reverse_lazy
PROJECT_PACKAGE_NAME = 'example'
# BASE DIRECTORIES
# ------------------------------------------------------------------------------
# Two base directories are considered for this project:
# The PROJECT_PATH corresponds to the path towards the root of this project (the root of the
# repository).
# The INSTALL_PATH corresponds to the path towards the directory where the project's repository
# is present on the filesystem.
# By default INSTALL_PATH is the same as PROJECT_PATH.
PROJECT_PATH = pathlib.Path(__file__).parents[2]
INSTALL_PATH = pathlib.Path(os.environ.get('DJANGO_INSTALL_PATH')) \
if 'DJANGO_INSTALL_PATH' in os.environ else PROJECT_PATH
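# For example, running the project with DJANGO_INSTALL_PATH=/srv/example makes STATIC_ROOT and
# MEDIA_ROOT (defined below) resolve to /srv/example/static and /srv/example/media respectively.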
# APP CONFIGURATION
# ------------------------------------------------------------------------------
INSTALLED_APPS = (
# Django apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.sitemaps',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third-party apps
'oidc_rp',
# Django's admin app
'django.contrib.admin',
)
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'oidc_rp.middleware.OIDCRefreshIDTokenMiddleware',
)
# DEBUG CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': str(PROJECT_PATH / 'example.db'),
},
}
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'EST'
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*', ]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#languages
LANGUAGES = (
('en', 'English'),
('fr', 'Français'),
)
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = 'INSECURE'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (
str(PROJECT_PATH / PROJECT_PACKAGE_NAME / 'templates'),
),
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
'loaders': [
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
]
},
},
]
# FILE STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(INSTALL_PATH / 'static')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(PROJECT_PATH / PROJECT_PACKAGE_NAME / 'static' / 'build'),
str(PROJECT_PATH / PROJECT_PACKAGE_NAME / 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-STATICFILES_STORAGE
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(INSTALL_PATH / 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL CONFIGURATION
# ------------------------------------------------------------------------------
ROOT_URLCONF = PROJECT_PACKAGE_NAME + '_project.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
# AUTH CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = reverse_lazy('oidc_auth_request')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'oidc_rp.backends.OIDCAuthBackend',
'django.contrib.auth.backends.ModelBackend',
]
# OIDC RELYING PARTY CONFIGURATION
# ------------------------------------------------------------------------------
OIDC_RP_PROVIDER_ENDPOINT = 'https://example.com/a/'
OIDC_RP_CLIENT_ID = 'CLIENT_ID'
OIDC_RP_CLIENT_SECRET = 'INSECURE_CLIENT_SECRET'
OIDC_RP_SIGNUP_URL = 'https://example.com/signup/'
|
[] |
[] |
[
"DJANGO_INSTALL_PATH"
] |
[]
|
["DJANGO_INSTALL_PATH"]
|
python
| 1 | 0 | |
smarthome_kvant/smarthome_kvant/wsgi.py
|
"""
WSGI config for smarthome_kvant project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'smarthome_kvant.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
rimproject/wsgi.py
|
"""
WSGI config for rimproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rimproject.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cit_038_waccm_atmospheric_chemistry_model/contents/src/__init__.py
|
from __future__ import unicode_literals
import os
import sys
import urllib
import datetime
import logging
import subprocess
import eeUtil
import urllib.request
import requests
from bs4 import BeautifulSoup
import copy
import numpy as np
import ee
import time
# This dataset owner has created a subset of the data specifically for our needs on Resource Watch.
# If you want to switch back to pulling from the original source, set the following variable to False.
rw_subset = True
# Version of model to use
# h0 version is 3-hourly data
# h3 version is 6-hourly data
VERSION = 'h3'
# get time intervals in each day - specific to version number
if VERSION == 'h0':
TIME_HOURS = list(range(0, 24, 3))
elif VERSION == 'h3':
TIME_HOURS = list(range(0, 24, 6))
if rw_subset==True:
# url for air quality data
SOURCE_URL = 'https://www.acom.ucar.edu/waccm/subsets/resourcewatch/f.e22.beta02.FWSD.f09_f09_mg17.cesm2_2_beta02.forecast.001.cam.%s.{date}_surface_subset.nc' % VERSION
# list variables (as named in netcdf) that we want to pull
VARS = ['NO2', 'CO', 'O3', 'SO2', 'PM25', 'bc_a4']
# list of pressure levels available in the netcdf for each variable
# the RW subset only contains surface level data
NUM_AVAILABLE_LEVELS = [1, 1, 1, 1, 1, 1]
# which pressure level do we want to use for each variable
DESIRED_LEVELS = [1, 1, 1, 1, 1, 1]
else:
# url for air quality data
SOURCE_URL = 'https://www.acom.ucar.edu/waccm/DATA/f.e21.FWSD.f09_f09_mg17.forecast.001.cam.%s.{date}-00000.nc' % VERSION
# list variables (as named in netcdf) that we want to pull
VARS = ['NO2', 'CO', 'O3', 'SO2', 'PM25_SRF', 'bc_a4']
# list of pressure levels available in the netcdf for each variable
# most variables have 88 pressure levels; PM 2.5 only has one level (surface)
    # need to specify which pressure level of data we want for each (level 1 being the lowest pressure)
# the highest level is the highest pressure (992.5 hPa), and therefore, closest to surface level
NUM_AVAILABLE_LEVELS = [88, 88, 88, 88, 1, 88]
# which pressure level do we want to use for each variable
DESIRED_LEVELS = [88, 88, 88, 88, 1, 88]
# subdataset to be converted to tif
# should be of the format 'NETCDF:"filename.nc":variable'
SDS_NAME = 'NETCDF:"{fname}":{var}'
# nodata value for netcdf
NODATA_VALUE = None
# name of data directory in Docker container
DATA_DIR = 'data'
# name of collection in GEE where we will upload the final data
COLLECTION = '/projects/resource-watch-gee/cit_038_WACCM_atmospheric_chemistry_model'
# generate name for dataset's parent folder on GEE which will be used to store
# several collections - one collection per variable
PARENT_FOLDER = COLLECTION
# generate generic string that can be formatted to name each variable's GEE collection
EE_COLLECTION_GEN = PARENT_FOLDER + '/{var}'
# generate generic string that can be formatted to name each variable's asset name
FILENAME = PARENT_FOLDER.split('/')[-1] + '_{var}_{date}'
# specify Google Cloud Storage folder name
GS_FOLDER = COLLECTION[1:]
# do you want to delete everything currently in the GEE collection when you run this script?
CLEAR_COLLECTION_FIRST = True
# how many days of data do we want to use?
# MAXDAYS = 1 only fetches today
# maximum value of 10: today plus 9 days of forecast
MAX_DAYS = 2
# If we don't want to show the last time available for the last day, how many time steps before
# the last is the time we want to show?
# For now, we want to show 12:00. Because the version we use is on 6-hour intervals, we want to pull 1 time step
# before the last for the second day (last time would be 18:00 for this version)
TS_FROM_END = 1
# format of date used in GEE
DATE_FORMAT = '%y-%m-%d_%H%M'
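# e.g. 12:00 UTC on 2021-03-11 is formatted as '21-03-11_1200'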
# Resource Watch dataset API IDs
# Important! Before testing this script:
# Please change these IDs OR comment out the getLayerIDs(DATASET_ID) function in the script below
# Failing to do so will overwrite the last update date on different datasets on Resource Watch
DATASET_IDS = {
'NO2':'2c2c614a-8678-443a-8874-33335771ecc0',
'CO':'266ed113-396c-4c69-885a-ead30df95810',
'O3':'ec011d66-a99b-425c-accd-d04e75966094',
'SO2':'d82186a4-7885-4fa9-9e82-26799853093b',
'PM25':'348e4d57-a345-411d-986e-5863fffebda7',
'bc_a4':'fe0a0042-8430-419b-a60f-9b69ec81a0ec'
}
'''
FUNCTIONS FOR ALL DATASETS
The functions below must go in every near real-time script.
Their format should not need to be changed.
'''
def lastUpdateDate(dataset, date):
'''
Given a Resource Watch dataset's API ID and a datetime,
this function will update the dataset's 'last update date' on the API with the given datetime
INPUT dataset: Resource Watch API dataset ID (string)
date: date to set as the 'last update date' for the input dataset (datetime)
'''
# generate the API url for this dataset
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset)
# create headers to send with the request to update the 'last update date'
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
# create the json data to send in the request
body = {
"dataLastUpdated": date.isoformat() # date should be a string in the format 'YYYY-MM-DDTHH:MM:SS'
}
# send the request
try:
r = requests.patch(url = apiUrl, json = body, headers = headers)
logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))
return 0
except Exception as e:
logging.error('[lastUpdated]: '+str(e))
'''
FUNCTIONS FOR RASTER DATASETS
The functions below must go in every near real-time script for a RASTER dataset.
Their format should not need to be changed.
'''
def getLastUpdate(dataset):
'''
Given a Resource Watch dataset's API ID,
this function will get the current 'last update date' from the API
and return it as a datetime
INPUT dataset: Resource Watch API dataset ID (string)
RETURN lastUpdateDT: current 'last update date' for the input dataset (datetime)
'''
# generate the API url for this dataset
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{}'.format(dataset)
# pull the dataset from the API
r = requests.get(apiUrl)
# find the 'last update date'
lastUpdateString=r.json()['data']['attributes']['dataLastUpdated']
# split this date into two pieces at the seconds decimal so that the datetime module can read it:
# ex: '2020-03-11T00:00:00.000Z' will become '2020-03-11T00:00:00' (nofrag) and '000Z' (frag)
nofrag, frag = lastUpdateString.split('.')
# generate a datetime object
nofrag_dt = datetime.datetime.strptime(nofrag, "%Y-%m-%dT%H:%M:%S")
# add back the microseconds to the datetime
lastUpdateDT = nofrag_dt.replace(microsecond=int(frag[:-1])*1000)
return lastUpdateDT
def getLayerIDs(dataset):
'''
Given a Resource Watch dataset's API ID,
this function will return a list of all the layer IDs associated with it
INPUT dataset: Resource Watch API dataset ID (string)
RETURN layerIDs: Resource Watch API layer IDs for the input dataset (list of strings)
'''
# generate the API url for this dataset - this must include the layers
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{}?includes=layer'.format(dataset)
# pull the dataset from the API
r = requests.get(apiUrl)
#get a list of all the layers
layers = r.json()['data']['attributes']['layer']
# create an empty list to store the layer IDs
layerIDs =[]
# go through each layer and add its ID to the list
for layer in layers:
# only add layers that have Resource Watch listed as its application
if layer['attributes']['application']==['rw']:
layerIDs.append(layer['id'])
return layerIDs
def flushTileCache(layer_id):
"""
Given the API ID for a GEE layer on Resource Watch,
this function will clear the layer cache.
If the cache is not cleared, when you view the dataset on Resource Watch, old and new tiles will be mixed together.
INPUT layer_id: Resource Watch API layer ID (string)
"""
# generate the API url for this layer's cache
apiUrl = 'http://api.resourcewatch.org/v1/layer/{}/expire-cache'.format(layer_id)
# create headers to send with the request to clear the cache
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
# clear the cache for the layer
    # sometimes this fails, so we will try multiple times if it does
# specify that we are on the first try
try_num=1
tries = 4
while try_num<tries:
try:
# try to delete the cache
r = requests.delete(url = apiUrl, headers = headers, timeout=1000)
# if we get a 200, the cache has been deleted
# if we get a 504 (gateway timeout) - the tiles are still being deleted, but it worked
if r.ok or r.status_code==504:
logging.info('[Cache tiles deleted] for {}: status code {}'.format(layer_id, r.status_code))
return r.status_code
# if we don't get a 200 or 504:
else:
# if we are not on our last try, wait 60 seconds and try to clear the cache again
if try_num < (tries-1):
logging.info('Cache failed to flush: status code {}'.format(r.status_code))
time.sleep(60)
logging.info('Trying again.')
# if we are on our last try, log that the cache flush failed
else:
logging.error('Cache failed to flush: status code {}'.format(r.status_code))
logging.error('Aborting.')
try_num += 1
except Exception as e:
logging.error('Failed: {}'.format(e))
'''
FUNCTIONS FOR THIS DATASET
The functions below have been tailored to this specific dataset.
They should all be checked because their format likely will need to be changed.
'''
def getCollectionName(var):
'''
get GEE collection name
INPUT var: variable to be used in asset name (string)
RETURN GEE collection name for input date (string)
'''
return EE_COLLECTION_GEN.format(var=var)
def getAssetName(var, date):
'''
get asset name
INPUT var: variable to be used in asset name (string)
date: date in the format of the DATE_FORMAT variable (string)
RETURN GEE asset name for input date (string)
'''
collection = getCollectionName(var)
return os.path.join(collection, FILENAME.format(var=var, date=date))
def getTiffname(file, hour, var):
'''
generate names for tif files that we are going to create from netcdf
INPUT file: netcdf filename (string)
hour: integer representing hour to be used in tif name, 0-23 (integer)
var: variable to be used in tif name (string)
RETURN name: file name to save tif file created from netcdf (string)
'''
# generate time string to be used in tif file name
# if hour is a single digit, add a zero before to make it a 4-digit time
if hour < 10:
time_str = '0' + str(hour) + '00'
else:
time_str = str(hour) + '00'
# generate date string to be used in tif file name
date = os.path.splitext(file)[0][-10:]
# generate name for tif file
name = os.path.join(DATA_DIR, FILENAME.format(var=var, date=date)) + '_' + time_str
return name
def getFilename(date):
'''
generate file name to store the netcdf in after download
INPUT date: date of file in the format YYYY-MM-DD (string)
RETURN netcdf filename for date (string)
'''
return os.path.join(DATA_DIR, '{}.nc'.format(
FILENAME.format(var='all_vars', date=date)))
def getDateTimeString(filename):
'''
get date and time from filename (last 13 characters of filename after removing extension)
INPUT filename: file name that ends in a date of the format YY-MM-DD_HHMM (string)
RETURN date in the format YY-MM-DD_HHMM (string)
'''
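    # e.g. '..._NO2_21-03-11_1200.tif' -> '21-03-11_1200'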
return os.path.splitext(os.path.basename(filename))[0][-13:]
def getDate_GEE(filename):
'''
get date from Google Earth Engine asset name
INPUT filename: asset name that ends in a date of the format YY-MM-DD_HHMM (string)
RETURN date in the format YY-MM-DD (string)
'''
return os.path.splitext(os.path.basename(filename))[0][-13:-5]
def list_available_files(url, ext=''):
'''
Fetch a list of filenames from source url by year
INPUT url: url for data source where we want to check for download links (string)
ext: extension of file type we are checking for (string)
RETURN list of files available for download from source website (list of strings)
'''
# open and read the url
page = requests.get(url).text
# use BeautifulSoup to read the content as a nested data structure
soup = BeautifulSoup(page, 'html.parser')
# Extract all the <a> tags within the html content to find the files available for download marked with these tags.
# Get only the files that ends with input extension (if specified)
return [node.get('href') for node in soup.find_all('a') if type(node.get('href'))==str and node.get('href').endswith(ext)]
def getNewDates(existing_dates):
'''
Get new dates we want to try to fetch data for
INPUT existing_dates: list of dates that we already have in GEE, in the format of the DATE_FORMAT variable (list of strings)
RETURN new_dates: list of new dates we want to try to get, in the format of the DATE_FORMAT variable (list of strings)
last_date: name of file for last date of forecast (string)
'''
# get a list of files available from the source
url = os.path.split(SOURCE_URL)[0]
available_files = list_available_files(url, ext='.nc')[-9:]
# get the most recent available file
recent_forecast_start = available_files[0]
# pull the date that the most recent forecast was created on
recent_forecast_start_date = recent_forecast_start[-26:-18]
# sort and get the forecast start date for the data we already have
if existing_dates:
existing_dates.sort()
existing_forecast_start_date = existing_dates[0]
else:
existing_forecast_start_date = None
# if we have the most recent forecast, we don't need new data
if existing_forecast_start_date==recent_forecast_start_date:
new_dates = []
# otherwise, we need to go get the days of interest
else:
# get start date of forecast through the day we want to show on RW
recent_files = available_files[:MAX_DAYS]
new_dates = [file[-28:-18] for file in recent_files]
# get last date because this file only has one time output so we need to process it differently
last_date = available_files[-1]
return new_dates, last_date
def getBands(var_num, file, last_date):
'''
get bands for all available times in netcdf at the desired pressure level
INPUT var_num: index number for variable we are currently processing (integer)
file: file we are currently processing (string)
last_date: name of file for last date of forecast (string)
RETURN bands: bands in netcdf for all available times at desired pressure level (list of integers)
'''
# get specified pressure level for the current variable
level = DESIRED_LEVELS[var_num]
# the pressure and time dimensions are flattened into one dimension in the netcdfs
# for the pressure level that we chose, we want all the times available
# we will make a list of the bands that have the data we need
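    # e.g. for the full (non-subset) h3 files with 88 levels and desired level 88,
    # time indices 0-3 map to bands 88, 176, 264 and 352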
if VERSION == 'h0':
# h0 has 8 times - get all times at specified pressure level
bands = [x * NUM_AVAILABLE_LEVELS[var_num] + level for x in
list(range(0, 8))]
elif VERSION == 'h3':
# h3 has 4 times - get all times at specified pressure level
bands = [x * NUM_AVAILABLE_LEVELS[var_num] + level for x in
list(range(0, 4))]
# if we are on the last file, only one time is available
if file[-13:-3] == last_date:
bands = [x * NUM_AVAILABLE_LEVELS[var_num] + level for x in
list(range(0, 1))]
return bands
def convert(files, var_num, last_date):
'''
Convert netcdf files to tifs
INPUT files: list of file names for netcdfs that have already been downloaded (list of strings)
var_num: index number for variable we are currently processing (integer)
last_date: name of file for last date of forecast (string)
RETURN all_tifs: list of file names for tifs that have been generated - all available times (list of strings)
tifs: list of file names for tifs that have been generated - through desired endpoint (list of strings)
'''
# get name of variable we are converting files for
var = VARS[var_num]
# make an empty list to store the names of tif files that we create
all_tifs = []
for f in files:
# get list of bands in netcdf for all available times at desired pressure level
bands = getBands(var_num, f, last_date)
logging.info('Converting {} to tiff'.format(f))
for band in bands:
# generate the subdatset name for current netcdf file for a particular variable
sds_path = SDS_NAME.format(fname=f, var=var)
'''
Google Earth Engine needs to get tif files with longitudes of -180 to 180.
These files have longitudes from 0 to 360. I checked this using gdalinfo.
I downloaded a file onto my local computer and in command line, ran:
gdalinfo NETCDF:"{file_loc/file_name}":{variable}
with the values in {} replaced with the correct information.
I looked at the 'Corner Coordinates' that were printed out.
Since the longitude is in the wrong format, we will have to fix it. First,
we will convert the files from netcdfs to tifs using gdal_translate,
then we will fix the longitude values using gdalwarp.
'''
#generate names for tif files that we are going to create from netcdf
file_name_with_time = getTiffname(file=f, hour=TIME_HOURS[bands.index(band)], var=var)
#create a file for the initial tif that is in the 0 to 360 longitude format
tif_0_360 = '{}_0_360.tif'.format(file_name_with_time)
# create a file name for the final tif that is in the -180 to 180 file format
tif = '{}.tif'.format(file_name_with_time)
# first we will translate this file from a netcdf to a tif
cmd = ['gdal_translate', '-b', str(band), '-q', '-a_nodata', str(NODATA_VALUE), '-a_srs', 'EPSG:4326', sds_path, tif_0_360] #'-q' means quiet so you don't see it
subprocess.call(cmd)
# Now we will fix the longitude. To do this we need the x and y resolution.
# I also got x and y res for data set using the gdalinfo command described above.
xres='1.250000000000000'
yres= '-0.942408376963351'
cmd_warp = ['gdalwarp', '-t_srs', 'EPSG:4326', '-tr', xres, yres, tif_0_360, tif, '-wo', 'SOURCE_EXTRA=1000', '--config', 'CENTER_LONG', '0']
            subprocess.call(cmd_warp) # run gdalwarp from the command line inside python
# add the new tif files to the list of tifs
all_tifs.append(tif)
# If we don't want to use all the times available, we should have set the TS_FROM_END parameter at the beginning.
if TS_FROM_END>0:
# from the list of all the tifs created, get a list of the tifs you actually want to upload
# this should be all the files through the desired end point
tifs = all_tifs[:-TS_FROM_END]
return all_tifs, tifs
def fetch(new_dates, unformatted_source_url):
'''
Fetch files by datestamp
INPUT new_dates: list of dates we want to try to fetch, in the format YYYY-MM-DD (list of strings)
unformatted_source_url: url for air quality data (string)
RETURN files: list of file names for netcdfs that have been downloaded (list of strings)
'''
# make an empty list to store names of the files we downloaded
files = []
# Loop over the new dates, check if there is data available, and download netcdfs
for date in new_dates:
# Set up the url of the filename to download
url = unformatted_source_url.format(date=date)
# Create a file name to store the netcdf in after download
f = getFilename(date)
# get file name of source file you are about to try to download
file_name = os.path.split(url)[1]
# get list of files available from the source
file_list = list_available_files(os.path.split(url)[0], ext='.nc')
# if the file is available, download it
if file_name in file_list:
logging.info('Retrieving {}'.format(file_name))
# try to download file
try:
# download files from url and put in specified file location (f)
urllib.request.urlretrieve(url, f)
# add file name/location to list of files downloaded
files.append(f)
                logging.info('Successfully retrieved {}'.format(file_name)) # gives us "Successfully retrieved file name"
# if download fails, log an error
except Exception as e:
logging.error('Unable to retrieve data from {}'.format(url))
logging.error(e)
logging.debug(e)
# if file is not available, log that
else:
logging.info('{} not available yet'.format(file_name))
return files
def processNewData(files, var_num, last_date):
'''
Process and upload clean new data
INPUT files: list of file names for netcdfs that have been downloaded (list of strings)
var_num: index number for variable we are currently processing (integer)
last_date: name of file for last date of forecast (string)
RETURN assets: list of file names for netcdfs that have been downloaded (list of strings)
'''
# get name of variable we are processing files for
var = VARS[var_num]
# if files is empty list do nothing, otherwise, process data
if files:
logging.info('Converting files')
# Convert netcdfs to tifs
all_tifs, tifs = convert(files, var_num, last_date) # naming tiffs
# get new list of date strings (in case order is different) from the tifs
dates = [getDateTimeString(tif) for tif in tifs]
# generate datetime objects for each tif date
datestamps = [datetime.datetime.strptime(date, DATE_FORMAT) for date in dates]
# Get a list of the names we want to use for the assets once we upload the files to GEE
assets = [getAssetName(var, date) for date in dates]
logging.info('Uploading files:')
for asset in assets:
logging.info(os.path.split(asset)[1])
# Upload new files (tifs) to GEE
eeUtil.uploadAssets(tifs, assets, GS_FOLDER, datestamps, timeout=3000)
# Delete local tif files
logging.info('Cleaning local TIFF files')
delete_local(ext='.tif')
#if no new assets, return empty list
else:
assets = []
return assets
def checkCreateCollection(VARS):
'''
List assets in collection if it exists, else create new collection
INPUT VARS: list variables (as named in netcdf) that we want to check collections for (list of strings)
RETURN existing_dates_all_vars: list of dates, in the format of the DATE_FORMAT variable, that exist for all variable collections in GEE (list of strings)
existing_dates_by_var: list of dates, in the format of the DATE_FORMAT variable, that exist for each individual variable collection in GEE (list containing list of strings for each variable)
'''
# create a master list (not variable-specific) to store the dates for which all variables already have data for
existing_dates = []
# create an empty list to store the dates that we currently have for each AQ variable
# will be used in case the previous script run crashed before completing the data upload for every variable.
existing_dates_by_var = []
# loop through each variables that we want to pull
for var in VARS:
# For one of the variables, get the date of the most recent dataset
# All variables come from the same file
# If we have one for a particular data, we should have them all
collection = getCollectionName(var)
# Check if folder to store GEE collections exists. If not, create it.
# we will make one collection per variable, all stored in the parent folder for the dataset
if not eeUtil.exists(PARENT_FOLDER):
logging.info('{} does not exist, creating'.format(PARENT_FOLDER))
eeUtil.createFolder(PARENT_FOLDER)
# If the GEE collection for a particular variable exists, get a list of existing assets
if eeUtil.exists(collection):
existing_assets = eeUtil.ls(collection)
# get a list of the dates from these existing assets
dates = [getDate_GEE(a) for a in existing_assets]
# append this list of dates to our list of dates by variable
existing_dates_by_var.append(dates)
# for each of the dates that we have for this variable, append the date to the master
# list of which dates we already have data for (if it isn't already in the list)
for date in dates:
if date not in existing_dates:
existing_dates.append(date)
# If the GEE collection does not exist, append an empty list to our list of dates by variable
else:
existing_dates_by_var.append([])
# create a collection for this variable
logging.info('{} does not exist, creating'.format(collection))
eeUtil.createFolder(collection, True)
'''
    We want to make sure all variables correctly uploaded the data on the last run. To do this, we will
check that we have the correct number of appearances of the data in our GEE collection. If we do
not, we will want to re-upload this date's data.
'''
# Create a copy of the master list of dates that will store the dates that were properly uploaded for all variables.
existing_dates_all_vars = copy.copy(existing_dates)
for date in existing_dates:
# check how many times each date appears in our list of dates by variable
date_count = sum(x.count(date) for x in existing_dates_by_var)
# divide this count by the number of time intervals we have (because the date will be
# repeated for each time)
count = date_count / len(TIME_HOURS)
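        # e.g. with 4 time steps per day and 6 variables, a completely uploaded date appears
        # 24 times across all variables, giving a count equal to len(VARS)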
# If this count is less than the number of variables we have, one of the variables did not finish
# uploading for this date, and we need to re-upload this file.
if count < len(VARS):
# remove this from the list of existing dates for all variables
existing_dates_all_vars.remove(date)
return existing_dates_all_vars, existing_dates_by_var
def get_forecast_run_date(var):
'''
Get the date that the most recent forecast was run from
INPUT var: variable for which we are pulling forecast run date (string)
RETURN most_recent_forecast_date: date of most recent forecast run (datetime)
'''
# pull existing assets in the collection
collection = getCollectionName(var)
existing_assets = eeUtil.ls(collection)
# sort these dates oldest to newest
existing_assets.sort()
# get the forecast run date (first in the list) and turn it into a datetime
most_recent_forecast_date = datetime.datetime.strptime(existing_assets[0][-13:], DATE_FORMAT)
return most_recent_forecast_date
def clearCollectionMultiVar():
'''
Clear the GEE collection for all variables
'''
logging.info('Clearing collections.')
for var_num in range(len(VARS)):
# get name of variable we are clearing GEE collections for
var = VARS[var_num]
# get name of GEE collection for variable
collection = getCollectionName(var)
# if the collection exists,
if eeUtil.exists(collection):
# remove the / from the beginning of the collection name to be used in ee module
if collection[0] == '/':
collection = collection[1:]
# pull the image collection
a = ee.ImageCollection(collection)
# check how many assets are in the collection
collection_size = a.size().getInfo()
# if there are assets in the collection
if collection_size > 0:
# create a list of assets in the collection
                asset_list = a.toList(collection_size)
                # delete each asset
                for item in asset_list.getInfo():
ee.data.deleteAsset(item['id'])
def initialize_ee():
'''
Initialize eeUtil and ee modules
'''
# get GEE credentials from env file
GEE_JSON = os.environ.get("GEE_JSON")
_CREDENTIAL_FILE = 'credentials.json'
GEE_SERVICE_ACCOUNT = os.environ.get("GEE_SERVICE_ACCOUNT")
with open(_CREDENTIAL_FILE, 'w') as f:
f.write(GEE_JSON)
auth = ee.ServiceAccountCredentials(GEE_SERVICE_ACCOUNT, _CREDENTIAL_FILE)
ee.Initialize(auth)
def updateResourceWatch():
'''
This function should update Resource Watch to reflect the new data.
This may include updating the 'last update date', flushing the tile cache, and updating any dates on layers
'''
for var_num in range(len(VARS)):
# get variable we are updating layers for
var = VARS[var_num]
# Get most recent forecast run date
most_recent_date = get_forecast_run_date(var)
# Get the current 'last update date' from the dataset on Resource Watch
current_date = getLastUpdate(DATASET_IDS[var])
# If the most recent date from the GEE collection does not match the 'last update date' on the RW API, update it
if current_date != most_recent_date:
logging.info('Updating last update date and flushing cache.')
# Update dataset's last update date on Resource Watch
lastUpdateDate(DATASET_IDS[var], most_recent_date)
# get layer ids and flush tile cache for each
layer_ids = getLayerIDs(DATASET_IDS[var])
for layer_id in layer_ids:
flushTileCache(layer_id)
# Update the dates on layer legends - TO BE ADDED IN FUTURE
def delete_local(ext=None):
'''
This function will delete local files in the Docker container with a specific extension, if specified.
If no extension is specified, all local files will be deleted.
INPUT ext: optional, file extension for files you want to delete, ex: '.tif' (string)
'''
try:
if ext:
            files = [file for file in os.listdir(DATA_DIR) if file.endswith(ext)]
else:
files = os.listdir(DATA_DIR)
for f in files:
logging.info('Removing {}'.format(f))
os.remove(DATA_DIR+'/'+f)
except NameError:
logging.info('No local files to clean.')
def main():
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logging.info('STARTING')
# Initialize eeUtil and ee modules
eeUtil.initJson()
initialize_ee()
# Clear collection in GEE if desired
if CLEAR_COLLECTION_FIRST:
clearCollectionMultiVar()
# Check if collection exists. If not, create it.
# Return a list of dates that exist for all variables collections in GEE (existing_dates),
# as well as a list of which dates exist for each individual variable (existing_dates_by_var).
# The latter will be used in case the previous script run crashed before completing the data upload for every variable.
logging.info('Getting existing dates.')
existing_dates, existing_dates_by_var = checkCreateCollection(VARS)
# Get a list of the dates that are available, minus the ones we have already uploaded correctly for all variables.
logging.info('Getting new dates to pull.')
all_new_dates, last_date = getNewDates(existing_dates)
# if new data is available, clear the collection because we want to store the most
# recent forecast, not the old forecast
if all_new_dates:
logging.info('New forecast available.')
clearCollectionMultiVar()
else:
logging.info('No new forecast.')
    # The Docker container is only big enough to hold 3 files at once,
# so break into groups to process
new_date_groups = [all_new_dates[x:x+3] for x in range(0, len(all_new_dates), 3)]
for new_dates in new_date_groups:
# Fetch new files
logging.info('Fetching files for {}'.format(new_dates))
files = fetch(new_dates, SOURCE_URL)
# Process data, one variable at a time
for var_num in range(len(VARS)):
# get variable name
var = VARS[var_num]
# Process new data files, delete all forecast assets currently in collection
new_assets = processNewData(files, var_num, last_date)
logging.info('New assets for {}: {}'.format(var, len(new_assets)))
logging.info('SUCCESS for {}'.format(var))
# Delete local netcdf files
delete_local()
# Update Resource Watch
updateResourceWatch()
logging.info('SUCCESS')
|
[] |
[] |
[
"GEE_JSON",
"GEE_SERVICE_ACCOUNT",
"apiToken"
] |
[]
|
["GEE_JSON", "GEE_SERVICE_ACCOUNT", "apiToken"]
|
python
| 3 | 0 | |
main.go
|
package main
import (
"context"
"io"
"net/http"
"os"
"github.com/joho/godotenv"
"github.com/rs/zerolog/log"
"google.golang.org/api/option"
"google.golang.org/api/youtube/v3"
)
var YoutubeService *youtube.Service
func main() {
	// Load .env (ideally this should only be done in the development environment)
err := godotenv.Load()
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
ctx := context.Background()
YoutubeService, err = youtube.NewService(ctx, option.WithAPIKey(os.Getenv("YOUTUBE_API_KEY")))
if err != nil {
log.Fatal().Err(err).Msg("youtube.NewService create failed")
}
// DB接続初期化
DBInit()
defer DB.Close()
h1 := func(w http.ResponseWriter, _ *http.Request) {
log.Info().Str("severity", "INFO").Msg("pong!!!")
io.WriteString(w, "pong\n")
}
h2 := func(w http.ResponseWriter, _ *http.Request) {
log.Info().Str("severity", "ERROR").Msg("error!!!")
io.WriteString(w, "error-demo\n")
}
http.HandleFunc("/ping", h1)
http.HandleFunc("/error", h2)
http.HandleFunc("/youtube", YoutubeHandler)
http.HandleFunc("/twitter", TwitterHandler)
// log.Debug().Msgf("listening on port %s", port)
if err := http.ListenAndServe(":"+port, nil); err != nil {
log.Fatal().Err(err).Msg("start http server failed")
}
}
|
[
"\"PORT\"",
"\"YOUTUBE_API_KEY\""
] |
[] |
[
"PORT",
"YOUTUBE_API_KEY"
] |
[]
|
["PORT", "YOUTUBE_API_KEY"]
|
go
| 2 | 0 | |
samples/kubeclient/main.go
|
package main
import (
"flag"
"os"
"path/filepath"
"time"
"github.com/elastisys/kubeaware-cloudpool-proxy/pkg/config"
"github.com/golang/glog"
"github.com/elastisys/kubeaware-cloudpool-proxy/pkg/kube"
)
var (
apiServerURL string
kubeConfigFile string
certFile string
certKeyFile string
caCertFile string
)
func init() {
defaultKubeConfigFile := filepath.Join(os.Getenv("HOME"), ".kube", "config")
flag.StringVar(&apiServerURL, "apiserver-url", "", "Kubernetes API server base URL.")
flag.StringVar(&kubeConfigFile, "kubeconfig", defaultKubeConfigFile, "kubeconfig file.")
flag.StringVar(&certFile, "cert-file", "", "Client certificate.")
flag.StringVar(&certKeyFile, "cert-key", "", "Client private key.")
flag.StringVar(&caCertFile, "ca-cert", "", "CA certificate.")
// make glog write to stderr by default by setting --logtostderr to true
flag.Lookup("logtostderr").Value.Set("true")
// default glog verbosity level is 0 (higher values produce more output)
flag.Lookup("v").Value.Set("0")
}
func main() {
flag.Parse()
// make sure any buffered output gets written when we exit
defer glog.Flush()
if apiServerURL == "" {
glog.Exitf("error: no --apiserver-url given")
}
conf := &config.APIServerConfig{
URL: apiServerURL,
Auth: config.APIServerAuthConfig{
KubeConfigPath: kubeConfigFile,
ClientCertPath: certFile,
ClientKeyPath: certKeyFile,
CACertPath: caCertFile,
},
Timeout: config.Duration{Duration: 10 * time.Second},
}
kubeclient, err := kube.NewKubeClient(conf)
if err != nil {
glog.Exitf("could not create kubernetes client: %s\n", err)
}
nodeList, err := kubeclient.ListNodes()
if err != nil {
glog.Exitf("failed to list nodes: %s", err)
}
for _, node := range nodeList.Items {
glog.Infof("node: %s", node.Name)
}
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
wfdm-clamav-service/wfdm-clamav-scan-handler/src/main/java/ca/bc/gov/nrs/wfdm/wfdm_clamav_scan_handler/SendSNSNotification.java
|
package ca.bc.gov.nrs.wfdm.wfdm_clamav_scan_handler;
import java.util.HashMap;
import java.util.Map;
import org.json.JSONObject;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.sns.AmazonSNS;
import com.amazonaws.services.sns.AmazonSNSClient;
import com.amazonaws.services.sns.model.MessageAttributeValue;
import com.amazonaws.services.sns.model.PublishRequest;
import com.amazonaws.services.sns.model.PublishResult;
public class SendSNSNotification {
private static String region = "ca-central-1";
static final AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
private static String subject = "Virus Alert on WFDM-File-Indexing";
public static void publicshMessagetoSNS(JSONObject messageDetails) {
String topicArn = System.getenv("WFDM_SNS_VIRUS_ALERT").trim();
AmazonSNS snsClient = AmazonSNSClient.builder().withRegion(region).withCredentials(credentialsProvider).build();
final Map<String, MessageAttributeValue> attributes = new HashMap<String, MessageAttributeValue>();
attributes.put("subject", new MessageAttributeValue().withDataType("String").withStringValue(subject));
final String message = "The source " + messageDetails.getJSONObject("responsePayload").getString("source")
+ " found a file " + messageDetails.getJSONObject("responsePayload").getString("input_key")
+ " on S3 bucket " + messageDetails.getJSONObject("responsePayload").getString("input_bucket") + " at "
+ messageDetails.getString("timestamp") + ".\n\n The scan status from ClamAv \n "
+ messageDetails.getJSONObject("responsePayload").getString("message");
final PublishRequest publishRequest = new PublishRequest().withTopicArn(topicArn).withSubject(subject)
.withMessage(message).withMessageAttributes(attributes);
final PublishResult publishResponse = snsClient.publish(publishRequest);
// Print the MessageId of the message.
        System.out.println("MessageId from the SNS publish response: " + publishResponse.getMessageId());
}
}
|
[
"\"WFDM_SNS_VIRUS_ALERT\""
] |
[] |
[
"WFDM_SNS_VIRUS_ALERT"
] |
[]
|
["WFDM_SNS_VIRUS_ALERT"]
|
java
| 1 | 0 | |
goroot-1.9/src/appengine_internal/api_dev.go
|
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package appengine_internal
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"strings"
"sync/atomic"
"time"
basepb "appengine_internal/base"
lpb "appengine_internal/log"
"appengine_internal/remote_api"
rpb "appengine_internal/runtime_config"
proto "appengine_internal/github.com/golang/protobuf/proto"
)
// IsDevAppServer returns whether the App Engine app is running in the
// development App Server.
func IsDevAppServer() bool {
return true
}
// serveHTTP serves App Engine HTTP requests.
func serveHTTP() {
// The development server reads the HTTP address and port that the
// server is listening to from stdout. We listen on 127.0.0.1:0 or
// [::1]:0 to avoid firewall restrictions.
conn, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
log.Print("appengine: couldn't listen on IPv4 TCP socket: ", err)
conn, err = net.Listen("tcp", "[::1]:0")
if err != nil {
log.Fatal("appengine: couldn't listen on IPv6 TCP socket: ", err)
}
}
addr := conn.Addr().(*net.TCPAddr)
fmt.Fprintf(os.Stdout, "%s\t%d\n", addr.IP, addr.Port)
os.Stdout.Close()
err = http.Serve(conn, http.HandlerFunc(handleFilteredHTTP))
if err != nil {
log.Fatal("appengine: ", err)
}
}
func init() {
// If the user's application has a transitive dependency on appengine_internal
// then this init will be called before any user code.
// Read configuration from stdin when the application is being run by
// devappserver2. The user application should not be reading from stdin.
if os.Getenv("RUN_WITH_DEVAPPSERVER") != "1" {
log.Print("appengine: not running under devappserver2; using some default configuration")
return
}
c := readConfig(os.Stdin)
instanceConfig.AppID = string(c.AppId)
instanceConfig.APIHost = c.GetApiHost()
instanceConfig.APIPort = int(*c.ApiPort)
instanceConfig.VersionID = string(c.VersionId)
instanceConfig.InstanceID = *c.InstanceId
instanceConfig.Datacenter = *c.Datacenter
apiAddress = fmt.Sprintf("http://%s:%d", instanceConfig.APIHost, instanceConfig.APIPort)
}
func handleFilteredHTTP(w http.ResponseWriter, r *http.Request) {
// Patch up RemoteAddr so it looks reasonable.
if addr := r.Header.Get("X-Appengine-Remote-Addr"); addr != "" {
r.RemoteAddr = addr
} else {
// Should not normally reach here, but pick
// a sensible default anyway.
r.RemoteAddr = "127.0.0.1"
}
// Create a private copy of the Request that includes headers that are
// private to the runtime and strip those headers from the request that the
// user application sees.
creq := *r
r.Header = make(http.Header)
for name, values := range creq.Header {
if !strings.HasPrefix(name, "X-Appengine-Dev-") {
r.Header[name] = values
}
}
ctx := &httpContext{req: &creq, done: make(chan struct{})}
r = registerContext(r, ctx)
http.DefaultServeMux.ServeHTTP(w, r)
close(ctx.done)
unregisterContext(r)
}
var (
apiAddress string
apiHTTPClient = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
}
instanceConfig = struct {
AppID string
VersionID string
InstanceID string
Datacenter string
APIHost string
APIPort int
}{
// Default configuration for when this file is loaded outside the context
// of devappserver2.
AppID: "dev~my~app",
VersionID: "1.2345",
InstanceID: "deadbeef",
Datacenter: "us1",
APIHost: "localhost",
APIPort: 1,
}
)
func readConfig(r io.Reader) *rpb.Config {
raw, err := ioutil.ReadAll(r)
if err != nil {
log.Fatal("appengine: could not read from stdin: ", err)
}
if len(raw) == 0 {
log.Fatal("appengine: no config provided on stdin")
}
b := make([]byte, base64.StdEncoding.DecodedLen(len(raw)))
n, err := base64.StdEncoding.Decode(b, raw)
if err != nil {
log.Fatal("appengine: could not base64 decode stdin: ", err)
}
config := &rpb.Config{}
err = proto.Unmarshal(b[:n], config)
if err != nil {
log.Fatal("appengine: could not decode runtime_config: ", err)
}
return config
}
var errTimeout = &CallError{
Detail: "Deadline exceeded",
Code: 11, // CANCELED
Timeout: true,
}
// postWithTimeout issues a POST to the specified URL with a given timeout.
func postWithTimeout(url, bodyType string, body io.Reader, timeout time.Duration) (b []byte, err error) {
req, err := http.NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", bodyType)
if timeout != 0 {
if tr, ok := apiHTTPClient.Transport.(*http.Transport); ok {
var canceled int32 // atomic; set to 1 if canceled
t := time.AfterFunc(timeout, func() {
atomic.StoreInt32(&canceled, 1)
tr.CancelRequest(req)
})
defer t.Stop()
defer func() {
// Check to see whether the call was canceled.
if atomic.LoadInt32(&canceled) != 0 {
err = errTimeout
}
}()
}
}
resp, err := apiHTTPClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
func call(service, method string, data []byte, requestID string, timeout time.Duration) ([]byte, error) {
req := &remote_api.Request{
ServiceName: &service,
Method: &method,
Request: data,
RequestId: &requestID,
}
buf, err := proto.Marshal(req)
if err != nil {
return nil, err
}
body, err := postWithTimeout(apiAddress, "application/octet-stream", bytes.NewReader(buf), timeout)
if err != nil {
return nil, err
}
res := &remote_api.Response{}
err = proto.Unmarshal(body, res)
if err != nil {
return nil, err
}
if ae := res.ApplicationError; ae != nil {
// All Remote API application errors are API-level failures.
return nil, &APIError{Service: service, Detail: *ae.Detail, Code: *ae.Code}
}
return res.Response, nil
}
// httpContext represents the context of an in-flight HTTP request.
// It implements the appengine.Context interface.
type httpContext struct {
req *http.Request
done chan struct{} // Closed when the context has expired.
}
// RegisterTestContext associates a test context with the given HTTP request,
// returning a closure to delete the association. It should only be used by the
// aetest package, and never directly. It is only available in the SDK.
func RegisterTestContext(req *http.Request, c context) (*http.Request, func()) {
req = registerTestContext(req, c)
return req, func() {
unregisterContext(req)
}
}
var errExpired = &CallError{
Detail: "invalid security ticket (context expired)",
Code: 3, // SECURITY_VIOLATION
Timeout: false,
}
func (c *httpContext) Call(service, method string, in, out ProtoMessage, opts *CallOptions) error {
if service == "__go__" {
if method == "GetNamespace" {
out.(*basepb.StringProto).Value = proto.String(c.req.Header.Get("X-AppEngine-Current-Namespace"))
return nil
}
if method == "GetDefaultNamespace" {
out.(*basepb.StringProto).Value = proto.String(c.req.Header.Get("X-AppEngine-Default-Namespace"))
return nil
}
}
if f, ok := apiOverrides[struct{ service, method string }{service, method}]; ok {
return f(in, out, opts)
}
data, err := proto.Marshal(in)
if err != nil {
return err
}
requestID := c.req.Header.Get("X-Appengine-Dev-Request-Id")
var d time.Duration
if opts != nil && opts.Timeout != 0 {
d = opts.Timeout
}
errc := make(chan error, 1)
go func() {
res, err := call(service, method, data, requestID, d)
if err != nil {
errc <- err
return
}
errc <- proto.Unmarshal(res, out)
}()
select {
case err := <-errc:
return err
case <-c.done:
log.Printf("ERROR: context expired before API call %s/%s completed\nrequest URL: %v", service, method, c.req.URL)
return errExpired
}
}
func (c *httpContext) Request() interface{} {
return c.req
}
func (c *httpContext) logf(level int64, levelName, format string, args ...interface{}) {
s := fmt.Sprintf(format, args...)
s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
log.Println(levelName + ": " + s)
// Truncate long log lines.
const maxLogLine = 8192
if len(s) > maxLogLine {
suffix := fmt.Sprintf("...(length %d)", len(s))
s = s[:maxLogLine-len(suffix)] + suffix
}
buf, err := proto.Marshal(&lpb.UserAppLogGroup{
LogLine: []*lpb.UserAppLogLine{
{
TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
Level: proto.Int64(level),
Message: proto.String(s),
}}})
if err != nil {
log.Printf("appengine_internal.flushLog: failed marshaling AppLogGroup: %v", err)
return
}
req := &lpb.FlushRequest{
Logs: buf,
}
res := &basepb.VoidProto{}
if err := c.Call("logservice", "Flush", req, res, nil); err != nil {
log.Printf("appengine_internal.flushLog: failed Flush RPC: %v", err)
}
}
func (c *httpContext) Debugf(format string, args ...interface{}) { c.logf(0, "DEBUG", format, args...) }
func (c *httpContext) Infof(format string, args ...interface{}) { c.logf(1, "INFO", format, args...) }
func (c *httpContext) Warningf(format string, args ...interface{}) {
c.logf(2, "WARNING", format, args...)
}
func (c *httpContext) Errorf(format string, args ...interface{}) { c.logf(3, "ERROR", format, args...) }
func (c *httpContext) Criticalf(format string, args ...interface{}) {
c.logf(4, "CRITICAL", format, args...)
}
// FullyQualifiedAppID returns the fully-qualified application ID.
// This may contain a partition prefix (e.g. "s~" for High Replication apps),
// or a domain prefix (e.g. "example.com:").
func (c *httpContext) FullyQualifiedAppID() string {
return instanceConfig.AppID
}
|
[
"\"RUN_WITH_DEVAPPSERVER\""
] |
[] |
[
"RUN_WITH_DEVAPPSERVER"
] |
[]
|
["RUN_WITH_DEVAPPSERVER"]
|
go
| 1 | 0 | |
actions/uipath.py
|
import requests
# from dotenv import load_dotenv
import os
import json
import logging
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Set logger
logger_format = '[%(asctime)s] - [%(name)s] - [%(levelname)s] - %(message)s'
logging.basicConfig(level=logging.INFO, format=logger_format)
logger = logging.getLogger('UiPathAPI')
# create file handler which logs even debug messages
fh = logging.FileHandler('UiPathAPI.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter(logger_format)
logger.addHandler(fh)
class UiPathAPI:
JOB_STATES = ['pending', 'running', 'successful', 'faulted', 'stopping', 'terminating', 'stopped']
ROBOT_STATES = ['available', 'busy', 'unresponsive', 'disconnected']
def __init__(self):
# load_dotenv()
self.host = os.getenv('HOST')
self.token = None
self.headers = {
'Content-Type': "application/json",
}
def authenticate(self):
logger.info('Calling authenticate method...')
url = self.host + '/api/Account/Authenticate'
payload = {
'tenancyName': os.getenv('TENANCY_NAME'),
'usernameOrEmailAddress': os.getenv('USERNAME_OR_EMAIL_ADDRESS'),
'password': os.getenv('PASSWORD')
}
response = requests.request("POST", url, data=json.dumps(payload), headers=self.headers, verify=False).json()
if response.get('success'):
logger.info('User authenticated')
self.token = response.get('result')
self.headers['Authorization'] = 'Bearer {}'.format(self.token)
def make_request(self, url, payload, method='GET'):
response = requests.request(method, url, params=payload, headers=self.headers, verify=False).json()
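        # if the orchestrator returns an error payload with code 0, the token is likely missing or
        # expired, so re-authenticate and retry the request once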
if 'error' in response and response['error']['code'] == 0:
self.authenticate()
            response = requests.request(method, url, params=payload, headers=self.headers, verify=False).json()
return response.get('value')
def get_logs(self, filter_key, filter_value):
url = self.host + '/odata/RobotLogs'
payload = {
"$top": "5",
"$filter": "{} eq {}".format(filter_key, filter_value)
}
logs = self.make_request(url, payload)
log_messages = []
if logs:
log_messages = [x.get('Message') for x in logs]
return log_messages
def get_all_sessions(self):
logger.info('Calling get_all_sessions method...')
url = self.host + '/odata/Sessions'
sessions = self.make_request(url, None)
logger.info('Returning sessions')
return sessions
def get_all_robots(self, reporting_time=None, state=None):
logger.info('Calling get_all_robots method...')
url = self.host + '/odata/Sessions'
payload = {
'$select': 'State',
'$expand': 'Robot'
}
output_list = []
# Get full list of robots
full_list = self.make_request(url, payload)
logger.info('[get_all_robots] Filtering robots by ReportingTime = {} and State = {}'.format(reporting_time, state))
# Get list filtered by Reporting Time
payload['$filter'] = 'ReportingTime gt {}'.format(reporting_time)
filtered_list = self.make_request(url, payload)
# Get IDs of robots that are responsive
responsive_robot_ids = []
if filtered_list:
responsive_robot_ids = [x['Robot']['Id'] for x in filtered_list]
# Set real state to robots within the full list
for robot in full_list:
if robot['Robot']['Id'] not in responsive_robot_ids:
robot['State'] = 'Unresponsive'
if (state is not None and robot['State'].lower() == state) or state is None:
log_messages = self.get_logs('RobotName', "'{}'".format(robot['Robot']['Name']))
robot['Logs'] = log_messages
output_list.append(robot)
logger.info('Returning robots')
return output_list
def get_robot_by_name(self, name, reporting_time):
logger.info('Calling get_robot_by_name method...')
url = self.host + '/odata/Sessions'
payload = {
'$select': 'State',
'$expand': 'Robot',
'$top': '1',
'$filter': "Robot/Name eq '{}'".format(name)
}
robot = None
robots = self.make_request(url, payload)
if robots:
robot = robots[0]
# Filter by reporting time to check whether it is unresponsive or not
payload['$filter'] = payload['$filter'] + ' and ReportingTime gt {}'.format(reporting_time)
filtered_robot = self.make_request(url, payload)
if not filtered_robot: # empty list, so it is unresponsive
robot['State'] = 'Unresponsive'
log_messages = self.get_logs('RobotName', "'{}'".format(robot['Robot']['Name']))
robot['Logs'] = log_messages
logger.info('Returning robot info with name: {}'.format(name))
else:
logger.info('Robot with name [{}] not found'.format(name))
return robot
def get_all_jobs(self, start_time_from=None, start_time_to=None, end_time_from=None, end_time_to=None, state=None, count=None):
logger.info('Calling get_all_jobs method...')
url = self.host + '/odata/Jobs'
filter_clauses = []
if start_time_from: filter_clauses.append('StartTime gt {}'.format(start_time_from))
if start_time_to: filter_clauses.append('StartTime lt {}'.format(start_time_to))
if end_time_from: filter_clauses.append('EndTime gt {}'.format(end_time_from))
if end_time_to: filter_clauses.append('EndTime lt {}'.format(end_time_to))
if state: filter_clauses.append("State eq '{}'".format(state.title()))
payload = {}
if filter_clauses:
filter_clause = ' and '.join(filter_clauses)
payload['$filter'] = filter_clause
logger.info('[get_all_jobs] Filter clause = {}'.format(filter_clause))
if count: payload['$top'] = count
jobs = self.make_request(url, payload)
for job in jobs or []:
job_key = job.get('Key')
log_messages = self.get_logs('JobKey', job_key)
job['Logs'] = log_messages
logger.info('Returning jobs')
return jobs
def get_job_by_name(self, name):
logger.info('Calling get_job_by_name method...')
url = self.host + '/odata/Jobs'
payload = {
'$top': '1',
'$filter': "ReleaseName eq '{}'".format(name)
}
job = None
jobs = self.make_request(url, payload)
if jobs:
job = jobs[0]
job_key = job.get('Key')
log_messages = self.get_logs('JobKey', job_key)
job['Logs'] = log_messages
logger.info('Returning job info with name: {}'.format(name))
else:
logger.info('Job with name [{}] not found'.format(name))
return job
def get_all_assets(self):
logger.info('Calling get_all_assets method...')
url = self.host + '/odata/Assets'
payload = {
'$expand': 'RobotValues'
}
assets = self.make_request(url, payload)
logger.info('Returning assets')
return assets
def get_asset_by_name(self, name):
logger.info('Calling get_asset_by_name method...')
url = self.host + '/odata/Assets'
payload = {
'$top': '1',
'$filter': "Name eq '{}'".format(name)
}
asset = None
assets = self.make_request(url, payload)
if assets:
asset = assets[0]
logger.info('Returning asset info with name: {}'.format(name))
else:
logger.info('Asset with name [{}] not found'.format(name))
return asset
def get_all_queues(self, creation_time_from=None, creation_time_to=None):
logger.info('Calling get_all_queues method...')
url = self.host + '/odata/QueueDefinitions'
filter_clauses = []
if creation_time_from: filter_clauses.append('CreationTime gt {}'.format(creation_time_from))
if creation_time_to: filter_clauses.append('CreationTime lt {}'.format(creation_time_to))
payload = {}
if filter_clauses:
filter_clause = ' and '.join(filter_clauses)
payload['$filter'] = filter_clause
logger.info('[get_all_queues] Filter clause = {}'.format(filter_clause))
queues = self.make_request(url, payload)
processing_status_url = self.host + '/odata/QueueProcessingRecords/UiPathODataSvc.RetrieveQueuesProcessingStatus'
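# Enrich each queue definition with its processing status, fetched per queue name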
for queue in queues or []:
name = queue.get('Name')
payload = {"$filter": "QueueDefinitionName eq '{}'".format(name)}
processing_status = self.make_request(processing_status_url, payload)[0]
queue['ProcessingStatus'] = processing_status
logger.info('Returning queues')
return queues
def get_queue_by_name(self, name):
logger.info('Calling get_queue_by_name method...')
url = self.host + '/odata/QueueDefinitions'
payload = {
'$top': '1',
'$filter': "Name eq '{}'".format(name)
}
queues = self.make_request(url, payload)
queue = None
if queues:
queue = queues[0]
processing_status_url = self.host + '/odata/QueueProcessingRecords/UiPathODataSvc.RetrieveQueuesProcessingStatus'
payload = {"$filter": "QueueDefinitionName eq '{}'".format(name)}
processing_status = self.make_request(processing_status_url, payload)[0]
queue['ProcessingStatus'] = processing_status
logger.info('Returning queue info with name: {}'.format(name))
else:
logger.info('Queue with name [{}] not found'.format(name))
return queue
def get_all_processes(self, published_from=None, published_to=None):
logger.info('Calling get_all_processes method...')
url = self.host + '/odata/Processes'
filter_clauses = []
if published_from: filter_clauses.append('Published gt {}'.format(published_from))
if published_to: filter_clauses.append('Published lt {}'.format(published_to))
payload = {}
if filter_clauses:
filter_clause = ' and '.join(filter_clauses)
payload['$filter'] = filter_clause
logger.info('[get_all_processes] Filter clause = {}'.format(filter_clause))
processes = self.make_request(url, payload)
for process in processes or []:
log_messages = self.get_logs('ProcessName', "'{}'".format(process.get('Id')))
process['Logs'] = log_messages
logger.info('Returning processes')
return processes
def get_process_by_id(self, id):
logger.info('Calling get_process_by_id method...')
url = self.host + '/odata/Processes'
payload = {
'$top': '1',
'$filter': "Id eq '{}'".format(id)
}
process = None
processes = self.make_request(url, payload)
if processes:
process = processes[0]
log_messages = self.get_logs('ProcessName', "'{}'".format(process.get('Id')))
process['Logs'] = log_messages
logger.info('Returning process info with name: {}'.format(id))
else:
logger.info('Process with name [{}] not found'.format(id))
return process
|
[] |
[] |
[
"HOST",
"TENANCY_NAME",
"USERNAME_OR_EMAIL_ADDRESS",
"PASSWORD"
] |
[]
|
["HOST", "TENANCY_NAME", "USERNAME_OR_EMAIL_ADDRESS", "PASSWORD"]
|
python
| 4 | 0 | |
src/main/java/com/redhat/fuse/boosters/rest/http/CamelRouter.java
|
package com.redhat.fuse.boosters.rest.http;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.model.dataformat.JsonLibrary;
import org.apache.camel.model.rest.RestBindingMode;
import org.apache.camel.model.rest.RestParamType;
import org.springframework.stereotype.Component;
import java.util.List;
/**
* A simple Camel REST DSL route that implements the arrivals service.
*
*/
@Component
public class CamelRouter extends RouteBuilder {
@Override
public void configure() throws Exception {
String arrivalsHost = System.getenv("ARRIVALS_HOST");
String departuresHost = System.getenv("DEPARTURES_HOST");
if (arrivalsHost == null || arrivalsHost.isEmpty()) {
throw new Exception("ARRIVALS_HOST env var not set");
} else if(departuresHost == null || departuresHost.isEmpty()) {
throw new Exception("DEPARTURES_HOST env var not set");
}
// @formatter:off
restConfiguration()
.enableCORS(true)
.apiContextPath("/api-doc")
.apiProperty("api.title", "Airport Flights REST API")
.apiProperty("api.version", "1.0")
.apiProperty("cors", "true")
.apiProperty("base.path", "camel/")
.apiProperty("api.path", "/")
.apiProperty("host", "")
.apiProperty("schemes", "https")
.apiContextRouteId("doc-api")
.component("servlet")
.bindingMode(RestBindingMode.json);
rest("/flights")
.description("List all flights (arrivals & departures)")
.get()
.param().name("user_key")
.type(RestParamType.query)
.required(false)
.description("User Key, if calling the API in front of 3Scale.")
.endParam()
.outType(FlightsList.class)
.route().routeId("flights-api")
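// Fan out to both flight services in parallel and merge the two responses with FlightAggregationStrategy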
.multicast(new FlightAggregationStrategy())
.parallelProcessing()
.to("direct:arrivalsImplRemote", "direct:departuresImplRemote");
from("direct:arrivalsImplRemote").description("Arrivals REST service implementation route")
.streamCaching()
.to(String.format("http://%s/arrivals?bridgeEndpoint=true", arrivalsHost))
.convertBodyTo(String.class)
.unmarshal().json(JsonLibrary.Jackson, ArrivalsList.class);
from("direct:departuresImplRemote").description("Departures REST service implementation route")
.streamCaching()
.to(String.format("http://%s/departures?bridgeEndpoint=true", departuresHost))
.convertBodyTo(String.class)
.unmarshal().json(JsonLibrary.Jackson, DeparturesList.class);
from("direct:arrivalsImplLocal").description("Arrivals REST service implementation route")
.streamCaching()
.to("bean:arrivalsService?method=getArrivals");
from("direct:departuresImplLocal").description("Departures REST service implementation route")
.streamCaching()
.to("bean:departuresService?method=getDepartures");
// @formatter:on
}
}
|
[
"\"ARRIVALS_HOST\"",
"\"DEPARTURES_HOST\""
] |
[] |
[
"DEPARTURES_HOST",
"ARRIVALS_HOST"
] |
[]
|
["DEPARTURES_HOST", "ARRIVALS_HOST"]
|
java
| 2 | 0 | |
src/cnaas_nac/api/external/auth.py
|
import os
import json
from flask import request, make_response
from flask_restx import Resource, Namespace, fields
from flask_jwt_extended import jwt_required
from cnaas_nac.api.generic import empty_result
from cnaas_nac.tools.log import get_logger
from cnaas_nac.db.user import User, get_users, UserInfo
from cnaas_nac.db.nas import NasPort
from cnaas_nac.db.reply import Reply
from cnaas_nac.api.external.coa import CoA
from cnaas_nac.version import __api_version__
from netaddr import EUI, mac_unix_expanded
logger = get_logger()
api = Namespace('auth', description='Authentication API',
prefix='/api/{}'.format(__api_version__))
user_edit = api.model('auth_enable', {
'enable': fields.Boolean(required=False),
'vlan': fields.String(required=False),
'comment': fields.String(required=False),
'bounce': fields.String(required=False)
})
class AuthApi(Resource):
@jwt_required
def get(self):
"""
Get a JSON blob with all users, replies and other information.
"""
field = None
condition = ''
direction = 'username'
when = None
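# Optional filter[field]=value, sort and when query parameters narrow and order the returned users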
for arg in request.args:
if 'filter' in arg:
field = arg[arg.find('[')+1: arg.find(']')]
condition = request.args[arg].split('?')[0]
if 'sort' in arg:
direction = request.args[arg]
if 'when' in arg:
when = request.args[arg]
users = get_users(field=field, condition=condition,
order=direction, when=when)
response = make_response(json.dumps(empty_result(status='success',
data=users)), 200)
response.headers['X-Total-Count'] = len(users)
response.headers['Content-Type'] = 'application/json'
return response
@jwt_required
def post(self):
"""
Add a user manually.
"""
if 'RADIUS_SLAVE' in os.environ:
if os.environ['RADIUS_SLAVE'] == 'yes':
return empty_result(status='error',
data='Users can only be added to master server.'), 400
json_data = request.get_json()
if 'username' not in json_data:
return empty_result(status='error',
data='username is a required argument'), 400
try:
username = str(EUI(
json_data['username'], dialect=mac_unix_expanded))
except Exception:
username = json_data['username']
if 'password' in json_data:
try:
password = str(EUI(
json_data['password'], dialect=mac_unix_expanded))
except Exception:
password = json_data['password']
else:
password = username
if 'vlan' in json_data:
vlan = json_data['vlan']
else:
if 'RADIUS_DEFAULT_VLAN' in os.environ:
vlan = os.environ['RADIUS_DEFAULT_VLAN']
else:
vlan = 13
if 'comment' in json_data:
comment = json_data['comment']
else:
comment = None
if 'nas_identifier' not in json_data:
nas_identifier = None
else:
nas_identifier = json_data['nas_identifier']
if 'nas_port_id' not in json_data:
nas_port_id = None
else:
nas_port_id = json_data['nas_port_id']
if 'nas_ip_address' not in json_data:
nas_ip_address = None
else:
nas_ip_address = json_data['nas_ip_address']
if 'calling_station_id' not in json_data:
calling_station_id = None
else:
calling_station_id = json_data['calling_station_id']
if 'called_station_id' not in json_data:
called_station_id = None
else:
called_station_id = json_data['called_station_id']
if nas_identifier == "" or nas_identifier is None:
nas_identifier = username
err = User.add(username, password)
if err != "":
return empty_result(status="error", data=err), 400
err = Reply.add(username, vlan)
if err != "":
return empty_result(status="error", data=err), 400
err = UserInfo.add(username, comment)
if err != "":
return empty_result(status="error", data=err), 400
err = NasPort.add(username, nas_ip_address, nas_identifier, nas_port_id,
calling_station_id,
called_station_id)
if err != "":
return empty_result(status="error", data=err), 400
if 'active' in json_data and isinstance(json_data['active'], bool):
if json_data['active']:
User.enable(username)
user = get_users(field='username', condition=username)
response = make_response(json.dumps(empty_result(status='success',
data=user)), 200)
return response
class AuthApiByName(Resource):
@jwt_required
def get(self, username):
"""
Return a JSON blob with all users, VLANs and other information.
"""
users = get_users(field='username', condition=username)
response = make_response(json.dumps(empty_result(status='success',
data=users)), 200)
response.headers['X-Total-Count'] = len(users)
response.headers['Content-Type'] = 'application/json'
return response
@api.expect(user_edit)
@jwt_required
def put(self, username):
"""
Update user parameters such as VLAN, if the user is
enabled/disabled and so on.
"""
json_data = request.get_json()
result = ''
if json_data is None:
return empty_result(status='error', data='No JSON input found'), 400
if 'active' in json_data:
if json_data['active'] is True:
result = User.enable(username)
else:
result = User.disable(username)
UserInfo.add(username, reason='', auth=True)
if 'vlan' in json_data:
result = Reply.vlan(username, json_data['vlan'])
if 'comment' in json_data:
result = UserInfo.add(username, comment=json_data['comment'])
if 'bounce' in json_data and json_data['bounce'] is True:
userdata = get_users(field='username', condition=username)
if userdata == []:
return empty_result(status='error', data='User not found')
nas_ip_address = userdata[0]['nas_ip_address']
nas_port_id = userdata[0]['nas_port_id']
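# Bounce the switch port by sending a RADIUS CoA request carrying the Arista-PortFlap attribute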
attrs = {
'NAS-IP-Address': nas_ip_address,
'NAS-Port-Id': nas_port_id,
'Arista-PortFlap': '1'
}
if 'RADIUS_COA_SECRET' not in os.environ:
return empty_result(status='error', data='CoA secret not configured.'), 400
secret = str.encode(os.environ['RADIUS_COA_SECRET'])
try:
coa_request = CoA(nas_ip_address, secret)
coa_request.send_packet(attrs=attrs)
except Exception as e:
result = str(e)
if result != '':
return empty_result(status='error', data=result), 400
return empty_result(status='success',
data=get_users(field='username', condition=username))
@jwt_required
def delete(self, username):
"""
Remove a user.
"""
errors = []
result = User.delete(username)
if result != '':
errors.append(result)
result = Reply.delete(username)
if result != '':
errors.append(result)
result = NasPort.delete(username)
if result != '':
errors.append(result)
result = UserInfo.delete(username)
if result != '':
errors.append(result)
if errors != []:
return empty_result(status='error', data=errors), 400
return empty_result(status='success', data=[])
api.add_resource(AuthApi, '')
api.add_resource(AuthApi, '/')
api.add_resource(AuthApiByName, '/<string:username>')
api.add_resource(AuthApiByName, '/<string:username>/')
|
[] |
[] |
[
"RADIUS_SLAVE",
"RADIUS_DEFAULT_VLAN",
"RADIUS_COA_SECRET"
] |
[]
|
["RADIUS_SLAVE", "RADIUS_DEFAULT_VLAN", "RADIUS_COA_SECRET"]
|
python
| 3 | 0 | |
01-Login/routes/logout/logout.go
|
package logout
import (
"net/http"
"net/url"
)
func LogoutHandler(w http.ResponseWriter, r *http.Request) {
//domain := os.Getenv("AUTH0_DOMAIN")
//domain := "https://gotem.auth.us-east-1.amazoncognito.com/logout?client_id=2htjam9t68mkehjg06j0hbb929&logout_uri=http://localhost:4242"
domain := "https://gotem.auth.us-east-1.amazoncognito.com"
logoutUrl, err := url.Parse(domain)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
logoutUrl.Path += "/logout"
parameters := url.Values{}
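// Build the post-logout redirect URL (logout_uri) from the incoming request's scheme and host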
var scheme string
if r.TLS == nil {
scheme = "http"
} else {
scheme = "https"
}
returnTo, err := url.Parse(scheme + "://" + r.Host)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
parameters.Add("logout_uri", returnTo.String())
//parameters.Add("client_id", os.Getenv("AUTH0_CLIENT_ID"))
parameters.Add("client_id", "2htjam9t68mkehjg06j0hbb929")
logoutUrl.RawQuery = parameters.Encode()
http.Redirect(w, r, logoutUrl.String(), http.StatusTemporaryRedirect)
}
|
[
"\"AUTH0_DOMAIN\"",
"\"AUTH0_CLIENT_ID\""
] |
[] |
[
"AUTH0_DOMAIN",
"AUTH0_CLIENT_ID"
] |
[]
|
["AUTH0_DOMAIN", "AUTH0_CLIENT_ID"]
|
go
| 2 | 0 | |
setup.py
|
import os
import sys
import re
import subprocess
from setuptools import setup, Extension, find_namespace_packages
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
def __init__(self, name, sourcedir='', namespace=''):
super().__init__(name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
self.namespace = namespace
class CMakeBuild(build_ext):
def build_extension(self, ext):
from setuptools_scm import get_version
version = get_version(root='.', relative_to=__file__)
self.package = ext.namespace
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cmake_generator = os.environ.get("CMAKE_GENERATOR", "")
cfg = 'Debug' if self.debug else 'Release'
cmake_args = [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(extdir),
"-DPYTHON_EXECUTABLE={}".format(sys.executable),
"-DQMAP_VERSION_INFO={}".format(version),
"-DCMAKE_BUILD_TYPE={}".format(cfg),
"-DBINDINGS=ON"
]
build_args = []
if self.compiler.compiler_type != "msvc":
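# Default to the Ninja generator when none was requested via CMAKE_GENERATOR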
if not cmake_generator:
cmake_args += ["-GNinja"]
else:
# Single config generators are handled "normally"
single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})
# CMake allows an arch-in-generator style for backward compatibility
contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})
# Convert distutils Windows platform specifiers to CMake -A arguments
plat_to_cmake = {
"win32": "Win32",
"win-amd64": "x64",
"win-arm32": "ARM",
"win-arm64": "ARM64",
}
# Specify the arch if using MSVC generator, but only if it doesn't
# contain a backward-compatibility arch spec already in the
# generator name.
if not single_config and not contains_arch:
cmake_args += ["-A", plat_to_cmake[self.plat_name]]
# Multi-config generators have a different way to specify configs
if not single_config:
cmake_args += ["-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)]
build_args += ["--config", cfg]
# cross-compile support for macOS - respect ARCHFLAGS if set
if sys.platform.startswith("darwin"):
archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
if archs:
cmake_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))]
# Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level across all generators.
if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
if hasattr(self, "parallel") and self.parallel:
build_args += ["-j{}".format(self.parallel)]
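# On Windows, select the ClangCl toolset (-T is honoured by Visual Studio generators)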
if sys.platform == "win32":
cmake_args += ['-T', 'ClangCl']
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
else:
os.remove(self.build_temp + "/CMakeCache.txt")
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp)
subprocess.check_call(['cmake', '--build', '.', '--target', ext.name] + build_args, cwd=self.build_temp)
README_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'README.md')
with open(README_PATH) as readme_file:
README = readme_file.read()
setup(
name='mqt.qmap',
author='Lukas Burgholzer',
author_email='[email protected]',
description='A tool for Quantum Circuit Mapping',
long_description=README,
long_description_content_type="text/markdown",
license="MIT",
url="https://www.cda.cit.tum.de/research/ibm_qx_mapping/",
ext_modules=[CMakeExtension('pyqmap', namespace='mqt.qmap')],
cmdclass={"build_ext": CMakeBuild},
zip_safe=False,
packages=find_namespace_packages(include=['mqt.*']),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
"Programming Language :: C++",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
],
keywords="MQT quantum compilation mapping",
project_urls={
'Source': 'https://github.com/cda-tum/qmap/',
'Tracker': 'https://github.com/cda-tum/qmap/issues',
'Research': 'https://www.cda.cit.tum.de/research/ibm_qx_mapping/',
}
)
|
[] |
[] |
[
"CMAKE_GENERATOR",
"ARCHFLAGS"
] |
[]
|
["CMAKE_GENERATOR", "ARCHFLAGS"]
|
python
| 2 | 0 | |
BaseTools/Source/Python/Eot/EotMain.py
|
## @file
# This file is used to be the main entrance of EOT tool
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
import Common.LongFilePathOs as os, time, glob
import Common.EdkLogger as EdkLogger
import Eot.EotGlobalData as EotGlobalData
from optparse import OptionParser
from Common.StringUtils import NormPath
from Common import BuildToolError
from Common.Misc import GuidStructureStringToGuidString
from collections import OrderedDict as sdict
from Eot.Parser import *
from Eot.InfParserLite import EdkInfParser
from Common.StringUtils import GetSplitValueList
from Eot import c
from Eot import Database
from array import array
from Eot.Report import Report
from Common.BuildVersion import gBUILD_VERSION
from Eot.Parser import ConvertGuid
from Common.LongFilePathSupport import OpenLongFilePath as open
import struct
import uuid
import copy
import codecs
from GenFds.AprioriSection import DXE_APRIORI_GUID, PEI_APRIORI_GUID
gGuidStringFormat = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X"
gIndention = -4
class Image(array):
_HEADER_ = struct.Struct("")
_HEADER_SIZE_ = _HEADER_.size
def __new__(cls, *args, **kwargs):
return array.__new__(cls, 'B')
def __init__(self, ID=None):
if ID is None:
self._ID_ = str(uuid.uuid1()).upper()
else:
self._ID_ = ID
self._BUF_ = None
self._LEN_ = None
self._OFF_ = None
self._SubImages = sdict() # {offset: Image()}
array.__init__(self)
def __repr__(self):
return self._ID_
def __len__(self):
Len = array.__len__(self)
for Offset in self._SubImages.keys():
Len += len(self._SubImages[Offset])
return Len
def _Unpack(self):
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _Pack(self, PadByte=0xFF):
raise NotImplementedError
def frombuffer(self, Buffer, Offset=0, Size=None):
self._BUF_ = Buffer
self._OFF_ = Offset
# we may need the Size information in advance if it's given
self._LEN_ = Size
self._LEN_ = self._Unpack()
def empty(self):
del self[0:]
def GetField(self, FieldStruct, Offset=0):
return FieldStruct.unpack_from(self, Offset)
def SetField(self, FieldStruct, Offset, *args):
# check if there's enough space
Size = FieldStruct.size
if Size > len(self):
self.extend([0] * (Size - len(self)))
FieldStruct.pack_into(self, Offset, *args)
def _SetData(self, Data):
if len(self) < self._HEADER_SIZE_:
self.extend([0] * (self._HEADER_SIZE_ - len(self)))
else:
del self[self._HEADER_SIZE_:]
self.extend(Data)
def _GetData(self):
if len(self) > self._HEADER_SIZE_:
return self[self._HEADER_SIZE_:]
return None
Data = property(_GetData, _SetData)
## CompressedImage() class
#
# A class for Compressed Image
#
class CompressedImage(Image):
# UncompressedLength = 4-byte
# CompressionType = 1-byte
_HEADER_ = struct.Struct("1I 1B")
_HEADER_SIZE_ = _HEADER_.size
_ORIG_SIZE_ = struct.Struct("1I")
_CMPRS_TYPE_ = struct.Struct("4x 1B")
def __init__(self, CompressedData=None, CompressionType=None, UncompressedLength=None):
Image.__init__(self)
if UncompressedLength is not None:
self.UncompressedLength = UncompressedLength
if CompressionType is not None:
self.CompressionType = CompressionType
if CompressedData is not None:
self.Data = CompressedData
def __str__(self):
global gIndention
S = "algorithm=%s uncompressed=%x" % (self.CompressionType, self.UncompressedLength)
for Sec in self.Sections:
S += '\n' + str(Sec)
return S
def _SetOriginalSize(self, Size):
self.SetField(self._ORIG_SIZE_, 0, Size)
def _GetOriginalSize(self):
return self.GetField(self._ORIG_SIZE_)[0]
def _SetCompressionType(self, Type):
self.SetField(self._CMPRS_TYPE_, 0, Type)
def _GetCompressionType(self):
return self.GetField(self._CMPRS_TYPE_)[0]
def _GetSections(self):
try:
TmpData = DeCompress('Efi', self[self._HEADER_SIZE_:])
DecData = array('B')
DecData.fromstring(TmpData)
except:
TmpData = DeCompress('Framework', self[self._HEADER_SIZE_:])
DecData = array('B')
DecData.fromstring(TmpData)
SectionList = []
Offset = 0
while Offset < len(DecData):
Sec = Section()
try:
Sec.frombuffer(DecData, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
except:
break
SectionList.append(Sec)
return SectionList
UncompressedLength = property(_GetOriginalSize, _SetOriginalSize)
CompressionType = property(_GetCompressionType, _SetCompressionType)
Sections = property(_GetSections)
## Ui() class
#
# A class for Ui
#
class Ui(Image):
_HEADER_ = struct.Struct("")
_HEADER_SIZE_ = 0
def __init__(self):
Image.__init__(self)
def __str__(self):
return self.String
def _Unpack(self):
# keep header in this Image object
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _GetUiString(self):
return codecs.utf_16_decode(self[0:-2].tostring())[0]
String = property(_GetUiString)
## Depex() class
#
# A class for Depex
#
class Depex(Image):
_HEADER_ = struct.Struct("")
_HEADER_SIZE_ = 0
_GUID_ = struct.Struct("1I2H8B")
_OPCODE_ = struct.Struct("1B")
_OPCODE_STRING_ = {
0x00 : "BEFORE",
0x01 : "AFTER",
0x02 : "PUSH",
0x03 : "AND",
0x04 : "OR",
0x05 : "NOT",
0x06 : "TRUE",
0x07 : "FALSE",
0x08 : "END",
0x09 : "SOR"
}
_NEXT_ = {
-1 : _OPCODE_, # first one in depex must be an opcode
0x00 : _GUID_, #"BEFORE",
0x01 : _GUID_, #"AFTER",
0x02 : _GUID_, #"PUSH",
0x03 : _OPCODE_, #"AND",
0x04 : _OPCODE_, #"OR",
0x05 : _OPCODE_, #"NOT",
0x06 : _OPCODE_, #"TRUE",
0x07 : _OPCODE_, #"FALSE",
0x08 : None, #"END",
0x09 : _OPCODE_, #"SOR"
}
def __init__(self):
Image.__init__(self)
self._ExprList = []
def __str__(self):
global gIndention
gIndention += 4
Indention = ' ' * gIndention
S = '\n'
for T in self.Expression:
if T in self._OPCODE_STRING_:
S += Indention + self._OPCODE_STRING_[T]
if T not in [0x00, 0x01, 0x02]:
S += '\n'
else:
S += ' ' + gGuidStringFormat % T + '\n'
gIndention -= 4
return S
def _Unpack(self):
# keep header in this Image object
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _GetExpression(self):
if self._ExprList == []:
Offset = 0
CurrentData = self._OPCODE_
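# Walk the raw bytes: the _NEXT_ table decides whether a GUID or another opcode follows each token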
while Offset < len(self):
Token = CurrentData.unpack_from(self, Offset)
Offset += CurrentData.size
if len(Token) == 1:
Token = Token[0]
if Token in self._NEXT_:
CurrentData = self._NEXT_[Token]
else:
CurrentData = self._GUID_
else:
CurrentData = self._OPCODE_
self._ExprList.append(Token)
if CurrentData is None:
break
return self._ExprList
Expression = property(_GetExpression)
## FirmwareVolume() class
#
# A class for Firmware Volume
#
class FirmwareVolume(Image):
# Read FvLength, Attributes, HeaderLength, Checksum
_HEADER_ = struct.Struct("16x 1I2H8B 1Q 4x 1I 1H 1H")
_HEADER_SIZE_ = _HEADER_.size
_FfsGuid = "8C8CE578-8A3D-4F1C-9935-896185C32DD3"
_GUID_ = struct.Struct("16x 1I2H8B")
_LENGTH_ = struct.Struct("16x 16x 1Q")
_SIG_ = struct.Struct("16x 16x 8x 1I")
_ATTR_ = struct.Struct("16x 16x 8x 4x 1I")
_HLEN_ = struct.Struct("16x 16x 8x 4x 4x 1H")
_CHECKSUM_ = struct.Struct("16x 16x 8x 4x 4x 2x 1H")
def __init__(self, Name=''):
Image.__init__(self)
self.Name = Name
self.FfsDict = sdict()
self.OrderedFfsDict = sdict()
self.UnDispatchedFfsDict = sdict()
self.ProtocolList = sdict()
def CheckArchProtocol(self):
for Item in EotGlobalData.gArchProtocolGuids:
if Item.lower() not in EotGlobalData.gProtocolList:
return False
return True
def ParseDepex(self, Depex, Type):
List = None
if Type == 'Ppi':
List = EotGlobalData.gPpiList
if Type == 'Protocol':
List = EotGlobalData.gProtocolList
DepexStack = []
DepexList = []
DepexString = ''
FileDepex = None
CouldBeLoaded = True
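# Evaluate the dependency expression opcode by opcode; PUSH (0x02) tests whether the referenced GUID is already in the produced Ppi/Protocol list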
for Index in range(0, len(Depex.Expression)):
Item = Depex.Expression[Index]
if Item == 0x00:
Index = Index + 1
Guid = gGuidStringFormat % Depex.Expression[Index]
if Guid in self.OrderedFfsDict and Depex.Expression[Index + 1] == 0x08:
return (True, 'BEFORE %s' % Guid, [Guid, 'BEFORE'])
elif Item == 0x01:
Index = Index + 1
Guid = gGuidStringFormat % Depex.Expression[Index]
if Guid in self.OrderedFfsDict and Depex.Expression[Index + 1] == 0x08:
return (True, 'AFTER %s' % Guid, [Guid, 'AFTER'])
elif Item == 0x02:
Index = Index + 1
Guid = gGuidStringFormat % Depex.Expression[Index]
if Guid.lower() in List:
DepexStack.append(True)
DepexList.append(Guid)
else:
DepexStack.append(False)
DepexList.append(Guid)
continue
elif Item == 0x03 or Item == 0x04:
DepexStack.append(eval(str(DepexStack.pop()) + ' ' + Depex._OPCODE_STRING_[Item].lower() + ' ' + str(DepexStack.pop())))
DepexList.append(str(DepexList.pop()) + ' ' + Depex._OPCODE_STRING_[Item].upper() + ' ' + str(DepexList.pop()))
elif Item == 0x05:
DepexStack.append(eval(Depex._OPCODE_STRING_[Item].lower() + ' ' + str(DepexStack.pop())))
DepexList.append(Depex._OPCODE_STRING_[Item].lower() + ' ' + str(DepexList.pop()))
elif Item == 0x06:
DepexStack.append(True)
DepexList.append('TRUE')
DepexString = DepexString + 'TRUE' + ' '
elif Item == 0x07:
DepexStack.append(False)
DepexList.append('False')
DepexString = DepexString + 'FALSE' + ' '
elif Item == 0x08:
if Index != len(Depex.Expression) - 1:
CouldBeLoaded = False
else:
CouldBeLoaded = DepexStack.pop()
else:
CouldBeLoaded = False
if DepexList != []:
DepexString = DepexList[0].strip()
return (CouldBeLoaded, DepexString, FileDepex)
def Dispatch(self, Db=None):
if Db is None:
return False
self.UnDispatchedFfsDict = copy.copy(self.FfsDict)
# Find PeiCore, DxeCore, PeiPriori, DxePriori first
FfsSecCoreGuid = None
FfsPeiCoreGuid = None
FfsDxeCoreGuid = None
FfsPeiPrioriGuid = None
FfsDxePrioriGuid = None
for FfsID in list(self.UnDispatchedFfsDict.keys()):
Ffs = self.UnDispatchedFfsDict[FfsID]
if Ffs.Type == 0x03:
FfsSecCoreGuid = FfsID
continue
if Ffs.Type == 0x04:
FfsPeiCoreGuid = FfsID
continue
if Ffs.Type == 0x05:
FfsDxeCoreGuid = FfsID
continue
if Ffs.Guid.lower() == PEI_APRIORI_GUID.lower():
FfsPeiPrioriGuid = FfsID
continue
if Ffs.Guid.lower() == DXE_APRIORI_GUID.lower():
FfsDxePrioriGuid = FfsID
continue
# Parse SEC_CORE first
if FfsSecCoreGuid is not None:
self.OrderedFfsDict[FfsSecCoreGuid] = self.UnDispatchedFfsDict.pop(FfsSecCoreGuid)
self.LoadPpi(Db, FfsSecCoreGuid)
# Parse PEI first
if FfsPeiCoreGuid is not None:
self.OrderedFfsDict[FfsPeiCoreGuid] = self.UnDispatchedFfsDict.pop(FfsPeiCoreGuid)
self.LoadPpi(Db, FfsPeiCoreGuid)
if FfsPeiPrioriGuid is not None:
# Load PEIMs described in the a priori file
FfsPeiPriori = self.UnDispatchedFfsDict.pop(FfsPeiPrioriGuid)
if len(FfsPeiPriori.Sections) == 1:
Section = FfsPeiPriori.Sections.popitem()[1]
if Section.Type == 0x19:
GuidStruct = struct.Struct('1I2H8B')
Start = 4
while len(Section) > Start:
Guid = GuidStruct.unpack_from(Section[Start : Start + 16])
GuidString = gGuidStringFormat % Guid
Start = Start + 16
if GuidString in self.UnDispatchedFfsDict:
self.OrderedFfsDict[GuidString] = self.UnDispatchedFfsDict.pop(GuidString)
self.LoadPpi(Db, GuidString)
self.DisPatchPei(Db)
# Parse DXE then
if FfsDxeCoreGuid is not None:
self.OrderedFfsDict[FfsDxeCoreGuid] = self.UnDispatchedFfsDict.pop(FfsDxeCoreGuid)
self.LoadProtocol(Db, FfsDxeCoreGuid)
if FfsDxePrioriGuid is not None:
# Load DXE drivers described in the a priori file
FfsDxePriori = self.UnDispatchedFfsDict.pop(FfsDxePrioriGuid)
if len(FfsDxePriori.Sections) == 1:
Section = FfsDxePriori.Sections.popitem()[1]
if Section.Type == 0x19:
GuidStruct = struct.Struct('1I2H8B')
Start = 4
while len(Section) > Start:
Guid = GuidStruct.unpack_from(Section[Start : Start + 16])
GuidString = gGuidStringFormat % Guid
Start = Start + 16
if GuidString in self.UnDispatchedFfsDict:
self.OrderedFfsDict[GuidString] = self.UnDispatchedFfsDict.pop(GuidString)
self.LoadProtocol(Db, GuidString)
self.DisPatchDxe(Db)
def LoadProtocol(self, Db, ModuleGuid):
SqlCommand = """select GuidValue from Report
where SourceFileFullPath in
(select Value1 from Inf where BelongsToFile =
(select BelongsToFile from Inf
where Value1 = 'FILE_GUID' and Value2 like '%s' and Model = %s)
and Model = %s)
and ItemType = 'Protocol' and ItemMode = 'Produced'""" \
% (ModuleGuid, 5001, 3007)
RecordSet = Db.TblReport.Exec(SqlCommand)
for Record in RecordSet:
SqlCommand = """select Value2 from Inf where BelongsToFile =
(select DISTINCT BelongsToFile from Inf
where Value1 =
(select SourceFileFullPath from Report
where GuidValue like '%s' and ItemMode = 'Callback'))
and Value1 = 'FILE_GUID'""" % Record[0]
CallBackSet = Db.TblReport.Exec(SqlCommand)
if CallBackSet != []:
EotGlobalData.gProtocolList[Record[0].lower()] = ModuleGuid
else:
EotGlobalData.gProtocolList[Record[0].lower()] = ModuleGuid
def LoadPpi(self, Db, ModuleGuid):
SqlCommand = """select GuidValue from Report
where SourceFileFullPath in
(select Value1 from Inf where BelongsToFile =
(select BelongsToFile from Inf
where Value1 = 'FILE_GUID' and Value2 like '%s' and Model = %s)
and Model = %s)
and ItemType = 'Ppi' and ItemMode = 'Produced'""" \
% (ModuleGuid, 5001, 3007)
RecordSet = Db.TblReport.Exec(SqlCommand)
for Record in RecordSet:
EotGlobalData.gPpiList[Record[0].lower()] = ModuleGuid
def DisPatchDxe(self, Db):
IsInstalled = False
ScheduleList = sdict()
for FfsID in list(self.UnDispatchedFfsDict.keys()):
CouldBeLoaded = False
DepexString = ''
FileDepex = None
Ffs = self.UnDispatchedFfsDict[FfsID]
if Ffs.Type == 0x07:
# Get Depex
IsFoundDepex = False
for Section in Ffs.Sections.values():
# Find Depex
if Section.Type == 0x13:
IsFoundDepex = True
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(Section._SubImages[4], 'Protocol')
break
if Section.Type == 0x01:
CompressSections = Section._SubImages[4]
for CompressSection in CompressSections.Sections:
if CompressSection.Type == 0x13:
IsFoundDepex = True
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(CompressSection._SubImages[4], 'Protocol')
break
if CompressSection.Type == 0x02:
NewSections = CompressSection._SubImages[4]
for NewSection in NewSections.Sections:
if NewSection.Type == 0x13:
IsFoundDepex = True
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(NewSection._SubImages[4], 'Protocol')
break
# Not find Depex
if not IsFoundDepex:
CouldBeLoaded = self.CheckArchProtocol()
DepexString = ''
FileDepex = None
# Append New Ffs
if CouldBeLoaded:
IsInstalled = True
NewFfs = self.UnDispatchedFfsDict.pop(FfsID)
NewFfs.Depex = DepexString
if FileDepex is not None:
ScheduleList.insert(FileDepex[1], FfsID, NewFfs, FileDepex[0])
else:
ScheduleList[FfsID] = NewFfs
else:
self.UnDispatchedFfsDict[FfsID].Depex = DepexString
for FfsID in ScheduleList.keys():
NewFfs = ScheduleList.pop(FfsID)
FfsName = 'UnKnown'
self.OrderedFfsDict[FfsID] = NewFfs
self.LoadProtocol(Db, FfsID)
SqlCommand = """select Value2 from Inf
where BelongsToFile = (select BelongsToFile from Inf where Value1 = 'FILE_GUID' and lower(Value2) = lower('%s') and Model = %s)
and Model = %s and Value1='BASE_NAME'""" % (FfsID, 5001, 5001)
RecordSet = Db.TblReport.Exec(SqlCommand)
if RecordSet != []:
FfsName = RecordSet[0][0]
if IsInstalled:
self.DisPatchDxe(Db)
def DisPatchPei(self, Db):
IsInstalled = False
for FfsID in list(self.UnDispatchedFfsDict.keys()):
CouldBeLoaded = True
DepexString = ''
FileDepex = None
Ffs = self.UnDispatchedFfsDict[FfsID]
if Ffs.Type == 0x06 or Ffs.Type == 0x08:
# Get Depex
for Section in Ffs.Sections.values():
if Section.Type == 0x1B:
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(Section._SubImages[4], 'Ppi')
break
if Section.Type == 0x01:
CompressSections = Section._SubImages[4]
for CompressSection in CompressSections.Sections:
if CompressSection.Type == 0x1B:
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(CompressSection._SubImages[4], 'Ppi')
break
if CompressSection.Type == 0x02:
NewSections = CompressSection._SubImages[4]
for NewSection in NewSections.Sections:
if NewSection.Type == 0x1B:
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(NewSection._SubImages[4], 'Ppi')
break
# Append New Ffs
if CouldBeLoaded:
IsInstalled = True
NewFfs = self.UnDispatchedFfsDict.pop(FfsID)
NewFfs.Depex = DepexString
self.OrderedFfsDict[FfsID] = NewFfs
self.LoadPpi(Db, FfsID)
else:
self.UnDispatchedFfsDict[FfsID].Depex = DepexString
if IsInstalled:
self.DisPatchPei(Db)
def __str__(self):
global gIndention
gIndention += 4
FvInfo = '\n' + ' ' * gIndention
FvInfo += "[FV:%s] file_system=%s size=%x checksum=%s\n" % (self.Name, self.FileSystemGuid, self.Size, self.Checksum)
FfsInfo = "\n".join([str(self.FfsDict[FfsId]) for FfsId in self.FfsDict])
gIndention -= 4
return FvInfo + FfsInfo
def _Unpack(self):
Size = self._LENGTH_.unpack_from(self._BUF_, self._OFF_)[0]
self.empty()
self.extend(self._BUF_[self._OFF_:self._OFF_ + Size])
# traverse the FFS
EndOfFv = Size
FfsStartAddress = self.HeaderSize
LastFfsObj = None
while FfsStartAddress < EndOfFv:
FfsObj = Ffs()
FfsObj.frombuffer(self, FfsStartAddress)
FfsId = repr(FfsObj)
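# A file whose size field is all ones (or all zeros, depending on the FV erase polarity attribute 0x800) marks the start of free space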
if ((self.Attributes & 0x00000800) != 0 and len(FfsObj) == 0xFFFFFF) \
or ((self.Attributes & 0x00000800) == 0 and len(FfsObj) == 0):
if LastFfsObj is not None:
LastFfsObj.FreeSpace = EndOfFv - LastFfsObj._OFF_ - len(LastFfsObj)
else:
if FfsId in self.FfsDict:
EdkLogger.error("FV", 0, "Duplicate GUID in FFS",
ExtraData="\t%s @ %s\n\t%s @ %s" \
% (FfsObj.Guid, FfsObj.Offset,
self.FfsDict[FfsId].Guid, self.FfsDict[FfsId].Offset))
self.FfsDict[FfsId] = FfsObj
if LastFfsObj is not None:
LastFfsObj.FreeSpace = FfsStartAddress - LastFfsObj._OFF_ - len(LastFfsObj)
FfsStartAddress += len(FfsObj)
#
# align to next 8-byte aligned address: A = (A + 8 - 1) & (~(8 - 1))
# The next FFS must start at the next 8-byte aligned address
#
FfsStartAddress = (FfsStartAddress + 7) & (~7)
LastFfsObj = FfsObj
def _GetAttributes(self):
return self.GetField(self._ATTR_, 0)[0]
def _GetSize(self):
return self.GetField(self._LENGTH_, 0)[0]
def _GetChecksum(self):
return self.GetField(self._CHECKSUM_, 0)[0]
def _GetHeaderLength(self):
return self.GetField(self._HLEN_, 0)[0]
def _GetFileSystemGuid(self):
return gGuidStringFormat % self.GetField(self._GUID_, 0)
Attributes = property(_GetAttributes)
Size = property(_GetSize)
Checksum = property(_GetChecksum)
HeaderSize = property(_GetHeaderLength)
FileSystemGuid = property(_GetFileSystemGuid)
## GuidDefinedImage() class
#
# A class for GUID Defined Image
#
class GuidDefinedImage(Image):
_HEADER_ = struct.Struct("1I2H8B 1H 1H")
_HEADER_SIZE_ = _HEADER_.size
_GUID_ = struct.Struct("1I2H8B")
_DATA_OFFSET_ = struct.Struct("16x 1H")
_ATTR_ = struct.Struct("18x 1H")
CRC32_GUID = "FC1BCDB0-7D31-49AA-936A-A4600D9DD083"
TIANO_COMPRESS_GUID = 'A31280AD-481E-41B6-95E8-127F4C984779'
LZMA_COMPRESS_GUID = 'EE4E5898-3914-4259-9D6E-DC7BD79403CF'
def __init__(self, SectionDefinitionGuid=None, DataOffset=None, Attributes=None, Data=None):
Image.__init__(self)
if SectionDefinitionGuid is not None:
self.SectionDefinitionGuid = SectionDefinitionGuid
if DataOffset is not None:
self.DataOffset = DataOffset
if Attributes is not None:
self.Attributes = Attributes
if Data is not None:
self.Data = Data
def __str__(self):
S = "guid=%s" % (gGuidStringFormat % self.SectionDefinitionGuid)
for Sec in self.Sections:
S += "\n" + str(Sec)
return S
def _Unpack(self):
# keep header in this Image object
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _SetAttribute(self, Attribute):
self.SetField(self._ATTR_, 0, Attribute)
def _GetAttribute(self):
return self.GetField(self._ATTR_)[0]
def _SetGuid(self, Guid):
self.SetField(self._GUID_, 0, Guid)
def _GetGuid(self):
return self.GetField(self._GUID_)
def _SetDataOffset(self, Offset):
self.SetField(self._DATA_OFFSET_, 0, Offset)
def _GetDataOffset(self):
return self.GetField(self._DATA_OFFSET_)[0]
def _GetSections(self):
SectionList = []
Guid = gGuidStringFormat % self.SectionDefinitionGuid
if Guid == self.CRC32_GUID:
# skip the CRC32 value, we don't do CRC32 verification here
Offset = self.DataOffset - 4
while Offset < len(self):
Sec = Section()
try:
Sec.frombuffer(self, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
Offset = (Offset + 3) & (~3)
except:
break
SectionList.append(Sec)
elif Guid == self.TIANO_COMPRESS_GUID:
try:
# skip the header
Offset = self.DataOffset - 4
TmpData = DeCompress('Framework', self[self.Offset:])
DecData = array('B')
DecData.fromstring(TmpData)
Offset = 0
while Offset < len(DecData):
Sec = Section()
try:
Sec.frombuffer(DecData, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
Offset = (Offset + 3) & (~3)
except:
break
SectionList.append(Sec)
except:
pass
elif Guid == self.LZMA_COMPRESS_GUID:
try:
# skip the header
Offset = self.DataOffset - 4
TmpData = DeCompress('Lzma', self[self.Offset:])
DecData = array('B')
DecData.fromstring(TmpData)
Offset = 0
while Offset < len(DecData):
Sec = Section()
try:
Sec.frombuffer(DecData, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
Offset = (Offset + 3) & (~3)
except:
break
SectionList.append(Sec)
except:
pass
return SectionList
Attributes = property(_GetAttribute, _SetAttribute)
SectionDefinitionGuid = property(_GetGuid, _SetGuid)
DataOffset = property(_GetDataOffset, _SetDataOffset)
Sections = property(_GetSections)
## Section() class
#
# A class for Section
#
class Section(Image):
_TypeName = {
0x00 : "<unknown>",
0x01 : "COMPRESSION",
0x02 : "GUID_DEFINED",
0x10 : "PE32",
0x11 : "PIC",
0x12 : "TE",
0x13 : "DXE_DEPEX",
0x14 : "VERSION",
0x15 : "USER_INTERFACE",
0x16 : "COMPATIBILITY16",
0x17 : "FIRMWARE_VOLUME_IMAGE",
0x18 : "FREEFORM_SUBTYPE_GUID",
0x19 : "RAW",
0x1B : "PEI_DEPEX"
}
_SectionSubImages = {
0x01 : CompressedImage,
0x02 : GuidDefinedImage,
0x17 : FirmwareVolume,
0x13 : Depex,
0x1B : Depex,
0x15 : Ui
}
# Size = 3-byte
# Type = 1-byte
_HEADER_ = struct.Struct("3B 1B")
_HEADER_SIZE_ = _HEADER_.size
# SubTypeGuid
# _FREE_FORM_SUBTYPE_GUID_HEADER_ = struct.Struct("1I2H8B")
_SIZE_ = struct.Struct("3B")
_TYPE_ = struct.Struct("3x 1B")
def __init__(self, Type=None, Size=None):
Image.__init__(self)
self._Alignment = 1
if Type is not None:
self.Type = Type
if Size is not None:
self.Size = Size
def __str__(self):
global gIndention
gIndention += 4
SectionInfo = ' ' * gIndention
if self.Type in self._TypeName:
SectionInfo += "[SECTION:%s] offset=%x size=%x" % (self._TypeName[self.Type], self._OFF_, self.Size)
else:
SectionInfo += "[SECTION:%x<unknown>] offset=%x size=%x " % (self.Type, self._OFF_, self.Size)
for Offset in self._SubImages.keys():
SectionInfo += ", " + str(self._SubImages[Offset])
gIndention -= 4
return SectionInfo
def _Unpack(self):
self.empty()
Type, = self._TYPE_.unpack_from(self._BUF_, self._OFF_)
Size1, Size2, Size3 = self._SIZE_.unpack_from(self._BUF_, self._OFF_)
Size = Size1 + (Size2 << 8) + (Size3 << 16)
if Type not in self._SectionSubImages:
# no need to extract sub-image, keep all in this Image object
self.extend(self._BUF_[self._OFF_ : self._OFF_ + Size])
else:
# keep header in this Image object
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._HEADER_SIZE_])
#
# use new Image object to represent payload, which may be another kind
# of image such as PE32
#
PayloadOffset = self._HEADER_SIZE_
PayloadLen = self.Size - self._HEADER_SIZE_
Payload = self._SectionSubImages[self.Type]()
Payload.frombuffer(self._BUF_, self._OFF_ + self._HEADER_SIZE_, PayloadLen)
self._SubImages[PayloadOffset] = Payload
return Size
def _SetSize(self, Size):
Size1 = Size & 0xFF
Size2 = (Size & 0xFF00) >> 8
Size3 = (Size & 0xFF0000) >> 16
self.SetField(self._SIZE_, 0, Size1, Size2, Size3)
def _GetSize(self):
Size1, Size2, Size3 = self.GetField(self._SIZE_)
return Size1 + (Size2 << 8) + (Size3 << 16)
def _SetType(self, Type):
self.SetField(self._TYPE_, 0, Type)
def _GetType(self):
return self.GetField(self._TYPE_)[0]
def _GetAlignment(self):
return self._Alignment
def _SetAlignment(self, Alignment):
self._Alignment = Alignment
AlignmentMask = Alignment - 1
# section alignment is actually for payload, so we need to add header size
PayloadOffset = self._OFF_ + self._HEADER_SIZE_
if (PayloadOffset & (~AlignmentMask)) == 0:
return
NewOffset = (PayloadOffset + AlignmentMask) & (~AlignmentMask)
while (NewOffset - PayloadOffset) < self._HEADER_SIZE_:
NewOffset += self._Alignment
def tofile(self, f):
self.Size = len(self)
Image.tofile(self, f)
for Offset in self._SubImages:
self._SubImages[Offset].tofile(f)
Type = property(_GetType, _SetType)
Size = property(_GetSize, _SetSize)
Alignment = property(_GetAlignment, _SetAlignment)
## Ffs() class
#
# A class for Ffs Section
#
class Ffs(Image):
_FfsFormat = "24B%(payload_size)sB"
# skip IntegrityCheck
_HEADER_ = struct.Struct("1I2H8B 2x 1B 1B 3B 1B")
_HEADER_SIZE_ = _HEADER_.size
_NAME_ = struct.Struct("1I2H8B")
_INT_CHECK_ = struct.Struct("16x 1H")
_TYPE_ = struct.Struct("18x 1B")
_ATTR_ = struct.Struct("19x 1B")
_SIZE_ = struct.Struct("20x 3B")
_STATE_ = struct.Struct("23x 1B")
FFS_ATTRIB_FIXED = 0x04
FFS_ATTRIB_DATA_ALIGNMENT = 0x38
FFS_ATTRIB_CHECKSUM = 0x40
_TypeName = {
0x00 : "<unknown>",
0x01 : "RAW",
0x02 : "FREEFORM",
0x03 : "SECURITY_CORE",
0x04 : "PEI_CORE",
0x05 : "DXE_CORE",
0x06 : "PEIM",
0x07 : "DRIVER",
0x08 : "COMBINED_PEIM_DRIVER",
0x09 : "APPLICATION",
0x0A : "SMM",
0x0B : "FIRMWARE_VOLUME_IMAGE",
0x0C : "COMBINED_SMM_DXE",
0x0D : "SMM_CORE",
0x0E : "MM_STANDALONE",
0x0F : "MM_CORE_STANDALONE",
0xc0 : "OEM_MIN",
0xdf : "OEM_MAX",
0xe0 : "DEBUG_MIN",
0xef : "DEBUG_MAX",
0xf0 : "FFS_MIN",
0xff : "FFS_MAX",
0xf0 : "FFS_PAD",
}
def __init__(self):
Image.__init__(self)
self.FreeSpace = 0
self.Sections = sdict()
self.Depex = ''
self.__ID__ = None
def __str__(self):
global gIndention
gIndention += 4
Indention = ' ' * gIndention
FfsInfo = Indention
FfsInfo += "[FFS:%s] offset=%x size=%x guid=%s free_space=%x alignment=%s\n" % \
(Ffs._TypeName[self.Type], self._OFF_, self.Size, self.Guid, self.FreeSpace, self.Alignment)
SectionInfo = '\n'.join([str(self.Sections[Offset]) for Offset in self.Sections.keys()])
gIndention -= 4
return FfsInfo + SectionInfo + "\n"
def __len__(self):
return self.Size
def __repr__(self):
return self.__ID__
def _Unpack(self):
Size1, Size2, Size3 = self._SIZE_.unpack_from(self._BUF_, self._OFF_)
Size = Size1 + (Size2 << 8) + (Size3 << 16)
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + Size])
# Pad FFS may use the same GUID. We need to avoid it.
if self.Type == 0xf0:
self.__ID__ = str(uuid.uuid1()).upper()
else:
self.__ID__ = self.Guid
# Traverse the SECTION. RAW and PAD do not have sections
if self.Type not in [0xf0, 0x01] and Size > 0 and Size < 0xFFFFFF:
EndOfFfs = Size
SectionStartAddress = self._HEADER_SIZE_
while SectionStartAddress < EndOfFfs:
SectionObj = Section()
SectionObj.frombuffer(self, SectionStartAddress)
#f = open(repr(SectionObj), 'wb')
#SectionObj.Size = 0
#SectionObj.tofile(f)
#f.close()
self.Sections[SectionStartAddress] = SectionObj
SectionStartAddress += len(SectionObj)
SectionStartAddress = (SectionStartAddress + 3) & (~3)
def Pack(self):
pass
def SetFreeSpace(self, Size):
self.FreeSpace = Size
def _GetGuid(self):
return gGuidStringFormat % self.Name
def _SetName(self, Value):
# Guid1, Guid2, Guid3, Guid4, Guid5, Guid6, Guid7, Guid8, Guid9, Guid10, Guid11
self.SetField(self._NAME_, 0, Value)
def _GetName(self):
# Guid1, Guid2, Guid3, Guid4, Guid5, Guid6, Guid7, Guid8, Guid9, Guid10, Guid11
return self.GetField(self._NAME_)
def _SetSize(self, Size):
Size1 = Size & 0xFF
Size2 = (Size & 0xFF00) >> 8
Size3 = (Size & 0xFF0000) >> 16
self.SetField(self._SIZE_, 0, Size1, Size2, Size3)
def _GetSize(self):
Size1, Size2, Size3 = self.GetField(self._SIZE_)
return Size1 + (Size2 << 8) + (Size3 << 16)
def _SetType(self, Type):
self.SetField(self._TYPE_, 0, Type)
def _GetType(self):
return self.GetField(self._TYPE_)[0]
def _SetAttributes(self, Value):
self.SetField(self._ATTR_, 0, Value)
def _GetAttributes(self):
return self.GetField(self._ATTR_)[0]
def _GetFixed(self):
if (self.Attributes & self.FFS_ATTRIB_FIXED) != 0:
return True
return False
def _GetCheckSum(self):
if (self.Attributes & self.FFS_ATTRIB_CHECKSUM) != 0:
return True
return False
def _GetAlignment(self):
return (self.Attributes & self.FFS_ATTRIB_DATA_ALIGNMENT) >> 3
def _SetState(self, Value):
self.SetField(self._STATE_, 0, Value)
def _GetState(self):
return self.GetField(self._STATE_)[0]
Name = property(_GetName, _SetName)
Guid = property(_GetGuid)
Type = property(_GetType, _SetType)
Size = property(_GetSize, _SetSize)
Attributes = property(_GetAttributes, _SetAttributes)
Fixed = property(_GetFixed)
Checksum = property(_GetCheckSum)
Alignment = property(_GetAlignment)
State = property(_GetState, _SetState)
## MultipleFv() class
#
# A class for Multiple FV
#
class MultipleFv(FirmwareVolume):
def __init__(self, FvList):
FirmwareVolume.__init__(self)
self.BasicInfo = []
for FvPath in FvList:
Fd = None
FvName = os.path.splitext(os.path.split(FvPath)[1])[0]
if FvPath.strip():
Fd = open(FvPath, 'rb')
Buf = array('B')
try:
Buf.fromfile(Fd, os.path.getsize(FvPath))
except EOFError:
pass
Fv = FirmwareVolume(FvName)
Fv.frombuffer(Buf, 0, len(Buf))
self.BasicInfo.append([Fv.Name, Fv.FileSystemGuid, Fv.Size])
self.FfsDict.update(Fv.FfsDict)
## Class Eot
#
# This class is used to define Eot main entrance
#
# @param object: Inherited from object class
#
class Eot(object):
## The constructor
#
# @param self: The object pointer
#
def __init__(self, CommandLineOption=True, IsInit=True, SourceFileList=None, \
IncludeDirList=None, DecFileList=None, GuidList=None, LogFile=None,
FvFileList="", MapFileList="", Report='Report.html', Dispatch=None):
# Version and Copyright
self.VersionNumber = ("0.02" + " " + gBUILD_VERSION)
self.Version = "%prog Version " + self.VersionNumber
self.Copyright = "Copyright (c) 2008 - 2018, Intel Corporation All rights reserved."
self.Report = Report
self.IsInit = IsInit
self.SourceFileList = SourceFileList
self.IncludeDirList = IncludeDirList
self.DecFileList = DecFileList
self.GuidList = GuidList
self.LogFile = LogFile
self.FvFileList = FvFileList
self.MapFileList = MapFileList
self.Dispatch = Dispatch
# Check workspace environment
if "EFI_SOURCE" not in os.environ:
if "EDK_SOURCE" not in os.environ:
pass
else:
EotGlobalData.gEDK_SOURCE = os.path.normpath(os.getenv("EDK_SOURCE"))
else:
EotGlobalData.gEFI_SOURCE = os.path.normpath(os.getenv("EFI_SOURCE"))
EotGlobalData.gEDK_SOURCE = os.path.join(EotGlobalData.gEFI_SOURCE, 'Edk')
if "WORKSPACE" not in os.environ:
EdkLogger.error("EOT", BuildToolError.ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="WORKSPACE")
else:
EotGlobalData.gWORKSPACE = os.path.normpath(os.getenv("WORKSPACE"))
EotGlobalData.gMACRO['WORKSPACE'] = EotGlobalData.gWORKSPACE
EotGlobalData.gMACRO['EFI_SOURCE'] = EotGlobalData.gEFI_SOURCE
EotGlobalData.gMACRO['EDK_SOURCE'] = EotGlobalData.gEDK_SOURCE
# Parse the options and args
if CommandLineOption:
self.ParseOption()
if self.FvFileList:
for FvFile in GetSplitValueList(self.FvFileList, ' '):
FvFile = os.path.normpath(FvFile)
if not os.path.isfile(FvFile):
EdkLogger.error("Eot", EdkLogger.EOT_ERROR, "Can not find file %s " % FvFile)
EotGlobalData.gFV_FILE.append(FvFile)
else:
EdkLogger.error("Eot", EdkLogger.EOT_ERROR, "The fv file list of target platform was not specified")
if self.MapFileList:
for MapFile in GetSplitValueList(self.MapFileList, ' '):
MapFile = os.path.normpath(MapFile)
if not os.path.isfile(MapFile):
EdkLogger.error("Eot", EdkLogger.EOT_ERROR, "Can not find file %s " % MapFile)
EotGlobalData.gMAP_FILE.append(MapFile)
# Generate source file list
self.GenerateSourceFileList(self.SourceFileList, self.IncludeDirList)
# Generate guid list of dec file list
self.ParseDecFile(self.DecFileList)
# Generate guid list from GUID list file
self.ParseGuidList(self.GuidList)
# Init Eot database
EotGlobalData.gDb = Database.Database(Database.DATABASE_PATH)
EotGlobalData.gDb.InitDatabase(self.IsInit)
# Build ECC database
self.BuildDatabase()
# Parse Ppi/Protocol
self.ParseExecutionOrder()
# Merge Identifier tables
self.GenerateQueryTable()
# Generate report database
self.GenerateReportDatabase()
# Load Fv Info
self.LoadFvInfo()
# Load Map Info
self.LoadMapInfo()
# Generate Report
self.GenerateReport()
# Convert log file
self.ConvertLogFile(self.LogFile)
# DONE
EdkLogger.quiet("EOT FINISHED!")
# Close Database
EotGlobalData.gDb.Close()
## ParseDecFile() method
#
# parse DEC file and get all GUID names with GUID values as {GuidName : GuidValue}
# The Dict is stored in EotGlobalData.gGuidDict
#
# @param self: The object pointer
# @param DecFileList: A list of all DEC files
#
def ParseDecFile(self, DecFileList):
if DecFileList:
path = os.path.normpath(DecFileList)
lfr = open(path, 'rb')
for line in lfr:
path = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line.strip()))
if os.path.exists(path):
dfr = open(path, 'rb')
for line in dfr:
line = CleanString(line)
list = line.split('=')
if len(list) == 2:
EotGlobalData.gGuidDict[list[0].strip()] = GuidStructureStringToGuidString(list[1].strip())
## ParseGuidList() method
#
# Parse Guid list and get all GUID names with GUID values as {GuidName : GuidValue}
# The Dict is stored in EotGlobalData.gGuidDict
#
# @param self: The object pointer
# @param GuidList: A list of all GUID and its value
#
def ParseGuidList(self, GuidList):
Path = os.path.join(EotGlobalData.gWORKSPACE, GuidList)
if os.path.isfile(Path):
for Line in open(Path):
if Line.strip():
(GuidName, GuidValue) = Line.split()
EotGlobalData.gGuidDict[GuidName] = GuidValue
## ConvertLogFile() method
#
# Parse a real running log file to get real dispatch order
# The result is saved to old file name + '.new'
#
# @param self: The object pointer
# @param LogFile: A real running log file name
#
def ConvertLogFile(self, LogFile):
newline = []
lfr = None
lfw = None
if LogFile:
lfr = open(LogFile, 'rb')
lfw = open(LogFile + '.new', 'wb')
for line in lfr:
line = line.strip()
line = line.replace('.efi', '')
index = line.find("Loading PEIM at ")
if index > -1:
newline.append(line[index + 55 : ])
continue
index = line.find("Loading driver at ")
if index > -1:
newline.append(line[index + 57 : ])
continue
for line in newline:
lfw.write(line + '\r\n')
if lfr:
lfr.close()
if lfw:
lfw.close()
## GenerateSourceFileList() method
#
# Generate a list of all source files
# 1. Search the file list one by one
# 2. Store inf file name with source file names under it like
# { INF file name: [source file1, source file2, ...]}
# 3. Search the include list to find all .h files
# 4. Store source file list to EotGlobalData.gSOURCE_FILES
# 5. Store INF file list to EotGlobalData.gINF_FILES
#
# @param self: The object pointer
# @param SourceFileList: A list of all source files
# @param IncludeFileList: A list of all include files
#
def GenerateSourceFileList(self, SourceFileList, IncludeFileList):
EdkLogger.quiet("Generating source files list ... ")
mSourceFileList = []
mInfFileList = []
mDecFileList = []
mFileList = {}
mCurrentInfFile = ''
mCurrentSourceFileList = []
if SourceFileList:
sfl = open(SourceFileList, 'r')
for line in sfl:
line = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line.strip()))
if line[-2:].upper() == '.C' or line[-2:].upper() == '.H':
if line not in mCurrentSourceFileList:
mCurrentSourceFileList.append(line)
mSourceFileList.append(line)
EotGlobalData.gOP_SOURCE_FILES.write('%s\n' % line)
if line[-4:].upper() == '.INF':
if mCurrentInfFile != '':
mFileList[mCurrentInfFile] = mCurrentSourceFileList
mCurrentSourceFileList = []
mCurrentInfFile = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line))
EotGlobalData.gOP_INF.write('%s\n' % mCurrentInfFile)
if mCurrentInfFile not in mFileList:
mFileList[mCurrentInfFile] = mCurrentSourceFileList
# Get all include files from packages
if IncludeFileList:
ifl = open(IncludeFileList, 'rb')
for line in ifl:
if not line.strip():
continue
newline = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line.strip()))
for Root, Dirs, Files in os.walk(str(newline)):
for File in Files:
FullPath = os.path.normpath(os.path.join(Root, File))
if FullPath not in mSourceFileList and File[-2:].upper() == '.H':
mSourceFileList.append(FullPath)
EotGlobalData.gOP_SOURCE_FILES.write('%s\n' % FullPath)
if FullPath not in mDecFileList and File.upper().find('.DEC') > -1:
mDecFileList.append(FullPath)
EotGlobalData.gSOURCE_FILES = mSourceFileList
EotGlobalData.gOP_SOURCE_FILES.close()
EotGlobalData.gINF_FILES = mFileList
EotGlobalData.gOP_INF.close()
## GenerateReport() method
#
# Generate final HTML report
#
# @param self: The object pointer
#
def GenerateReport(self):
EdkLogger.quiet("Generating report file ... ")
Rep = Report(self.Report, EotGlobalData.gFV, self.Dispatch)
Rep.GenerateReport()
## LoadMapInfo() method
#
# Load map files and parse them
#
# @param self: The object pointer
#
def LoadMapInfo(self):
if EotGlobalData.gMAP_FILE != []:
EdkLogger.quiet("Parsing Map file ... ")
EotGlobalData.gMap = ParseMapFile(EotGlobalData.gMAP_FILE)
## LoadFvInfo() method
#
# Load FV binary files and parse them
#
# @param self: The object pointer
#
def LoadFvInfo(self):
EdkLogger.quiet("Parsing FV file ... ")
EotGlobalData.gFV = MultipleFv(EotGlobalData.gFV_FILE)
EotGlobalData.gFV.Dispatch(EotGlobalData.gDb)
for Protocol in EotGlobalData.gProtocolList:
EotGlobalData.gOP_UN_MATCHED_IN_LIBRARY_CALLING.write('%s\n' %Protocol)
## GenerateReportDatabase() method
#
# Generate data for the information needed by report
# 1. Update name, macro and value of all found PPI/PROTOCOL GUID
# 2. Install hard coded PPI/PROTOCOL
#
# @param self: The object pointer
#
def GenerateReportDatabase(self):
EdkLogger.quiet("Generating the cross-reference table of GUID for Ppi/Protocol ... ")
# Update Protocol/Ppi Guid
SqlCommand = """select DISTINCT GuidName from Report"""
RecordSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
for Record in RecordSet:
GuidName = Record[0]
GuidMacro = ''
GuidMacro2 = ''
GuidValue = ''
# Find guid value defined in Dec file
if GuidName in EotGlobalData.gGuidDict:
GuidValue = EotGlobalData.gGuidDict[GuidName]
SqlCommand = """update Report set GuidMacro = '%s', GuidValue = '%s' where GuidName = '%s'""" %(GuidMacro, GuidValue, GuidName)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
continue
# Search defined Macros for guid name
SqlCommand ="""select DISTINCT Value, Modifier from Query where Name like '%s'""" % GuidName
GuidMacroSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
# Ignore NULL result
if not GuidMacroSet:
continue
GuidMacro = GuidMacroSet[0][0].strip()
if not GuidMacro:
continue
# Find Guid value of Guid Macro
SqlCommand ="""select DISTINCT Value from Query2 where Value like '%%%s%%' and Model = %s""" % (GuidMacro, MODEL_IDENTIFIER_MACRO_DEFINE)
GuidValueSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
if GuidValueSet != []:
GuidValue = GuidValueSet[0][0]
GuidValue = GuidValue[GuidValue.find(GuidMacro) + len(GuidMacro) :]
GuidValue = GuidValue.lower().replace('\\', '').replace('\r', '').replace('\n', '').replace('l', '').strip()
GuidValue = GuidStructureStringToGuidString(GuidValue)
SqlCommand = """update Report set GuidMacro = '%s', GuidValue = '%s' where GuidName = '%s'""" %(GuidMacro, GuidValue, GuidName)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
continue
# Update Hard Coded Ppi/Protocol
SqlCommand = """select DISTINCT GuidValue, ItemType from Report where ModuleID = -2 and ItemMode = 'Produced'"""
RecordSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
for Record in RecordSet:
if Record[1] == 'Ppi':
EotGlobalData.gPpiList[Record[0].lower()] = -2
if Record[1] == 'Protocol':
EotGlobalData.gProtocolList[Record[0].lower()] = -2
## GenerateQueryTable() method
#
# Generate two tables to improve query performance
#
# @param self: The object pointer
#
def GenerateQueryTable(self):
EdkLogger.quiet("Generating temp query table for analysis ... ")
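# The Query table collects variable declarations and assignment expressions from
# each per-file Identifier table, while Query2 collects macro definitions; the
# GUID name/value lookups in GenerateReportDatabase() run against these tables.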
for Identifier in EotGlobalData.gIdentifierTableList:
SqlCommand = """insert into Query (Name, Modifier, Value, Model)
select Name, Modifier, Value, Model from %s where (Model = %s or Model = %s)""" \
% (Identifier[0], MODEL_IDENTIFIER_VARIABLE, MODEL_IDENTIFIER_ASSIGNMENT_EXPRESSION)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
SqlCommand = """insert into Query2 (Name, Modifier, Value, Model)
select Name, Modifier, Value, Model from %s where Model = %s""" \
% (Identifier[0], MODEL_IDENTIFIER_MACRO_DEFINE)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
## ParseExecutionOrder() method
#
# Get final execution order
# 1. Search all PPI
# 2. Search all PROTOCOL
#
# @param self: The object pointer
#
def ParseExecutionOrder(self):
EdkLogger.quiet("Searching Ppi/Protocol ... ")
for Identifier in EotGlobalData.gIdentifierTableList:
ModuleID, ModuleName, ModuleGuid, SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, Enabled = \
-1, '', '', -1, '', '', '', '', '', '', '', '', 0
SourceFileID = Identifier[0].replace('Identifier', '')
SourceFileFullPath = Identifier[1]
Identifier = Identifier[0]
# Find Ppis
ItemMode = 'Produced'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.InstallPpi', '->InstallPpi', 'PeiInstallPpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode)
ItemMode = 'Produced'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.ReInstallPpi', '->ReInstallPpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 2)
SearchPpiCallFunction(Identifier, SourceFileID, SourceFileFullPath, ItemMode)
ItemMode = 'Consumed'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.LocatePpi', '->LocatePpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Ppi', ItemMode)
ItemMode = 'Callback'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.NotifyPpi', '->NotifyPpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode)
# Find Protocols
ItemMode = 'Produced'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%' or Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.InstallProtocolInterface', '.ReInstallProtocolInterface', '->InstallProtocolInterface', '->ReInstallProtocolInterface', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 1)
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.InstallMultipleProtocolInterfaces', '->InstallMultipleProtocolInterfaces', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 2)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Protocol', ItemMode)
ItemMode = 'Consumed'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.LocateProtocol', '->LocateProtocol', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 0)
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.HandleProtocol', '->HandleProtocol', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 1)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Protocol', ItemMode)
ItemMode = 'Callback'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.RegisterProtocolNotify', '->RegisterProtocolNotify', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 0)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Protocol', ItemMode)
# Hard Code
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gEfiSecPlatformInformationPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gEfiNtLoadAsDllPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gNtPeiLoadFileGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiNtAutoScanPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gNtFwhPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiNtThunkPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiPlatformTypePpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiFrequencySelectionCpuPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiCachePpiGuid', '', '', '', 0)
EotGlobalData.gDb.Conn.commit()
## BuildDatabase() method
#
# Build the database for target
#
# @param self: The object pointer
#
def BuildDatabase(self):
# Clean report table
EotGlobalData.gDb.TblReport.Drop()
EotGlobalData.gDb.TblReport.Create()
# Build database
if self.IsInit:
self.BuildMetaDataFileDatabase(EotGlobalData.gINF_FILES)
EdkLogger.quiet("Building database for source code ...")
c.CreateCCodeDB(EotGlobalData.gSOURCE_FILES)
EdkLogger.quiet("Building database for source code done!")
EotGlobalData.gIdentifierTableList = GetTableList((MODEL_FILE_C, MODEL_FILE_H), 'Identifier', EotGlobalData.gDb)
## BuildMetaDataFileDatabase() method
#
# Build the database for meta data files
#
# @param self: The object pointer
# @param Inf_Files: A list for all INF files
#
def BuildMetaDataFileDatabase(self, Inf_Files):
EdkLogger.quiet("Building database for meta data files ...")
for InfFile in Inf_Files:
if not InfFile:
continue
EdkLogger.quiet("Parsing %s ..." % str(InfFile))
EdkInfParser(InfFile, EotGlobalData.gDb, Inf_Files[InfFile])
EotGlobalData.gDb.Conn.commit()
EdkLogger.quiet("Building database for meta data files done!")
## ParseOption() method
#
# Parse command line options
#
# @param self: The object pointer
#
def ParseOption(self):
(Options, Target) = self.EotOptionParser()
# Set log level
self.SetLogLevel(Options)
if Options.FvFileList:
self.FvFileList = Options.FvFileList
if Options.MapFileList:
self.MapFileList = Options.MapFileList
if Options.SourceFileList:
self.SourceFileList = Options.SourceFileList
if Options.IncludeDirList:
self.IncludeDirList = Options.IncludeDirList
if Options.DecFileList:
self.DecFileList = Options.DecFileList
if Options.GuidList:
self.GuidList = Options.GuidList
if Options.LogFile:
self.LogFile = Options.LogFile
if Options.keepdatabase:
self.IsInit = False
## SetLogLevel() method
#
# Set current log level of the tool based on args
#
# @param self: The object pointer
# @param Option: The option list including log level setting
#
def SetLogLevel(self, Option):
if Option.verbose is not None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
elif Option.quiet is not None:
EdkLogger.SetLevel(EdkLogger.QUIET)
elif Option.debug is not None:
EdkLogger.SetLevel(Option.debug + 1)
else:
EdkLogger.SetLevel(EdkLogger.INFO)
## EotOptionParser() method
#
# Using the standard Python module optparse to parse the command line options of this tool.
#
# @param self: The object pointer
#
# @retval Opt An optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def EotOptionParser(self):
Parser = OptionParser(description = self.Copyright, version = self.Version, prog = "Eot.exe", usage = "%prog [options]")
Parser.add_option("-m", "--makefile filename", action="store", type="string", dest='MakeFile',
help="Specify a makefile for the platform.")
Parser.add_option("-c", "--dsc filename", action="store", type="string", dest="DscFile",
help="Specify a dsc file for the platform.")
Parser.add_option("-f", "--fv filename", action="store", type="string", dest="FvFileList",
help="Specify fv file list, quoted by \"\".")
Parser.add_option("-a", "--map filename", action="store", type="string", dest="MapFileList",
help="Specify map file list, quoted by \"\".")
Parser.add_option("-s", "--source files", action="store", type="string", dest="SourceFileList",
help="Specify source file list by a file")
Parser.add_option("-i", "--include dirs", action="store", type="string", dest="IncludeDirList",
help="Specify include dir list by a file")
Parser.add_option("-e", "--dec files", action="store", type="string", dest="DecFileList",
help="Specify dec file list by a file")
Parser.add_option("-g", "--guid list", action="store", type="string", dest="GuidList",
help="Specify guid file list by a file")
Parser.add_option("-l", "--log filename", action="store", type="string", dest="LogFile",
help="Specify real execution log file")
Parser.add_option("-k", "--keepdatabase", action="store_true", type=None, help="The existing Eot database will not be cleaned except report information if this option is specified.")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
"including library instances selected, final dependency expression, "\
"and warning messages, etc.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
(Opt, Args)=Parser.parse_args()
return (Opt, Args)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
# Initialize log system
EdkLogger.Initialize()
EdkLogger.IsRaiseError = False
EdkLogger.quiet(time.strftime("%H:%M:%S, %b.%d %Y ", time.localtime()) + "[00:00]" + "\n")
StartTime = time.clock()
Eot = Eot(CommandLineOption=False,
SourceFileList=r'C:\TestEot\Source.txt',
GuidList=r'C:\TestEot\Guid.txt',
FvFileList=r'C:\TestEot\FVRECOVERY.Fv')
FinishTime = time.clock()
BuildDuration = time.strftime("%M:%S", time.gmtime(int(round(FinishTime - StartTime))))
EdkLogger.quiet("\n%s [%s]" % (time.strftime("%H:%M:%S, %b.%d %Y", time.localtime()), BuildDuration))
|
[] |
[] |
[
"EDK_SOURCE",
"WORKSPACE",
"EFI_SOURCE"
] |
[]
|
["EDK_SOURCE", "WORKSPACE", "EFI_SOURCE"]
|
python
| 3 | 0 | |
feature_engineering/exp2.py
|
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
pd.set_option("display.max_columns", 500)
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
from sklearn.preprocessing import LabelEncoder
import xgboost as xgb
from xgboost import XGBClassifier
from sklearn.model_selection import TimeSeriesSplit, cross_val_score
from sklearn.metrics import roc_auc_score
from hyperopt import fmin, hp, tpe, space_eval
from sklearn.model_selection import KFold, TimeSeriesSplit
import lightgbm as lgb
from time import time
from tqdm import tqdm_notebook
from xgboost import XGBClassifier
import os
from sklearn.model_selection import KFold
import gc
import warnings
warnings.filterwarnings('ignore')
# The number of rows to use is passed as a command-line argument
# python label_lgb2.py 1.2
import argparse
ap = argparse.ArgumentParser(description='label_lgb2.py')
ap.add_argument('size', nargs='*', action="store", default=-1, type=int)
pa = ap.parse_args()
size = pa.size[0]
# # Load the data
# If size is -1, use the full dataset; otherwise use only `size` rows
if size == -1:
NROWS = None
else:
NROWS = size
print("NROWS: ", NROWS)
# Use the original data
train = pd.read_csv('../temp/train_label.csv', nrows=NROWS)
# Use the data after adding extra samples
# train = pd.read_csv('../temp/train_label_50.csv', nrows=NROWS)
test = pd.read_csv('../temp/test_label.csv', nrows=NROWS)
test = test.drop('isFraud', axis=1)
sub = pd.read_csv('../temp/sample_submission_label.csv', nrows=NROWS)
print("train.shape:", train.shape)
print("test.shape:", test.shape)
# train.head(3)
target = "isFraud"
# ## Memory optimization
# In[4]:
def reduce_mem_usage(df):
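# Downcast every numeric column to the smallest integer/float type that can hold
# its min/max range and report the memory saved. Note that narrowing to float16
# may lose precision for large or high-precision values.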
start_mem = df.memory_usage().sum() / 1024 ** 2
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[: 3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
end_mem = df.memory_usage().sum() / 1024 ** 2
print('Memory usage after optimization is: {:.2f} MB, {:.1f}% reduction'.format(end_mem, 100 * (
start_mem - end_mem) / start_mem))
return df
# In[5]:
train = reduce_mem_usage(train)
test = reduce_mem_usage(test)
# # Feature engineering
print("feature engineer")
# ## Feature grouping: grouped by feature name
# ### Number of missing values
# shift features
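# Build a composite key from card1-card6, addr1 and P_emaildomain, shift it by
# 1..50 rows in both directions, and count how many of those 100 neighbouring
# transactions share the same key (shift_100_cnt), a rough signal for how often
# the same card/address/email combination appears in the surrounding window.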
train['card_addr1_P_emaildomain'] = train["card1"].apply(lambda x: str(x)) + "_" + train["card2"].apply(
lambda x: str(x)) + \
"_" + train["card3"].apply(lambda x: str(x)) + "_" + train["card4"].apply(
lambda x: str(x)) + \
"_" + train["card5"].apply(lambda x: str(x)) + "_" + train["card6"].apply(
lambda x: str(x)) + \
"_" + train["addr1"].apply(lambda x: str(x)) + "_" + train["P_emaildomain"].apply(
lambda x: str(x))
test['card_addr1_P_emaildomain'] = test["card1"].apply(lambda x: str(x)) + "_" + test["card2"].apply(lambda x: str(x)) + \
"_" + test["card3"].apply(lambda x: str(x)) + "_" + test["card4"].apply(
lambda x: str(x)) + \
"_" + test["card5"].apply(lambda x: str(x)) + "_" + test["card6"].apply(
lambda x: str(x)) + \
"_" + test["addr1"].apply(lambda x: str(x)) + "_" + test["P_emaildomain"].apply(
lambda x: str(x))
shift_feature = []
for i in range(1, 51):
train["card_addr1_P_emaildomain_" + str(i) + "before"] = train["card_addr1_P_emaildomain"].shift(i)
test["card_addr1_P_emaildomain_" + str(i) + "before"] = test["card_addr1_P_emaildomain"].shift(i)
shift_feature.append("card_addr1_P_emaildomain_" + str(i) + "before")
for i in range(-1, -51, -1):
train["card_addr1_P_emaildomain_" + str(-i) + "after"] = train["card_addr1_P_emaildomain"].shift(i)
test["card_addr1_P_emaildomain_" + str(-i) + "after"] = test["card_addr1_P_emaildomain"].shift(i)
shift_feature.append("card_addr1_P_emaildomain_" + str(-i) + "after")
def cur_in_window(x):
cur = x[0]
window = x[1:]
cnt = 0
for item in window:
if cur == item:
cnt += 1
return cnt
train["shift_100_cnt"] = train[["card_addr1_P_emaildomain"] + shift_feature].apply(lambda x: cur_in_window(x), axis=1)
test["shift_100_cnt"] = test[["card_addr1_P_emaildomain"] + shift_feature].apply(lambda x: cur_in_window(x), axis=1)
train = train.drop(["card_addr1_P_emaildomain"] + shift_feature, axis=1)
test = test.drop(["card_addr1_P_emaildomain"] + shift_feature, axis=1)
# shift features end
train['null'] = train.isna().sum(axis=1)
test['null'] = test.isna().sum(axis=1)
# # ### target_encoder
#
# print("target_encoder")
# np.random.seed(13)
#
# def impact_coding(data, feature, target='y'):
# '''
# In this implementation we get the values and the dictionary as two different steps.
# This is just because initially we were ignoring the dictionary as a result variable.
#
# In this implementation the KFolds use shuffling. If you want reproducibility the cv
# could be moved to a parameter.
# '''
# n_folds = 5 #20
# n_inner_folds = 5 #10
# impact_coded = pd.Series()
#
# oof_default_mean = data[target].mean() # Gobal mean to use by default (you could further tune this)
# kf = KFold(n_splits=n_folds, shuffle=True)
# oof_mean_cv = pd.DataFrame()
# split = 0
# for infold, oof in kf.split(data[feature]):
# impact_coded_cv = pd.Series()
# kf_inner = KFold(n_splits=n_inner_folds, shuffle=True)
# inner_split = 0
# inner_oof_mean_cv = pd.DataFrame()
# oof_default_inner_mean = data.iloc[infold][target].mean()
# for infold_inner, oof_inner in kf_inner.split(data.iloc[infold]):
# # The mean to apply to the inner oof split (a 1/n_folds % based on the rest)
# oof_mean = data.iloc[infold_inner].groupby(by=feature)[target].mean()
# impact_coded_cv = impact_coded_cv.append(data.iloc[infold].apply(
# lambda x: oof_mean[x[feature]]
# if x[feature] in oof_mean.index else oof_default_inner_mean, axis=1))
#
# # Also populate mapping (this has all group -> mean for all inner CV folds)
# inner_oof_mean_cv = inner_oof_mean_cv.join(pd.DataFrame(oof_mean), rsuffix=inner_split, how='outer')
# inner_oof_mean_cv.fillna(value=oof_default_inner_mean, inplace=True)
# inner_split += 1
#
# # Also populate mapping
# oof_mean_cv = oof_mean_cv.join(pd.DataFrame(inner_oof_mean_cv), rsuffix=split, how='outer')
# oof_mean_cv.fillna(value=oof_default_mean, inplace=True)
# split += 1
#
# impact_coded = impact_coded.append(data.iloc[oof].apply(
# lambda x: inner_oof_mean_cv.loc[x[feature]].mean()
# if x[feature] in inner_oof_mean_cv.index else oof_default_mean, axis=1))
#
# return impact_coded, oof_mean_cv.mean(axis=1), oof_default_mean
#
#
# # In[8]:
#
#
# # train.card5.nunique()
#
#
# # In[9]:
#
#
# target = "isFraud"
#
#
# # In[10]:
# # Apply the encoding to training and test data, and preserve the mapping
# categorical_features = ["card2", "card5"]
# impact_coding_map = {}
# for f in categorical_features:
# print("Impact coding for {}".format(f))
# train["impact_encoded_{}".format(f)], impact_coding_mapping, default_coding = impact_coding(train, f, target)
# impact_coding_map[f] = (impact_coding_mapping, default_coding)
# mapping, default_mean = impact_coding_map[f]
# test["impact_encoded_{}".format(f)] = test.apply(lambda x: mapping[x[f]]
# if x[f] in mapping else default_mean, axis=1)
#
# # ### target_encoder end
# ### Save targetEncoder intermediate variables
# ### Run from the targetEncoder step
# ### Time-related features (TransactionDT)
# In[6]:
def transform_TransactionDT(df):
START_DATE = '2017-12-01'
start_date = datetime.datetime.strptime(START_DATE, '%Y-%m-%d')
df['Date'] = df['TransactionDT'].apply(lambda x: (start_date + datetime.timedelta(seconds=x)))
df['Weekday'] = df['Date'].dt.dayofweek
df['Hour'] = df['Date'].dt.hour
df['Day'] = df['Date'].dt.day
df['Morning'] = ((df['Hour'] >= 7) & (df['Hour'] <= 11)).astype('int')
df['Noon'] = ((df['Hour'] >= 12) & (df['Hour'] <= 18)).astype('int')
df['Evening'] = ((df['Hour'] >= 19) & (df['Hour'] <= 23)).astype('int')
df['Midnight'] = ((df['Hour'] >= 0) & (df['Hour'] <= 6)).astype('int')
del df['Date']
return df
# In[7]:
train = transform_TransactionDT(train)
test = transform_TransactionDT(test)
# ### Amount (TransactionAmt)
# In[8]:
train['TransactionAmt'] = train['TransactionAmt'].astype(float)
train['TransAmtLog'] = np.log(train['TransactionAmt'])
train['TransAmtDemical'] = train['TransactionAmt'].astype('str').str.split('.', expand=True)[1].str.len()
test['TransactionAmt'] = test['TransactionAmt'].astype(float)
test['TransAmtLog'] = np.log(test['TransactionAmt'])
test['TransAmtDemical'] = test['TransactionAmt'].astype('str').str.split('.', expand=True)[1].str.len()
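# TransAmtLog is the natural log of the transaction amount; TransAmtDemical is
# the number of digits after the decimal point, parsed from the string form.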
# ### Features for whether the amount (TransactionAmt) is evenly divisible
# In[9]:
def mod_m(x, m):
if x % m == 0:
return 1
else:
return 0
train['TransactionAmt_mod_1'] = train['TransactionAmt'].apply(lambda x: mod_m(x, 1))
train['TransactionAmt_mod_10'] = train['TransactionAmt'].apply(lambda x: mod_m(x, 10))
train['TransactionAmt_mod_50'] = train['TransactionAmt'].apply(lambda x: mod_m(x, 50))
train['TransactionAmt_mod_100'] = train['TransactionAmt'].apply(lambda x: mod_m(x, 100))
test['TransactionAmt_mod_1'] = test['TransactionAmt'].apply(lambda x: mod_m(x, 1))
test['TransactionAmt_mod_10'] = test['TransactionAmt'].apply(lambda x: mod_m(x, 10))
test['TransactionAmt_mod_50'] = test['TransactionAmt'].apply(lambda x: mod_m(x, 50))
test['TransactionAmt_mod_100'] = test['TransactionAmt'].apply(lambda x: mod_m(x, 100))
# ### ProductCD
# ### card feature extraction
def get_sub(x, idx):
try:
return str(x)[idx]
except:
return "-1"
for idx in [-1, -2, -3, -4, -5]:
train["card1" + "_sub_" + str(idx)] = train["card1"].apply(lambda x: get_sub(x, idx))
test["card1" + "_sub_" + str(idx)] = test["card1"].apply(lambda x: get_sub(x, idx))
# target encoding leads to overfitting
# feature = 'card1'
# temp = train.groupby([feature])[target].sum().reset_index()
# temp.index = temp[feature]
# temp = temp.drop(feature, axis=1)
# faeture_map = temp.to_dict()[target]
# train[feature + "_target_cnt"] = train[feature].map(faeture_map)
# test[feature + "_target_cnt"] = test[feature].map(faeture_map)
# In[12]:
# card1 is a categorical feature; extract sub-fields from card1
train["card1_len"] = train["card1"].apply(lambda x: len(str(x)))
test["card1_len"] = test["card1"].apply(lambda x: len(str(x)))
train["card1_first"] = train["card1"].apply(lambda x: str(x)[0])
test["card1_first"] = test["card1"].apply(lambda x: str(x)[0])
# card2 feature extraction
# train["card2_first"] = train["card2"].apply(lambda x: str(x)[0])
# test["card2_first"] = test["card2"].apply(lambda x: str(x)[0])
# train["card2_second"] = train["card2"].apply(lambda x: str(x)[1])
# test["card2_second"] = test["card2"].apply(lambda x: str(x)[1])
# train["card2_last"] = train["card2"].apply(lambda x: str(x)[2])
# test["card2_last"] = test["card2"].apply(lambda x: str(x)[2])
# In[13]:
# Flags for whether a value is missing
train["card1_na"] = 0
train.loc[train["card1"].isna(), "card1_na"] = 1
test["card1_na"] = 0
test.loc[test["card1"].isna(), "card1_na"] = 1
train["card2_na"] = 0
train.loc[train["card2"].isna(), "card2_na"] = 1
test["card2_na"] = 0
test.loc[test["card2"].isna(), "card2_na"] = 1
# train["card3_na"] = 0
# train.loc[train["card3"].isna(), "card3_na"] = 1
# test["card3_na"] = 0
# test.loc[test["card3"].isna(), "card3_na"] = 1
# train["card4_na"] = 0
# train.loc[train["card4"].isna(), "card4_na"] = 1
# test["card4_na"] = 0
# test.loc[test["card4"].isna(), "card4_na"] = 1
train["card5_na"] = 0
train.loc[train["card5"].isna(), "card5_na"] = 1
test["card5_na"] = 0
test.loc[test["card5"].isna(), "card5_na"] = 1
# train["card6_na"] = 0
# train.loc[train["card6"].isna(), "card6_na"] = 1
# test["card6_na"] = 0
# test.loc[test["card6"].isna(), "card6_na"] = 1
# In[14]:
# Statistics over the concatenated card fields
train['card_str'] = train["card1"].apply(lambda x: str(x)) + "_" + train["card2"].apply(lambda x: str(x)) + "_" + train[
"card3"].apply(lambda x: str(x)) + "_" + train["card4"].apply(lambda x: str(x)) + "_" + train["card5"].apply(
lambda x: str(x)) + "_" + train["card6"].apply(lambda x: str(x))
test['card_str'] = test["card1"].apply(lambda x: str(x)) + "_" + test["card2"].apply(lambda x: str(x)) + "_" + test[
"card3"].apply(lambda x: str(x)) + "_" + test["card4"].apply(lambda x: str(x)) + "_" + test["card5"].apply(
lambda x: str(x)) + "_" + test["card6"].apply(lambda x: str(x))
train['card_count_full'] = train['card_str'].map(
pd.concat([train['card_str'], test['card_str']], ignore_index=True).value_counts(dropna=False))
test['card_count_full'] = test['card_str'].map(
pd.concat([train['card_str'], test['card_str']], ignore_index=True).value_counts(dropna=False))
# In[15]:
train['TransactionAmt_to_std_card_str'] = train['TransactionAmt'] / train.groupby(['card_str'])[
'TransactionAmt'].transform('std')
test['TransactionAmt_to_std_card_str'] = test['TransactionAmt'] / test.groupby(['card_str'])[
'TransactionAmt'].transform('std')
train['TransactionAmt_to_mean_card_str'] = train['TransactionAmt'] / train.groupby(['card_str'])[
'TransactionAmt'].transform('mean')
test['TransactionAmt_to_mean_card_str'] = test['TransactionAmt'] / test.groupby(['card_str'])[
'TransactionAmt'].transform('mean')
train['TransactionAmt_to_sum_card_str'] = train['TransactionAmt'] / train.groupby(['card_str'])[
'TransactionAmt'].transform('sum')
test['TransactionAmt_to_sum_card_str'] = test['TransactionAmt'] / test.groupby(['card_str'])[
'TransactionAmt'].transform('sum')
# In[16]:
train['card1_count_full'] = train['card1'].map(
pd.concat([train['card1'], test['card1']], ignore_index=True).value_counts(dropna=False))
test['card1_count_full'] = test['card1'].map(
pd.concat([train['card1'], test['card1']], ignore_index=True).value_counts(dropna=False))
train['card2_count_full'] = train['card2'].map(
pd.concat([train['card2'], test['card2']], ignore_index=True).value_counts(dropna=False))
test['card2_count_full'] = test['card2'].map(
pd.concat([train['card2'], test['card2']], ignore_index=True).value_counts(dropna=False))
train['card3_count_full'] = train['card3'].map(
pd.concat([train['card3'], test['card3']], ignore_index=True).value_counts(dropna=False))
test['card3_count_full'] = test['card3'].map(
pd.concat([train['card3'], test['card3']], ignore_index=True).value_counts(dropna=False))
train['card4_count_full'] = train['card4'].map(
pd.concat([train['card4'], test['card4']], ignore_index=True).value_counts(dropna=False))
test['card4_count_full'] = test['card4'].map(
pd.concat([train['card4'], test['card4']], ignore_index=True).value_counts(dropna=False))
train['card5_count_full'] = train['card5'].map(
pd.concat([train['card5'], test['card5']], ignore_index=True).value_counts(dropna=False))
test['card5_count_full'] = test['card5'].map(
pd.concat([train['card5'], test['card5']], ignore_index=True).value_counts(dropna=False))
train['card6_count_full'] = train['card6'].map(
pd.concat([train['card6'], test['card6']], ignore_index=True).value_counts(dropna=False))
test['card6_count_full'] = test['card6'].map(
pd.concat([train['card6'], test['card6']], ignore_index=True).value_counts(dropna=False))
# In[17]:
train['TransactionAmt_to_mean_card1'] = train['TransactionAmt'] / train.groupby(['card1'])['TransactionAmt'].transform(
'mean')
train['TransactionAmt_to_mean_card2'] = train['TransactionAmt'] / train.groupby(['card2'])['TransactionAmt'].transform(
'mean')
test['TransactionAmt_to_mean_card1'] = test['TransactionAmt'] / test.groupby(['card1'])['TransactionAmt'].transform(
'mean')
test['TransactionAmt_to_mean_card2'] = test['TransactionAmt'] / test.groupby(['card2'])['TransactionAmt'].transform(
'mean')
train['TransactionAmt_to_mean_card3'] = train['TransactionAmt'] / train.groupby(['card3'])['TransactionAmt'].transform(
'mean')
train['TransactionAmt_to_mean_card4'] = train['TransactionAmt'] / train.groupby(['card4'])['TransactionAmt'].transform(
'mean')
test['TransactionAmt_to_mean_card3'] = test['TransactionAmt'] / test.groupby(['card3'])['TransactionAmt'].transform(
'mean')
test['TransactionAmt_to_mean_card4'] = test['TransactionAmt'] / test.groupby(['card4'])['TransactionAmt'].transform(
'mean')
train['TransactionAmt_to_mean_card5'] = train['TransactionAmt'] / train.groupby(['card5'])['TransactionAmt'].transform(
'mean')
train['TransactionAmt_to_mean_card6'] = train['TransactionAmt'] / train.groupby(['card6'])['TransactionAmt'].transform(
'mean')
test['TransactionAmt_to_mean_card5'] = test['TransactionAmt'] / test.groupby(['card5'])['TransactionAmt'].transform(
'mean')
test['TransactionAmt_to_mean_card6'] = test['TransactionAmt'] / test.groupby(['card6'])['TransactionAmt'].transform(
'mean')
# In[18]:
train['TransactionAmt_to_std_card1'] = train['TransactionAmt'] / train.groupby(['card1'])['TransactionAmt'].transform(
'std')
train['TransactionAmt_to_std_card2'] = train['TransactionAmt'] / train.groupby(['card2'])['TransactionAmt'].transform(
'std')
test['TransactionAmt_to_std_card1'] = test['TransactionAmt'] / test.groupby(['card1'])['TransactionAmt'].transform(
'std')
test['TransactionAmt_to_std_card2'] = test['TransactionAmt'] / test.groupby(['card2'])['TransactionAmt'].transform(
'std')
train['TransactionAmt_to_std_card3'] = train['TransactionAmt'] / train.groupby(['card3'])['TransactionAmt'].transform(
'std')
train['TransactionAmt_to_std_card4'] = train['TransactionAmt'] / train.groupby(['card4'])['TransactionAmt'].transform(
'std')
test['TransactionAmt_to_std_card3'] = test['TransactionAmt'] / test.groupby(['card3'])['TransactionAmt'].transform(
'std')
test['TransactionAmt_to_std_card4'] = test['TransactionAmt'] / test.groupby(['card4'])['TransactionAmt'].transform(
'std')
train['TransactionAmt_to_std_card5'] = train['TransactionAmt'] / train.groupby(['card5'])['TransactionAmt'].transform(
'std')
train['TransactionAmt_to_std_card6'] = train['TransactionAmt'] / train.groupby(['card6'])['TransactionAmt'].transform(
'std')
test['TransactionAmt_to_std_card5'] = test['TransactionAmt'] / test.groupby(['card5'])['TransactionAmt'].transform(
'std')
test['TransactionAmt_to_std_card6'] = test['TransactionAmt'] / test.groupby(['card6'])['TransactionAmt'].transform(
'std')
# In[19]:
train['TransactionAmt_to_sum_card1'] = train['TransactionAmt'] / train.groupby(['card1'])['TransactionAmt'].transform(
'sum')
train['TransactionAmt_to_sum_card2'] = train['TransactionAmt'] / train.groupby(['card2'])['TransactionAmt'].transform(
'sum')
test['TransactionAmt_to_sum_card1'] = test['TransactionAmt'] / test.groupby(['card1'])['TransactionAmt'].transform(
'sum')
test['TransactionAmt_to_sum_card2'] = test['TransactionAmt'] / test.groupby(['card2'])['TransactionAmt'].transform(
'sum')
train['TransactionAmt_to_sum_card3'] = train['TransactionAmt'] / train.groupby(['card3'])['TransactionAmt'].transform(
'sum')
train['TransactionAmt_to_sum_card4'] = train['TransactionAmt'] / train.groupby(['card4'])['TransactionAmt'].transform(
'sum')
test['TransactionAmt_to_sum_card3'] = test['TransactionAmt'] / test.groupby(['card3'])['TransactionAmt'].transform(
'sum')
test['TransactionAmt_to_sum_card4'] = test['TransactionAmt'] / test.groupby(['card4'])['TransactionAmt'].transform(
'sum')
train['TransactionAmt_to_sum_card5'] = train['TransactionAmt'] / train.groupby(['card5'])['TransactionAmt'].transform(
'sum')
train['TransactionAmt_to_sum_card6'] = train['TransactionAmt'] / train.groupby(['card6'])['TransactionAmt'].transform(
'sum')
test['TransactionAmt_to_sum_card5'] = test['TransactionAmt'] / test.groupby(['card5'])['TransactionAmt'].transform(
'sum')
test['TransactionAmt_to_sum_card6'] = test['TransactionAmt'] / test.groupby(['card6'])['TransactionAmt'].transform(
'sum')
# In[20]:
train['id_02_to_mean_card1'] = train['id_02'] / train.groupby(['card1'])['id_02'].transform('mean')
train['id_02_to_mean_card4'] = train['id_02'] / train.groupby(['card4'])['id_02'].transform('mean')
train['id_02_to_std_card1'] = train['id_02'] / train.groupby(['card1'])['id_02'].transform('std')
train['id_02_to_std_card4'] = train['id_02'] / train.groupby(['card4'])['id_02'].transform('std')
test['id_02_to_mean_card1'] = test['id_02'] / test.groupby(['card1'])['id_02'].transform('mean')
test['id_02_to_mean_card4'] = test['id_02'] / test.groupby(['card4'])['id_02'].transform('mean')
test['id_02_to_std_card1'] = test['id_02'] / test.groupby(['card1'])['id_02'].transform('std')
test['id_02_to_std_card4'] = test['id_02'] / test.groupby(['card4'])['id_02'].transform('std')
# In[21]:
train['D15_to_mean_card1'] = train['D15'] / train.groupby(['card1'])['D15'].transform('mean')
train['D15_to_mean_card4'] = train['D15'] / train.groupby(['card4'])['D15'].transform('mean')
train['D15_to_std_card1'] = train['D15'] / train.groupby(['card1'])['D15'].transform('std')
train['D15_to_std_card4'] = train['D15'] / train.groupby(['card4'])['D15'].transform('std')
test['D15_to_mean_card1'] = test['D15'] / test.groupby(['card1'])['D15'].transform('mean')
test['D15_to_mean_card4'] = test['D15'] / test.groupby(['card4'])['D15'].transform('mean')
test['D15_to_std_card1'] = test['D15'] / test.groupby(['card1'])['D15'].transform('std')
test['D15_to_std_card4'] = test['D15'] / test.groupby(['card4'])['D15'].transform('std')
train['D15_to_mean_card4'] = train['D15'] / train.groupby(['card4'])['D15'].transform('mean')
train['D15_to_std_card4'] = train['D15'] / train.groupby(['card4'])['D15'].transform('std')
test['D15_to_mean_card4'] = test['D15'] / test.groupby(['card4'])['D15'].transform('mean')
test['D15_to_std_card4'] = test['D15'] / test.groupby(['card4'])['D15'].transform('std')
# ### card SVD features
# In[22]:
from sklearn.decomposition import PCA, FastICA
from sklearn.decomposition import TruncatedSVD
from sklearn.random_projection import GaussianRandomProjection
from sklearn.random_projection import SparseRandomProjection
def get_dc_feature(df_train, df_test, n_comp=12, used_features=None):
"""
Build decomposition features
"""
if not used_features:
used_features = df_test.columns
train = df_train.copy()
test = df_test.copy()
# tSVD
# tsvd = TruncatedSVD(n_components=n_comp, random_state=420)
# tsvd_results_train = tsvd.fit_transform(train[used_features])
# tsvd_results_test = tsvd.transform(test[used_features])
# PCA
pca = PCA(n_components=n_comp, random_state=420)
pca2_results_train = pca.fit_transform(train[used_features])
pca2_results_test = pca.transform(test[used_features])
# # ICA
# ica = FastICA(n_components=n_comp, random_state=420)
# ica2_results_train = ica.fit_transform(train[used_features])
# ica2_results_test = ica.transform(test[used_features])
# # GRP
# grp = GaussianRandomProjection(n_components=n_comp, eps=0.1, random_state=420)
# grp_results_train = grp.fit_transform(train[used_features])
# grp_results_test = grp.transform(test[used_features])
# # SRP
# srp = SparseRandomProjection(n_components=n_comp, dense_output=True, random_state=420)
# srp_results_train = srp.fit_transform(train[used_features])
# srp_results_test = srp.transform(test[used_features])
# Append decomposition components to datasets
for i in range(1, n_comp + 1):
train['pca_' + str(i)] = pca2_results_train[:, i - 1]
test['pca_' + str(i)] = pca2_results_test[:, i - 1]
# train['ica_' + str(i)] = ica2_results_train[:, i - 1]
# test['ica_' + str(i)] = ica2_results_test[:, i - 1]
# train['tsvd_' + str(i)] = tsvd_results_train[:, i - 1]
# test['tsvd_' + str(i)] = tsvd_results_test[:, i - 1]
# train['grp_' + str(i)] = grp_results_train[:, i - 1]
# test['grp_' + str(i)] = grp_results_test[:, i - 1]
# train['srp_' + str(i)] = srp_results_train[:, i - 1]
# test['srp_' + str(i)] = srp_results_test[:, i - 1]
return train, test
print(train.shape, test.shape)
used_features = ['card1', 'card2', 'card3', 'card5']
train[used_features] = train[used_features].fillna(-1.0)
test[used_features] = test[used_features].fillna(-1.0)
train, test = get_dc_feature(train, test, n_comp=3, used_features=used_features)
print(train.shape, test.shape)
# ### address
# both are for purchaser, addr1 as billing region, addr2 as billing country
# In[23]:
train['addr1_count_full'] = train['addr1'].map(
pd.concat([train['addr1'], test['addr1']], ignore_index=True).value_counts(dropna=False))
test['addr1_count_full'] = test['addr1'].map(
pd.concat([train['addr1'], test['addr1']], ignore_index=True).value_counts(dropna=False))
train['addr2_count_full'] = train['addr2'].map(
pd.concat([train['addr2'], test['addr2']], ignore_index=True).value_counts(dropna=False))
test['addr2_count_full'] = test['addr2'].map(
pd.concat([train['addr2'], test['addr2']], ignore_index=True).value_counts(dropna=False))
train['D15_to_mean_addr1'] = train['D15'] / train.groupby(['addr1'])['D15'].transform('mean')
train['D15_to_std_addr1'] = train['D15'] / train.groupby(['addr1'])['D15'].transform('std')
test['D15_to_mean_addr1'] = test['D15'] / test.groupby(['addr1'])['D15'].transform('mean')
test['D15_to_std_addr1'] = test['D15'] / test.groupby(['addr1'])['D15'].transform('std')
# ### distance
# In[24]:
train["dist1_plus_dist2"] = train["dist1"] + train["dist2"]
train["dist1_minus_dist2"] = train["dist1"] - train["dist2"]
train["dist1_times_dist2"] = train["dist1"] * train["dist2"]
train["dist1_divides_dist2"] = train["dist1"] / train["dist2"]
test["dist1_plus_dist2"] = test["dist1"] + test["dist2"]
test["dist1_minus_dist2"] = test["dist1"] - test["dist2"]
test["dist1_times_dist2"] = test["dist1"] * test["dist2"]
test["dist1_divides_dist2"] = test["dist1"] / test["dist2"]
# ### Email
# In[25]:
def transform_email(df):
for col in ['P_emaildomain', 'R_emaildomain']:
col1 = col.replace('domain', '_suffix')
df[col1] = df[col].str.rsplit('.', expand=True).iloc[:, -1]
col2 = col.replace('domain', 'Corp')
df[col2] = df[col]
df.loc[df[col].isin(['gmail.com', 'gmail']), col2] = 'Google'
df.loc[df[col].isin(['yahoo.com', 'yahoo.com.mx', 'yahoo.co.uk', 'yahoo.co.jp',
'yahoo.de', 'yahoo.fr', 'yahoo.es', 'yahoo.com.mx',
'ymail.com']), col2] = 'Yahoo'
df.loc[df[col].isin(['hotmail.com', 'outlook.com', 'msn.com', 'live.com.mx', 'hotmail.es',
'hotmail.co.uk', 'hotmail.de', 'outlook.es', 'live.com', 'live.fr',
'hotmail.fr']), col2] = 'Microsoft'
df.loc[df[col].isin(['aol.com', 'verizon.net']), col2] = 'Verizon'
df.loc[df[col].isin(['att.net', 'sbcglobal.net', 'bellsouth.net']), col2] = 'AT&T'
df.loc[df[col].isin(['icloud.com', 'mac.com', 'me.com']), col2] = 'Apple'
df.loc[df[col2].isin(df[col2].value_counts()[df[col2].value_counts() <= 1000].index), col2] = 'Others'
return df
# In[26]:
train = transform_email(train)
test = transform_email(test)
# In[27]:
# train['P_email']=(train['P_emaildomain']=='xmail.com')
# train['R_email']=(train['R_emaildomain']=='xmail.com')
# test['P_email']=(test['P_emaildomain']=='xmail.com')
# test['R_email']=(test['R_emaildomain']=='xmail.com')
# ### C1-C14
#
# C1-C14: counting, such as how many addresses are found to be associated with the payment card, etc. The actual meaning is masked.
#
# ### D1-D15
# timedelta, such as days between previous transaction, etc.
# ### M1-M9
# match, such as names on card and address, etc.
# In[28]:
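# Frequency-encode each M column: map every value (NaN included) to its count
# over the combined train and test data.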
MFeatures = ["M1", "M2", "M3", "M4", "M5", "M6", "M7", "M8", "M9"]
for feature in MFeatures:
train[feature + '_count_full'] = train[feature].map(
pd.concat([train[feature], test[feature]], ignore_index=True).value_counts(dropna=False))
test[feature + '_count_full'] = test[feature].map(
pd.concat([train[feature], test[feature]], ignore_index=True).value_counts(dropna=False))
# ### Vxxx
# Vesta engineered rich features, including ranking, counting, and other entity relations.
# ### id-related features
# In[29]:
# # lastest_browser
# a = np.zeros(train.shape[0])
# train["lastest_browser"] = a
# a = np.zeros(test.shape[0])
# test["lastest_browser"] = a
# def browser(df):
# df.loc[df["id_31"]=="samsung browser 7.0",'lastest_browser']=1
# df.loc[df["id_31"]=="opera 53.0",'lastest_browser']=1
# df.loc[df["id_31"]=="mobile safari 10.0",'lastest_browser']=1
# df.loc[df["id_31"]=="google search application 49.0",'lastest_browser']=1
# df.loc[df["id_31"]=="firefox 60.0",'lastest_browser']=1
# df.loc[df["id_31"]=="edge 17.0",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 69.0",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 67.0 for android",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 63.0 for android",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 63.0 for ios",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 64.0",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 64.0 for android",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 64.0 for ios",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 65.0",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 65.0 for android",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 65.0 for ios",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 66.0",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 66.0 for android",'lastest_browser']=1
# df.loc[df["id_31"]=="chrome 66.0 for ios",'lastest_browser']=1
# return df
# train=browser(train)
# test=browser(test)
# In[30]:
def transform_id_cols(df):
# df['id_01_cut'] = pd.cut(df['id_01'], bins=[-100, -30, -20, -10, -5, 0])
df['id_05_d'] = df['id_05']
df['id_05_d'].where(df[df['id_05_d'].notnull()]['id_05_d'] == 0, 1, inplace=True)
# df['id_06_cut'] = pd.cut(df['id_06'], bins=[-100, -10, -5, 0])
df['id_06_d'] = df['id_06']
df['id_06_d'].where(df[df['id_06_d'].notnull()]['id_06_d'] == 0, 1, inplace=True)
# Dealing with id_30
df['id_30_count'] = df['id_30'].map(df['id_30'].value_counts(dropna=False))
df['System'] = df['id_30'].astype('str').str.split('.', expand=True)[0].str.split('_', expand=True)[0]
df['SystemCorp'] = df['System'].str.split(expand=True)[0]
# Dealing with id_31
df['LastestBrowser'] = df['id_31']
df.loc[
df['LastestBrowser'].isin(['samsung browser 7.0', 'opera 53.0', 'mobile safari 10.0', 'chrome 63.0 for android',
'google search application 49.0', 'firefox 60.0', 'edge 17.0', 'chrome 69.0',
'chrome 67.0 for android', 'chrome 64.0', 'chrome 63.0 for ios', 'chrome 65.0',
'chrome 64.0 for android', 'chrome 64.0 for ios', 'chrome 66.0',
'chrome 65.0 for android', 'chrome 65.0 for ios', 'chrome 66.0 for android',
'chrome 66.0 for ios']), 'LastestBrowser'] = 1
df.loc[df['LastestBrowser'].str.len() > 1, 'LastestBrowser'] = 0
df['id_31_count'] = df['id_31'].map(df['id_31'].value_counts(dropna=False))
df['MSBrowser'] = df['id_31'].str.contains('edge|ie|microsoft', case=False) * 1
df['AppleBrowser'] = df['id_31'].str.contains('safari', case=False) * 1
df['GoogleBrowser'] = df['id_31'].str.contains('chrome', case=False) * 1
df['BrowserType'] = df['id_31']
df.loc[df['BrowserType'].str.contains('samsung', case=False, na=False), 'BrowserType'] = 'Samsung'
df.loc[df['BrowserType'].str.contains('safari', case=False, na=False), 'BrowserType'] = 'Apple'
df.loc[df['BrowserType'].str.contains('chrome|google', case=False, na=False), 'BrowserType'] = 'Google'
df.loc[df['BrowserType'].str.contains('firefox', case=False, na=False), 'BrowserType'] = 'Mozilla'
df.loc[df['BrowserType'].str.contains('edge|ie|microsoft', case=False, na=False,
regex=True), 'BrowserType'] = 'Microsoft'
df.loc[df['BrowserType'].isin(df['BrowserType'].value_counts()[df['BrowserType'].value_counts() < 1000].index), [
'BrowserType']] = 'other'
# Dealing with id_33
df['id_33_count'] = df['id_33'].map(df['id_33'].value_counts(dropna=False))
df['DisplaySize'] = df['id_33'].str.split('x', expand=True)[0].astype('float') * \
df['id_33'].str.split('x', expand=True)[1].astype('float')
df['DisplaySize'].replace(0, np.nan, inplace=True)
df['DisplaySize'] = (df['DisplaySize'] / df['DisplaySize'].min()).round(0)
# Try easy combining
for feature in ['id_02__id_20', 'id_13__id_17', 'id_02__D8', 'D11__DeviceInfo',
'DeviceInfo__P_emaildomain', 'card2__dist1', 'card1__card5',
'card2__id_20', 'card5__P_emaildomain', 'addr1__card1']:
f1, f2 = feature.split('__')
df[feature] = df[f1].astype(str) + '_' + df[f2].astype(str)
for col in ['id_30', 'id_31', 'id_33', 'DeviceInfo']:
df[col + '_DeviceTpye'] = df[col] + '_' + df['DeviceType']
return df
# In[31]:
train = transform_id_cols(train)
test = transform_id_cols(test)
# ### DeviceType
# In[32]:
# ### DeviceInfo
# In[33]:
def transform_DeviceInfo(df):
df['DeviceCorp'] = df['DeviceInfo']
df.loc[df['DeviceInfo'].str.contains('HUAWEI|HONOR', case=False, na=False, regex=True), 'DeviceCorp'] = 'HUAWEI'
df.loc[df['DeviceInfo'].str.contains('OS', na=False, regex=False), 'DeviceCorp'] = 'APPLE'
df.loc[df['DeviceInfo'].str.contains('Idea|TA', case=False, na=False), 'DeviceCorp'] = 'Lenovo'
df.loc[df['DeviceInfo'].str.contains('Moto|XT|Edison', case=False, na=False), 'DeviceCorp'] = 'Moto'
df.loc[df['DeviceInfo'].str.contains('MI|Mi|Redmi', na=False), 'DeviceCorp'] = 'Mi'
df.loc[df['DeviceInfo'].str.contains('VS|LG|EGO', na=False), 'DeviceCorp'] = 'LG'
df.loc[
df['DeviceInfo'].str.contains('ONE TOUCH|ALCATEL', case=False, na=False, regex=False), 'DeviceCorp'] = 'ALCATEL'
df.loc[df['DeviceInfo'].str.contains('ONE A', na=False, regex=False), 'DeviceCorp'] = 'ONEPLUS'
df.loc[df['DeviceInfo'].str.contains('OPR6', na=False, regex=False), 'DeviceCorp'] = 'HTC'
df.loc[df['DeviceInfo'].str.contains('Nexus|Pixel', case=False, na=False, regex=True), 'DeviceCorp'] = 'google'
df.loc[df['DeviceInfo'].str.contains('STV', na=False, regex=False), 'DeviceCorp'] = 'blackberry'
df.loc[df['DeviceInfo'].str.contains('ASUS', case=False, na=False, regex=False), 'DeviceCorp'] = 'ASUS'
df.loc[df['DeviceInfo'].str.contains('BLADE', case=False, na=False, regex=False), 'DeviceCorp'] = 'ZTE'
df['DeviceCorp'] = \
df['DeviceInfo'].astype('str').str.split(':', expand=True)[0].str.split('-', expand=True)[0].str.split(expand=True)[
0]
df.loc[df['DeviceInfo'].isin(['rv', 'SM', 'GT', 'SGH']), 'DeviceCorp'] = 'SAMSUNG'
df.loc[df['DeviceInfo'].str.startswith('Z', na=False), 'DeviceCorp'] = 'ZTE'
df.loc[df['DeviceInfo'].str.startswith('KF', na=False), 'DeviceCorp'] = 'Amazon'
for i in ['D', 'E', 'F', 'G']:
df.loc[df['DeviceInfo'].str.startswith(i, na=False), 'DeviceCorp'] = 'SONY'
minority = df['DeviceCorp'].value_counts()[df['DeviceCorp'].value_counts() < 100].index
df.loc[df['DeviceCorp'].isin(minority), 'DeviceCorp'] = 'Other'
df['DeviceCorp'] = df['DeviceCorp'].str.upper()
return df
# In[34]:
train = transform_DeviceInfo(train)
test = transform_DeviceInfo(test)
# ### LabelEncoder for categorical variables
# In[35]:
target = "isFraud"
# Label Encoding
for f in tqdm_notebook([feature for feature in train.columns if feature != target]):
if train[f].dtype == 'object' or test[f].dtype == 'object':
lbl = LabelEncoder()
temp = pd.DataFrame(train[f].astype(str).append(test[f].astype(str)))
lbl.fit(temp[f])
train[f] = lbl.transform(list(train[f].astype(str)))
test[f] = lbl.transform(list(test[f].astype(str)))
# ### Constructed features
# In[36]:
def transform_number(df):
df['id_02_log'] = np.log10(df['id_02'])
df['C5_d'] = df['C5']
df['C5_d'].where(df['C5'] == 0, 1, inplace=True)
df['D8_mul_D9'] = df['D8'] * df['D9']
df['TransAmt_mul_dist1'] = df['TransactionAmt'] * df['dist1']
df['TransAmt_per_TransDT'] = df['TransactionAmt'] * 24 * 60 * 60 / df['TransactionDT']
return df
# In[37]:
train = transform_number(train)
test = transform_number(test)
# ### Interaction (crossed) features
for feature in tqdm_notebook(['id_02__id_20', 'id_02__D8', 'D11__DeviceInfo', 'DeviceInfo__P_emaildomain',
'P_emaildomain__C2',
'P_emaildomain__card1', 'P_emaildomain__card2',
'card2__dist1', 'card1__card5', 'card2__id_20', 'card5__P_emaildomain',
'addr1__card1', 'card2__card4', 'card4__card6'
]):
f1, f2 = feature.split('__')
train[feature] = train[f1].astype(str) + '_' + train[f2].astype(str)
test[feature] = test[f1].astype(str) + '_' + test[f2].astype(str)
le = LabelEncoder()
le.fit(list(train[feature].astype(str).values) + list(test[feature].astype(str).values))
train[feature] = le.transform(list(train[feature].astype(str).values))
test[feature] = le.transform(list(test[feature].astype(str).values))
# ### Higher-order interaction features
for feature in tqdm_notebook([
'P_emaildomain__card1__card2', 'addr1__card1__card2'
]):
f1, f2, f3 = feature.split('__')
train[feature] = train[f1].astype(str) + '_' + train[f2].astype(str) + '_' + train[f3].astype(str)
test[feature] = test[f1].astype(str) + '_' + test[f2].astype(str) + '_' + test[f3].astype(str)
le = LabelEncoder()
le.fit(list(train[feature].astype(str).values) + list(test[feature].astype(str).values))
train[feature] = le.transform(list(train[feature].astype(str).values))
test[feature] = le.transform(list(test[feature].astype(str).values))
X = train.sort_values('TransactionDT').drop(['isFraud', 'TransactionDT', 'TransactionID'], axis=1)
y = train.sort_values('TransactionDT')['isFraud']
test_X = test.sort_values('TransactionDT').drop(['TransactionDT', 'TransactionID'], axis=1)
# X.to_csv("../temp/feature_X.csv", index = False)
# y.to_csv("../temp/feature_y.csv", index = False)
# test_X.to_csv("../temp/feature_test_X.csv", index = False)
# ### LightGBM parameters
print("lgb model")
params = {'num_leaves': 491,
'min_child_weight': 0.03454472573214212,
'feature_fraction': 0.3797454081646243,
'bagging_fraction': 0.4181193142567742,
'min_data_in_leaf': 106,
'objective': 'binary',
'max_depth': -1,
'learning_rate': 0.006883242363721497,
"boosting_type": "gbdt",
"bagging_seed": 11,
"metric": 'auc',
"verbosity": -1,
'reg_alpha': 0.3899927210061127,
'reg_lambda': 0.6485237330340494,
'random_state': 47
}
n_fold = 5
folds = KFold(n_splits=n_fold, shuffle=False)
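# shuffle=False keeps the folds in chronological order because X and y were
# sorted by TransactionDT above; the loop below also drops the last fold and
# averages the test predictions over the remaining n_fold - 1 folds.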
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
lgb_sub = sub.copy()
lgb_sub['isFraud'] = 0
aucs = []
training_start_time = time()
for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):
if fold_n == 4:
break
start_time = time()
print('Training on fold {}'.format(fold_n + 1))
trn_data = lgb.Dataset(X.iloc[train_index], label=y.iloc[train_index])
val_data = lgb.Dataset(X.iloc[valid_index], label=y.iloc[valid_index])
clf = lgb.train(params, trn_data, num_boost_round=10000, valid_sets=[val_data], verbose_eval=100,
early_stopping_rounds=500)
pred = clf.predict(test_X)
val = clf.predict(X.iloc[valid_index])
print('ROC accuracy: {}'.format(roc_auc_score(y.iloc[valid_index], val)))
aucs.append(roc_auc_score(y.iloc[valid_index], val))
# Do not use the last fold
lgb_sub['isFraud'] = lgb_sub['isFraud'] + pred / (n_fold - 1)
# Use all folds
# lgb_sub['isFraud'] = lgb_sub['isFraud'] + pred / n_fold
print('Fold {} finished in {}'.format(fold_n + 1, str(datetime.timedelta(seconds=time() - start_time))))
subname = '../label/ieee_lgb_kflod.csv'
lgb_sub.to_csv(subname, index=False)
print('-' * 30)
print('Training has finished.')
print('Total training time is {}'.format(str(datetime.timedelta(seconds=time() - training_start_time))))
print('AUCs:', aucs)
print('Mean AUC:', np.mean(aucs))
print('-' * 30)
# Real performance (evaluated against held-out labels)
test1 = pd.read_csv('../temp/test1_label.csv', usecols=["TransactionID", "isFraud"])
test2 = pd.read_csv('../temp/test2_label.csv', usecols=["TransactionID", "isFraud"])
pre = pd.read_csv(subname)
df1 = test1.merge(pre, on="TransactionID", how="left")
print("test1 auc: ", roc_auc_score(df1["isFraud_x"], df1["isFraud_y"]))
df = test2.merge(pre, on="TransactionID", how="left")
print("test2 auc:", roc_auc_score(df["isFraud_x"], df["isFraud_y"]))
|
[] |
[] |
[
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["KMP_DUPLICATE_LIB_OK"]
|
python
| 1 | 0 | |
log_tracker_app.py
|
'''
Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
'''
import time
import os
from watchdog.observers import Observer
from models.Handler import LogFileEventHandler
from dotenv import load_dotenv
if __name__ == "__main__":
load_dotenv()
LOG_FILEPATH = os.environ.get('LOG_FILEPATH')
LOG_DIRECTORY_PATH = os.environ.get('LOG_DIRECTORY_PATH')
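# Both paths come from the environment, typically via a .env file loaded by
# load_dotenv(). Example .env contents (hypothetical values):
#   LOG_FILEPATH=/var/log/app/events.log
#   LOG_DIRECTORY_PATH=/var/log/app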
event_handler = LogFileEventHandler(LOG_FILEPATH)
observer = Observer()
observer.schedule(event_handler, LOG_DIRECTORY_PATH, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
finally:
observer.stop()
observer.join()
|
[] |
[] |
[
"LOG_DIRECTORY_PATH",
"LOG_FILEPATH"
] |
[]
|
["LOG_DIRECTORY_PATH", "LOG_FILEPATH"]
|
python
| 2 | 0 | |
git_t5/cli/train_model.py
|
import os
from dataclasses import dataclass
import hydra
from git_t5.core import (
T5DataModule,
T5DataModuleConfig,
T5ModelForPreTraining,
T5ModelForPreTrainingConfig,
T5Trainer,
T5TrainerConfig,
WandbLogger,
WandbLoggerConfig,
)
from git_t5.core.configs import (
HFDatasetConfig,
LocalDatasetConfig,
MultitaskDatasetConfig,
)
from git_t5.core.optimizers import (
AdafactorConfig,
AdagradConfig,
AdamConfig,
AdamWConfig,
OptimizerConfig,
)
from git_t5.core.schedulers import (
ConstantSchedulerConfig,
InverseSquareRootSchedulerConfig,
LinearSchedulerConfig,
PolynomialSchedulerConfig,
)
from hydra.core.config_store import ConfigStore
from .config import DefaultConfig, register_base_configs
@dataclass
class Config(DefaultConfig):
data: T5DataModuleConfig = T5DataModuleConfig()
model: T5ModelForPreTrainingConfig = T5ModelForPreTrainingConfig()
optimizer: OptimizerConfig = OptimizerConfig()
trainer: T5TrainerConfig = T5TrainerConfig()
logger: WandbLoggerConfig = WandbLoggerConfig()
def register_optimizers(cs: ConfigStore) -> None:
cs.store(
group="optimizer",
name="base_adam",
node=AdamConfig,
)
cs.store(
group="optimizer",
name="base_adamw",
node=AdamWConfig,
)
cs.store(
group="optimizer",
name="base_adafactor",
node=AdafactorConfig,
)
cs.store(
group="optimizer",
name="base_adagrad",
node=AdagradConfig,
)
def register_schedulers(cs: ConfigStore) -> None:
cs.store(
group="optimizer/scheduler",
name="base_polynomial",
node=PolynomialSchedulerConfig,
)
cs.store(
group="optimizer/scheduler",
name="base_inverse_square_root",
node=InverseSquareRootSchedulerConfig,
)
cs.store(
group="optimizer/scheduler",
name="base_linear",
node=LinearSchedulerConfig,
)
cs.store(
group="optimizer/scheduler",
name="base_constant",
node=ConstantSchedulerConfig,
)
def register_datasets(cs: ConfigStore) -> None:
cs.store(
group="dataset",
name="base_huggingface_dataset",
node=HFDatasetConfig,
)
cs.store(
group="dataset",
name="base_local_dataset",
node=LocalDatasetConfig,
)
cs.store(
group="dataset",
name="base_multitask_dataset",
node=MultitaskDatasetConfig,
)
def register_configs() -> None:
cs = ConfigStore.instance()
cs.store(name="default", node=Config)
cs.store(
group="data",
name="base_data",
node=T5DataModuleConfig,
)
cs.store(
group="model",
name="base_model",
node=T5ModelForPreTrainingConfig,
)
cs.store(
group="trainer",
name="base_trainer",
node=T5TrainerConfig,
)
cs.store(
group="logger",
name="base_logger",
node=WandbLoggerConfig,
)
register_optimizers(cs)
register_schedulers(cs)
register_datasets(cs)
@hydra.main(config_path="conf", config_name="config_model")
def hydra_entry(cfg: Config) -> None:
# disable rust iterators multithreading
os.environ["TOKENIZERS_PARALLELISM"] = "false"
logger = WandbLogger(cfg.logger)
model = T5ModelForPreTraining.from_config(cfg)
data_module = T5DataModule.from_config(cfg)
trainer = T5Trainer(
config=cfg,
model=model,
data_module=data_module,
logger=logger,
)
trainer.fit()
def main() -> None:
register_base_configs()
register_configs()
hydra_entry() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
main()
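# Example launch with Hydra overrides (hypothetical entry point and choices;
# the config groups themselves are registered above):
#   python -m git_t5.cli.train_model optimizer=base_adamw optimizer/scheduler=base_linear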
|
[] |
[] |
[
"TOKENIZERS_PARALLELISM"
] |
[]
|
["TOKENIZERS_PARALLELISM"]
|
python
| 1 | 0 | |
cmd/ghapi2db/ghapi2db.go
|
package main
import (
"context"
"database/sql"
"fmt"
"math"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
lib "github.com/cncf/devstatscode"
"github.com/google/go-github/v38/github"
)
// getAPIParams connects to GitHub and Postgres
// Returns list of recent repositories and recent date to fetch commits from
func getAPIParams(ctx *lib.Ctx) (repos []string, isSingleRepo bool, singleRepo string, gctx context.Context, gcs []*github.Client, c *sql.DB, recentDt time.Time) {
// Connect to GitHub API
gctx, gcs = lib.GHClient(ctx)
// Connect to Postgres DB
c = lib.PgConn(ctx)
// Get list of repositories to process
recentReposDt := lib.GetDateAgo(c, ctx, lib.HourStart(time.Now()), ctx.RecentReposRange)
reposA, rids := lib.GetRecentRepos(c, ctx, recentReposDt)
if ctx.Debug > 0 {
lib.Printf("Repos to process from %v: %v\n", recentReposDt, reposA)
}
// Repos can have the same ID under different names
// and can also have the same name under different IDs
// We first need to put all repo names with unique IDs
// And then make this names list unique as well
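// For example (hypothetical data): after a transfer, ID 123 can show up as both
// "old-org/tool" and "new-org/tool", while a deleted and recreated "org/tool" yields
// one name with two IDs - hence the two-step dedup below.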
ridsM := make(map[int64]struct{})
reposM := make(map[string]struct{})
for i := range rids {
rid := rids[i]
_, ok := ridsM[rid]
if !ok {
reposM[reposA[i]] = struct{}{}
ridsM[rid] = struct{}{}
}
}
for repo := range reposM {
repos = append(repos, repo)
}
if ctx.Debug > 0 {
lib.Printf("Unique repos: %v\n", repos)
}
recentDt = lib.GetDateAgo(c, ctx, lib.HourStart(time.Now()), ctx.RecentRange)
// Single repo mode
singleRepo = os.Getenv("REPO")
if singleRepo != "" {
isSingleRepo = true
}
return
}
// getEnrichCommitsDateRange returns the date range of commits to enrich
func getEnrichCommitsDateRange(c *sql.DB, ctx *lib.Ctx, repo string) (dtf time.Time, dtt time.Time, ok bool) {
var pdt *time.Time
rows := lib.QuerySQLWithErr(
c,
ctx,
fmt.Sprintf(
"select coalesce(max(dup_created_at), "+
"(select min(dup_created_at) from gha_commits where dup_repo_name = %s)) "+
"from gha_commits where author_email != '' and dup_repo_name = %s",
lib.NValue(1),
lib.NValue(2),
),
repo,
repo,
)
defer func() { lib.FatalOnError(rows.Close()) }()
for rows.Next() {
lib.FatalOnError(rows.Scan(&pdt))
if pdt == nil {
if ctx.Debug > 0 {
lib.Printf("%s: no date from\n", repo)
}
return
}
dtf = pdt.Add(time.Minute * time.Duration(-2))
}
lib.FatalOnError(rows.Err())
rows = lib.QuerySQLWithErr(
c,
ctx,
fmt.Sprintf(
"select max(dup_created_at) from gha_commits where dup_repo_name = %s",
lib.NValue(1),
),
repo,
)
defer func() { lib.FatalOnError(rows.Close()) }()
for rows.Next() {
lib.FatalOnError(rows.Scan(&pdt))
if pdt == nil {
if ctx.Debug > 0 {
lib.Printf("%s: no date to\n", repo)
}
return
}
dtt = pdt.Add(time.Minute * time.Duration(2))
}
lib.FatalOnError(rows.Err())
if ctx.Debug > 0 {
lib.Printf("%s: %s - %s\n", repo, lib.ToYMDHMSDate(dtf), lib.ToYMDHMSDate(dtt))
}
ok = true
return
}
// Search for given actor using his/her login
// If not found, return hash as its ID
func lookupActorTx(con *sql.Tx, ctx *lib.Ctx, login string, maybeHide func(string) string) int {
hlogin := maybeHide(login)
rows := lib.QuerySQLTxWithErr(
con,
ctx,
fmt.Sprintf(
"select id from gha_actors where login=%s union select id from "+
"gha_actors where lower(login)=%s order by id desc limit 1",
lib.NValue(1),
lib.NValue(2),
),
hlogin,
strings.ToLower(hlogin),
)
defer func() { lib.FatalOnError(rows.Close()) }()
aid := 0
for rows.Next() {
lib.FatalOnError(rows.Scan(&aid))
}
lib.FatalOnError(rows.Err())
if aid == 0 {
aid = lib.HashStrings([]string{login})
}
return aid
}
// Inserts single GHA Actor
func insertActorTx(con *sql.Tx, ctx *lib.Ctx, aid int64, login, name string, maybeHide func(string) string) {
lib.ExecSQLTxWithErr(
con,
ctx,
lib.InsertIgnore("into gha_actors(id, login, name) "+lib.NValues(3)),
lib.AnyArray{aid, maybeHide(login), maybeHide(lib.TruncToBytes(name, 120))}...,
)
}
// processCommit - logic to enrich commit
func processCommit(c *sql.DB, ctx *lib.Ctx, commit *github.RepositoryCommit, maybeHide func(string) string) {
// Check required fields
if commit.Commit == nil {
lib.Fatalf("Nil Commit: %+v\n", commit)
return
}
// Start transaction for data possibly shared between events
tx, err := c.Begin()
lib.FatalOnError(err)
// Shortcuts
// SHA
cSHA := *commit.SHA
// Committer
committerID := int64(0)
committerLogin := ""
if commit.Committer != nil && commit.Committer.ID != nil {
committerID = *commit.Committer.ID
}
if commit.Committer != nil && commit.Committer.Login != nil {
committerLogin = *commit.Committer.Login
}
committerName := *commit.Commit.Committer.Name
committerEmail := *commit.Commit.Committer.Email
// committerDate := *commit.Commit.Committer.Date
// Author
authorID := int64(0)
authorLogin := ""
if commit.Author != nil && commit.Author.ID != nil {
authorID = *commit.Author.ID
}
if commit.Author != nil && commit.Author.Login != nil {
authorLogin = *commit.Author.Login
}
authorName := *commit.Commit.Author.Name
authorEmail := *commit.Commit.Author.Email
authorDate := *commit.Commit.Author.Date
//lib.Printf("%s %v %v\n", cSHA, authorDate, committerDate)
// Check if we already have this commit
strAuthorDate := lib.ToYMDHMSDate(authorDate)
rows := lib.QuerySQLTxWithErr(
tx,
ctx,
fmt.Sprintf(
"select sha, author_name, dup_created_at "+
"from gha_commits where sha = %s "+
"order by abs(extract(epoch from %s - dup_created_at)) "+
"limit 1",
lib.NValue(1),
lib.NValue(2),
),
cSHA,
strAuthorDate,
)
defer func() { lib.FatalOnError(rows.Close()) }()
sha := ""
currentAuthorName := ""
var createdAt time.Time
for rows.Next() {
lib.FatalOnError(rows.Scan(&sha, ¤tAuthorName, &createdAt))
}
lib.FatalOnError(rows.Err())
if sha != "" && ctx.Debug > 1 {
lib.Printf("GHA GHAPI time difference for sha %s: %v\n", cSHA, createdAt.Sub(authorDate))
}
// Get existing committer & author, it is possible that we don't have them yet
newCommitterID := int64(0)
if committerLogin != "" {
newCommitterID = int64(lookupActorTx(tx, ctx, committerLogin, maybeHide))
}
newAuthorID := int64(0)
if authorLogin != "" {
newAuthorID = committerID
if authorLogin != committerLogin {
newAuthorID = int64(lookupActorTx(tx, ctx, authorLogin, maybeHide))
}
}
// Compare to what we currently have, eventually warn and insert new
if committerLogin != "" && sha != "" && newCommitterID != committerID {
if ctx.Debug > 0 {
lib.Printf("DB Committer ID: %d != API Committer ID: %d, sha: %s, login: %s\n", newCommitterID, committerID, cSHA, committerLogin)
}
insertActorTx(tx, ctx, committerID, committerLogin, committerName, maybeHide)
}
if authorLogin != "" && sha != "" && authorLogin != committerLogin && newAuthorID != authorID {
if ctx.Debug > 0 {
lib.Printf("DB Author ID: %d != API Author ID: %d, SHA: %s, login: %s\n", newAuthorID, authorID, cSHA, authorLogin)
}
insertActorTx(tx, ctx, authorID, authorLogin, authorName, maybeHide)
}
// Same author?
if sha != "" && currentAuthorName != authorName {
lib.Printf("Author name mismatch API: %s, DB: %s, SHA: %s\n", authorName, currentAuthorName, cSHA)
}
// If we have that commit, update (enrich) it.
if sha == "" {
sha = *commit.SHA
if ctx.Debug > 1 {
lib.Printf("SHA %s not found\n", sha)
}
} else {
cols := []string{
"author_name=" + lib.NValue(1),
"author_email=" + lib.NValue(2),
"committer_name=" + lib.NValue(3),
"committer_email=" + lib.NValue(4),
}
vals := lib.AnyArray{
maybeHide(lib.TruncToBytes(authorName, 160)),
maybeHide(lib.TruncToBytes(authorEmail, 160)),
maybeHide(lib.TruncToBytes(committerName, 160)),
maybeHide(lib.TruncToBytes(committerEmail, 160)),
}
nVal := 5
if committerLogin != "" {
cols = append(cols, "committer_id="+lib.NValue(nVal))
vals = append(vals, committerID)
nVal++
cols = append(cols, "dup_committer_login="+lib.NValue(nVal))
vals = append(vals, maybeHide(lib.TruncToBytes(committerLogin, 160)))
nVal++
}
if authorLogin != "" {
cols = append(cols, "author_id="+lib.NValue(nVal))
vals = append(vals, authorID)
nVal++
cols = append(cols, "dup_author_login="+lib.NValue(nVal))
vals = append(vals, maybeHide(lib.TruncToBytes(authorLogin, 160)))
nVal++
}
vals = append(vals, sha)
vals = append(vals, createdAt)
query := "update gha_commits set " + strings.Join(cols, ", ")
query += " where sha=" + lib.NValue(nVal) + " and dup_created_at=" + lib.NValue(nVal+1)
lib.ExecSQLTxWithErr(tx, ctx, query, vals...)
}
// Author email
mEmail := maybeHide(lib.TruncToBytes(authorEmail, 120))
lib.ExecSQLTxWithErr(
tx,
ctx,
//lib.InsertIgnore("into gha_actors_emails(actor_id, email) "+lib.NValues(2)),
fmt.Sprintf(
"insert into gha_actors_emails(actor_id, email) %s on conflict(actor_id, email) "+
"do update set origin = 1 where gha_actors_emails.actor_id = %s "+
"and gha_actors_emails.email = %s",
lib.NValues(2),
lib.NValue(3),
lib.NValue(4),
),
lib.AnyArray{authorID, mEmail, authorID, mEmail}...,
)
// Committer email
if committerEmail != authorEmail {
mEmail = maybeHide(lib.TruncToBytes(committerEmail, 120))
lib.ExecSQLTxWithErr(
tx,
ctx,
//lib.InsertIgnore("into gha_actors_emails(actor_id, email) "+lib.NValues(2)),
fmt.Sprintf(
"insert into gha_actors_emails(actor_id, email) %s on conflict(actor_id, email) "+
"do update set origin = 1 where gha_actors_emails.actor_id = %s "+
"and gha_actors_emails.email = %s",
lib.NValues(2),
lib.NValue(3),
lib.NValue(4),
),
lib.AnyArray{committerID, mEmail, committerID, mEmail}...,
)
}
// Author name
mName := maybeHide(lib.TruncToBytes(authorName, 120))
lib.ExecSQLTxWithErr(
tx,
ctx,
//lib.InsertIgnore("into gha_actors_names(actor_id, name) "+lib.NValues(2)),
fmt.Sprintf(
"insert into gha_actors_names(actor_id, name) %s on conflict(actor_id, name) "+
"do update set origin = 1 where gha_actors_names.actor_id = %s "+
"and gha_actors_names.name = %s",
lib.NValues(2),
lib.NValue(3),
lib.NValue(4),
),
lib.AnyArray{authorID, mName, authorID, mName}...,
)
// Committer name
if committerName != authorName {
mName = maybeHide(lib.TruncToBytes(committerName, 120))
lib.ExecSQLTxWithErr(
tx,
ctx,
//lib.InsertIgnore("into gha_actors_names(actor_id, name) "+lib.NValues(2)),
fmt.Sprintf(
"insert into gha_actors_names(actor_id, name) %s on conflict(actor_id, name) "+
"do update set origin = 1 where gha_actors_names.actor_id = %s "+
"and gha_actors_names.name = %s",
lib.NValues(2),
lib.NValue(3),
lib.NValue(4),
),
lib.AnyArray{committerID, mName, committerID, mName}...,
)
}
// Final commit
// lib.FatalOnError(tx.Rollback())
lib.FatalOnError(tx.Commit())
}
// Some debugging options (environment variables)
// You can set:
// REPO=full_repo_name
// DTFROM=datetime 'YYYY-MM-DD hh:mm:ss.uuuuuu'
// To use DTFROM make sure you set GHA2DB_RECENT_RANGE to cover that range too.
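// Example invocation (hypothetical values):
//   REPO=kubernetes/kubernetes DTFROM='2021-06-01 00:00:00' GHA2DB_RECENT_RANGE='2 months' ./ghapi2db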
func syncCommits(ctx *lib.Ctx) {
// Get common params
repos, isSingleRepo, singleRepo, gctx, gc, c, recentDt := getAPIParams(ctx)
defer func() { lib.FatalOnError(c.Close()) }()
// Date range mode
var (
dateRangeFrom *time.Time
dateRangeTo *time.Time
)
isDateRange := false
dateRangeFromS := os.Getenv("DTFROM")
dateRangeToS := os.Getenv("DTTO")
if dateRangeFromS != "" {
tmp := lib.TimeParseAny(dateRangeFromS)
dateRangeFrom = &tmp
isDateRange = true
}
if dateRangeToS != "" {
tmp := lib.TimeParseAny(dateRangeToS)
dateRangeTo = &tmp
isDateRange = true
}
// Process commits in parallel
thrN := lib.GetThreadsNum(ctx)
maxThreads := 16
if maxThreads > thrN {
maxThreads = thrN
}
allowedThrN := maxThreads
var thrMutex = &sync.Mutex{}
apiCalls := 0
var apiCallsMutex = &sync.Mutex{}
ch := make(chan bool)
nThreads := 0
dtStart := time.Now()
lastTime := dtStart
checked := 0
nRepos := len(repos)
lib.Printf("ghapi2db.go: Processing %d repos - GHAPI commits part\n", nRepos)
opt := &github.CommitsListOptions{
Since: recentDt,
// SHA: "s",
// Path: "p",
// Author: "a",
}
opt.PerPage = 100
if isDateRange {
if dateRangeFrom != nil {
opt.Since = *dateRangeFrom
}
if dateRangeTo != nil {
opt.Until = *dateRangeTo
}
}
for _, orgRepo := range repos {
go func(ch chan bool, orgRepo string) {
if isSingleRepo && orgRepo != singleRepo {
ch <- false
return
}
ary := strings.Split(orgRepo, "/")
if len(ary) < 2 {
ch <- false
return
}
org := ary[0]
repo := ary[1]
if org == "" || repo == "" {
ch <- false
return
}
thDtStart := time.Now()
thLastTime := dtStart
// To handle GDPR
maybeHide := lib.MaybeHideFunc(lib.GetHidden(lib.HideCfgFile))
// Need deep copy - threads
copt := opt
// No DTFROM/DTTO set and no GHA2DB_NO_AUTOFETCHCOMMITS
if !isDateRange && ctx.AutoFetchCommits {
dtf, dtt, ok := getEnrichCommitsDateRange(c, ctx, orgRepo)
if !ok {
ch <- false
return
}
copt = &github.CommitsListOptions{
Since: dtf,
Until: dtt,
SHA: opt.SHA,
Path: opt.Path,
Author: opt.Author,
}
copt.PerPage = opt.PerPage
}
var (
err error
commits []*github.RepositoryCommit
response *github.Response
)
nPages := 0
// Synchronize go routine
// start infinite for (paging)
for {
got := false
/// start trials
for tr := 0; tr < ctx.MaxGHAPIRetry; tr++ {
hint, _, rem, waitPeriod := lib.GetRateLimits(gctx, ctx, gc, true)
if ctx.GitHubDebug > 0 {
lib.Printf("Repo commits Try: %d, rem: %+v, waitPeriod: %+v, hint: %d\n", tr, rem, waitPeriod, hint)
}
if rem[hint] <= ctx.MinGHAPIPoints {
if waitPeriod[hint].Seconds() <= float64(ctx.MaxGHAPIWaitSeconds) {
if ctx.GitHubDebug > 0 {
lib.Printf("API limit reached while getting commits data, waiting %v (%d)\n", waitPeriod[hint], tr)
}
time.Sleep(time.Duration(1) * time.Second)
time.Sleep(waitPeriod[hint])
continue
} else {
if ctx.GHAPIErrorIsFatal {
lib.Fatalf("API limit reached while getting commits data, aborting, don't want to wait %v", waitPeriod[hint])
os.Exit(1)
} else {
lib.Printf("Error: API limit reached while getting commits data, aborting, don't want to wait %v\n", waitPeriod[hint])
ch <- false
return
}
}
}
nPages++
if ctx.GitHubDebug > 0 {
lib.Printf("API call for commits %s (%d), remaining GHAPI points %+v, hint: %d\n", orgRepo, nPages, rem, hint)
}
apiCallsMutex.Lock()
apiCalls++
apiCallsMutex.Unlock()
commits, response, err = gc[hint].Repositories.ListCommits(gctx, org, repo, copt)
res := lib.HandlePossibleError(err, orgRepo, "Repositories.ListCommits")
if res != "" {
if res == lib.Abuse {
wait := time.Duration(int(math.Pow(2.0, float64(tr+3)))) * time.Second
thrMutex.Lock()
if ctx.GitHubDebug > 0 {
lib.Printf("GitHub API abuse detected (issues events), wait %v\n", wait)
}
if allowedThrN > 1 {
allowedThrN--
if ctx.GitHubDebug > 0 {
lib.Printf("Lower threads limit (issues events): %d/%d\n", nThreads, allowedThrN)
}
}
thrMutex.Unlock()
time.Sleep(wait)
}
if res == lib.NotFound {
lib.Printf("Warning: not found: %s/%s\n", org, repo)
ch <- false
return
}
continue
} else {
thrMutex.Lock()
if allowedThrN < maxThreads {
allowedThrN++
if ctx.GitHubDebug > 0 {
lib.Printf("Rise threads limit (issues events): %d/%d\n", nThreads, allowedThrN)
}
}
thrMutex.Unlock()
}
got = true
break
}
/// end trials
if !got {
if ctx.GHAPIErrorIsFatal {
lib.Fatalf("GetRateLimit call failed %d times while getting events, aborting", ctx.MaxGHAPIRetry)
os.Exit(2)
} else {
lib.Printf("Error: GetRateLimit call failed %d times while getting events, aborting\n", ctx.MaxGHAPIRetry)
ch <- false
return
}
}
// Process commits
if ctx.Debug > 0 {
lib.Printf("%s: processing %d commits, page %d\n", orgRepo, len(commits), nPages)
}
for _, commit := range commits {
processCommit(c, ctx, commit, maybeHide)
}
hint, _, thRem, thWait := lib.GetRateLimits(gctx, ctx, gc, true)
lib.ProgressInfo(0, 0, thDtStart, &thLastTime, time.Duration(10)*time.Second, fmt.Sprintf("%s page %d, API points: %+v, resets in: %+v, hint: %d", orgRepo, nPages, thRem, thWait, hint))
// Handle paging
if response.NextPage == 0 {
break
}
copt.Page = response.NextPage
}
// end infinite for (paging)
ch <- true
}(ch, orgRepo)
nThreads++
for nThreads >= allowedThrN {
<-ch
nThreads--
checked++
// Get RateLimits info
hint, _, rem, wait := lib.GetRateLimits(gctx, ctx, gc, true)
lib.ProgressInfo(checked, nRepos, dtStart, &lastTime, time.Duration(10)*time.Second, fmt.Sprintf("API points: %+v, resets in: %+v, hint: %d", rem, wait, hint))
}
}
// Usually all work happens on '<-ch'
if ctx.Debug > 0 {
lib.Printf("Final GHAPI threads join\n")
}
for nThreads > 0 {
<-ch
nThreads--
checked++
// Get RateLimits info
hint, _, rem, wait := lib.GetRateLimits(gctx, ctx, gc, true)
lib.ProgressInfo(checked, nRepos, dtStart, &lastTime, time.Duration(10)*time.Second, fmt.Sprintf("API points: %+v, resets in: %+v, hint: %d", rem, wait, hint))
}
lib.Printf("GH Commits API calls: %d\n", apiCalls)
}
// Some debugging options (environment variables)
// You can set:
// REPO=full_repo_name
// DTFROM=datetime 'YYYY-MM-DD hh:mm:ss.uuuuuu'
// DTTO=datetime 'YYYY-MM-DD hh:mm:ss.uuuuuu'
// MILESTONE=milestone name
// ISSUE="issue_number"
// To use DTFROM and DTTO make sure you set GHA2DB_RECENT_RANGE to cover that range too.
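// Example invocation (hypothetical values):
//   REPO=kubernetes/kubernetes MILESTONE=v1.22 ISSUE=12345 GHA2DB_RECENT_RANGE='1 week' ./ghapi2db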
func syncEvents(ctx *lib.Ctx) {
// Get common params
repos, isSingleRepo, singleRepo, gctx, gc, c, recentDt := getAPIParams(ctx)
defer func() { lib.FatalOnError(c.Close()) }()
// Date range mode
var (
dateRangeFrom *time.Time
dateRangeTo *time.Time
)
isDateRange := false
dateRangeFromS := os.Getenv("DTFROM")
dateRangeToS := os.Getenv("DTTO")
if dateRangeFromS != "" {
tmp := lib.TimeParseAny(dateRangeFromS)
dateRangeFrom = &tmp
isDateRange = true
}
if dateRangeToS != "" {
tmp := lib.TimeParseAny(dateRangeToS)
dateRangeTo = &tmp
isDateRange = true
}
// Single milestone mode
isSingleMilestone := false
singleMilestone := os.Getenv("MILESTONE")
if singleMilestone != "" {
isSingleMilestone = true
}
// Single issue mode
isSingleIssue := false
singleIssue := 0
sSingleIssue := os.Getenv("ISSUE")
if sSingleIssue != "" {
var err error
singleIssue, err = strconv.Atoi(sSingleIssue)
if err == nil {
isSingleIssue = true
}
}
// Specify list of events to process
eventTypes := make(map[string]struct{})
eventTypes["closed"] = struct{}{}
eventTypes["merged"] = struct{}{}
eventTypes["referenced"] = struct{}{}
eventTypes["reopened"] = struct{}{}
eventTypes["locked"] = struct{}{}
eventTypes["unlocked"] = struct{}{}
eventTypes["renamed"] = struct{}{}
eventTypes["mentioned"] = struct{}{}
eventTypes["assigned"] = struct{}{}
eventTypes["unassigned"] = struct{}{}
eventTypes["labeled"] = struct{}{}
eventTypes["unlabeled"] = struct{}{}
eventTypes["milestoned"] = struct{}{}
eventTypes["demilestoned"] = struct{}{}
eventTypes["subscribed"] = struct{}{}
eventTypes["unsubscribed"] = struct{}{}
eventTypes["head_ref_deleted"] = struct{}{}
eventTypes["head_ref_restored"] = struct{}{}
eventTypes["review_requested"] = struct{}{}
eventTypes["review_dismissed"] = struct{}{}
eventTypes["review_request_removed"] = struct{}{}
eventTypes["added_to_project"] = struct{}{}
eventTypes["removed_from_project"] = struct{}{}
eventTypes["moved_columns_in_project"] = struct{}{}
eventTypes["marked_as_duplicate"] = struct{}{}
eventTypes["unmarked_as_duplicate"] = struct{}{}
eventTypes["converted_note_to_issue"] = struct{}{}
// Not specified in the GH API docs but happening in practice
eventTypes["base_ref_changed"] = struct{}{}
eventTypes["comment_deleted"] = struct{}{}
eventTypes["deployed"] = struct{}{}
eventTypes["transferred"] = struct{}{}
eventTypes["head_ref_force_pushed"] = struct{}{}
eventTypes["pinned"] = struct{}{}
eventTypes["unpinned"] = struct{}{}
eventTypes["ready_for_review"] = struct{}{}
eventTypes["base_ref_force_pushed"] = struct{}{}
eventTypes["connected"] = struct{}{}
eventTypes["disconnected"] = struct{}{}
eventTypes["convert_to_draft"] = struct{}{}
eventTypes["base_ref_deleted"] = struct{}{}
eventTypes["automatic_base_change_succeeded"] = struct{}{}
eventTypes["automatic_base_change_failed"] = struct{}{}
eventTypes["auto_merge_enabled"] = struct{}{}
eventTypes["auto_merge_disabled"] = struct{}{}
eventTypes["auto_squash_enabled"] = struct{}{}
eventTypes["auto_squash_disabled"] = struct{}{}
eventTypes["auto_rebase_enabled"] = struct{}{}
eventTypes["auto_rebase_disabled"] = struct{}{}
eventTypes["user_blocked"] = struct{}{}
// Get number of CPUs available
thrN := lib.GetThreadsNum(ctx)
// GitHub doesn't like MT (multithreaded) querying - they say that:
// 403 You have triggered an abuse detection mechanism. Please wait a few minutes before you try again
// So let's get all GitHub stuff one-after-another (ugly and slow) and then spawn threads to speed up
// Damn GitHub! - this could be working number-of-CPUs times faster! We're trying some hardcoded value: maxThreads
// Seems like GitHub is not detecting abuse when using 16 threads, but it detects it when using 32.
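// e.g. on a 48-CPU box GetThreadsNum would allow 48 goroutines, but we still cap at 16 (hypothetical numbers).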
maxThreads := 16
if maxThreads > thrN {
maxThreads = thrN
}
allowedThrN := maxThreads
var thrMutex = &sync.Mutex{}
ch := make(chan bool)
nThreads := 0
dtStart := time.Now()
lastTime := dtStart
checked := 0
nRepos := len(repos)
lib.Printf("ghapi2db.go: Processing %d repos - GHAPI Events part\n", nRepos)
//opt := &github.ListOptions{}
opt := &github.ListOptions{PerPage: 100}
issues := make(map[int64]lib.IssueConfigAry)
var issuesMutex = &sync.Mutex{}
eids := make(map[int64][2]int64)
eidRepos := make(map[int64][]string)
var eidsMutex = &sync.Mutex{}
prs := make(map[int64]github.PullRequest)
var prsMutex = &sync.Mutex{}
apiCalls := 0
var apiCallsMutex = &sync.Mutex{}
for _, orgRepo := range repos {
go func(ch chan bool, orgRepo string) {
if isSingleRepo && orgRepo != singleRepo {
ch <- false
return
}
ary := strings.Split(orgRepo, "/")
if len(ary) < 2 {
ch <- false
return
}
org := ary[0]
repo := ary[1]
if org == "" || repo == "" {
ch <- false
return
}
gcfg := lib.IssueConfig{
Repo: orgRepo,
}
var (
err error
events []*github.IssueEvent
response *github.Response
pr *github.PullRequest
)
nPages := 0
for {
got := false
for tr := 0; tr < ctx.MaxGHAPIRetry; tr++ {
hint, _, rem, waitPeriod := lib.GetRateLimits(gctx, ctx, gc, true)
if ctx.GitHubDebug > 0 {
lib.Printf("Issues Repo Events Try: %d, rem: %+v, waitPeriod: %+v, hint: %d\n", tr, rem, waitPeriod, hint)
}
if rem[hint] <= ctx.MinGHAPIPoints {
if waitPeriod[hint].Seconds() <= float64(ctx.MaxGHAPIWaitSeconds) {
if ctx.GitHubDebug > 0 {
lib.Printf("API limit reached while getting events data, waiting %v (%d)\n", waitPeriod[hint], tr)
}
time.Sleep(time.Duration(1) * time.Second)
time.Sleep(waitPeriod[hint])
continue
} else {
if ctx.GHAPIErrorIsFatal {
lib.Fatalf("API limit reached while getting issues events data, aborting, don't want to wait %v", waitPeriod[hint])
os.Exit(1)
} else {
lib.Printf("Error: API limit reached while getting issues events data, aborting, don't want to wait %v\n", waitPeriod[hint])
ch <- false
return
}
}
}
nPages++
if ctx.GitHubDebug > 0 {
lib.Printf("API call for issues events %s (%d), remaining GHAPI points %+v, hint: %d\n", orgRepo, nPages, rem, hint)
}
apiCallsMutex.Lock()
apiCalls++
apiCallsMutex.Unlock()
// Returns events in GHA format
//events, response, err = gc.Activity.ListRepositoryEvents(gctx, org, repo, opt)
// Returns events in Issue Event format (UI events)
events, response, err = gc[hint].Issues.ListRepositoryEvents(gctx, org, repo, opt)
res := lib.HandlePossibleError(err, gcfg.String(), "Issues.ListRepositoryEvents")
if res != "" {
if res == lib.Abuse {
wait := time.Duration(int(math.Pow(2.0, float64(tr+3)))) * time.Second
thrMutex.Lock()
if ctx.GitHubDebug > 0 {
lib.Printf("GitHub API abuse detected (issues events), wait %v\n", wait)
}
if allowedThrN > 1 {
allowedThrN--
if ctx.GitHubDebug > 0 {
lib.Printf("Lower threads limit (issues events): %d/%d\n", nThreads, allowedThrN)
}
}
thrMutex.Unlock()
time.Sleep(wait)
}
if res == lib.NotFound {
lib.Printf("Warning: not found: %s/%s\n", org, repo)
ch <- false
return
}
continue
} else {
thrMutex.Lock()
if allowedThrN < maxThreads {
allowedThrN++
if ctx.GitHubDebug > 0 {
lib.Printf("Rise threads limit (issues events): %d/%d\n", nThreads, allowedThrN)
}
}
thrMutex.Unlock()
}
got = true
break
}
if !got {
if ctx.GHAPIErrorIsFatal {
lib.Fatalf("GetRateLimit call failed %d times while getting events, aborting", ctx.MaxGHAPIRetry)
os.Exit(2)
} else {
lib.Printf("Error: GetRateLimit call failed %d times while getting events, aborting\n", ctx.MaxGHAPIRetry)
ch <- false
return
}
}
minCreatedAt := time.Now()
maxCreatedAt := recentDt
for _, event := range events {
createdAt := *event.CreatedAt
if createdAt.Before(minCreatedAt) {
minCreatedAt = createdAt
}
if createdAt.After(maxCreatedAt) {
maxCreatedAt = createdAt
}
if isDateRange {
if dateRangeFrom != nil && createdAt.Before(*dateRangeFrom) {
continue
}
if dateRangeTo != nil && createdAt.After(*dateRangeTo) {
continue
}
}
if event.Event == nil {
lib.Printf("Warning: Skipping event without type\n")
continue
}
eventType := *event.Event
_, ok := eventTypes[eventType]
if !ok {
lib.Printf("Warning: skipping event type %s for issue %s %d\n", eventType, orgRepo, *event.Issue.Number)
continue
}
issue := event.Issue
if isSingleIssue && (issue.Number == nil || *issue.Number != singleIssue) {
continue
}
if isSingleMilestone && (issue.Milestone == nil || issue.Milestone.Title == nil || *issue.Milestone.Title != singleMilestone) {
continue
}
if createdAt.Before(recentDt) {
continue
}
cfg := lib.IssueConfig{Repo: orgRepo}
eid := *event.ID
iid := *issue.ID
// Check for duplicate events
eidsMutex.Lock()
duplicate := false
_, o := eids[eid]
if o {
eids[eid] = [2]int64{iid, eids[eid][1] + 1}
eidRepos[eid] = append(eidRepos[eid], orgRepo)
duplicate = true
} else {
eids[eid] = [2]int64{iid, 1}
eidRepos[eid] = []string{orgRepo}
}
eidsMutex.Unlock()
if duplicate {
if ctx.Debug > 0 {
lib.Printf("Note: duplicate GH event %d, %v, %v\n", eid, eids[eid], eidRepos[eid])
}
ch <- false
return
}
if issue.Milestone != nil {
cfg.MilestoneID = issue.Milestone.ID
}
if issue.Assignee != nil {
cfg.AssigneeID = issue.Assignee.ID
}
if eventType == "renamed" {
issue.Title = event.Rename.To
}
cfg.EventID = *event.ID
cfg.IssueID = *issue.ID
cfg.EventType = eventType
cfg.CreatedAt = createdAt
cfg.GhIssue = issue
cfg.GhEvent = event
cfg.Number = *issue.Number
cfg.Pr = issue.IsPullRequest()
// Labels
cfg.LabelsMap = make(map[int64]string)
for _, label := range issue.Labels {
cfg.LabelsMap[*label.ID] = *label.Name
}
labelsAry := lib.Int64Ary{}
for label := range cfg.LabelsMap {
labelsAry = append(labelsAry, label)
}
sort.Sort(labelsAry)
l := len(labelsAry)
for i, label := range labelsAry {
if i == l-1 {
cfg.Labels += fmt.Sprintf("%d", label)
} else {
cfg.Labels += fmt.Sprintf("%d,", label)
}
}
// Assignees
cfg.AssigneesMap = make(map[int64]string)
for _, assignee := range issue.Assignees {
cfg.AssigneesMap[*assignee.ID] = *assignee.Login
}
assigneesAry := lib.Int64Ary{}
for assignee := range cfg.AssigneesMap {
assigneesAry = append(assigneesAry, assignee)
}
sort.Sort(assigneesAry)
l = len(assigneesAry)
for i, assignee := range assigneesAry {
if i == l-1 {
cfg.Assignees += fmt.Sprintf("%d", assignee)
} else {
cfg.Assignees += fmt.Sprintf("%d,", assignee)
}
}
issuesMutex.Lock()
_, ok = issues[cfg.IssueID]
if ok {
issues[cfg.IssueID] = append(issues[cfg.IssueID], cfg)
} else {
issues[cfg.IssueID] = []lib.IssueConfig{cfg}
}
issuesMutex.Unlock()
if ctx.Debug > 1 {
lib.Printf("Processing %v\n", cfg)
} else if ctx.Debug == 1 {
lib.Printf("Processing %s issue number %d, event: %s, date: %s\n", cfg.Repo, cfg.Number, cfg.EventType, lib.ToYMDHMSDate(cfg.CreatedAt))
}
// Handle PR
if issue.IsPullRequest() {
prsMutex.Lock()
_, foundPR := prs[cfg.IssueID]
prsMutex.Unlock()
if !foundPR {
prNum := *issue.Number
got = false
for tr := 0; tr < ctx.MaxGHAPIRetry; tr++ {
hint, _, rem, waitPeriod := lib.GetRateLimits(gctx, ctx, gc, true)
if ctx.GitHubDebug > 0 {
lib.Printf("Get PR Try: %d, rem: %+v, waitPeriod: %+v, hint: %d\n", tr, rem, waitPeriod, hint)
}
if rem[hint] <= ctx.MinGHAPIPoints {
if waitPeriod[hint].Seconds() <= float64(ctx.MaxGHAPIWaitSeconds) {
if ctx.GitHubDebug > 0 {
lib.Printf("API limit reached while getting PR data, waiting %v (%d)\n", waitPeriod[hint], tr)
}
time.Sleep(time.Duration(1) * time.Second)
time.Sleep(waitPeriod[hint])
continue
} else {
if ctx.GHAPIErrorIsFatal {
lib.Fatalf("API limit reached while getting PR data, aborting, don't want to wait %v", waitPeriod[hint])
os.Exit(1)
} else {
lib.Printf("Error: API limit reached while getting PR data, aborting, don't want to wait %v\n", waitPeriod[hint])
ch <- false
return
}
}
}
if ctx.GitHubDebug > 0 {
lib.Printf("API call for %s PR: %d, remaining GHAPI points %+v, hint: %d\n", orgRepo, prNum, rem, hint)
}
apiCallsMutex.Lock()
apiCalls++
apiCallsMutex.Unlock()
pr, _, err = gc[hint].PullRequests.Get(gctx, org, repo, prNum)
res := lib.HandlePossibleError(err, gcfg.String(), "PullRequests.Get")
if res != "" {
if res == lib.Abuse {
wait := time.Duration(int(math.Pow(2.0, float64(tr+3)))) * time.Second
thrMutex.Lock()
if ctx.GitHubDebug > 0 {
lib.Printf("GitHub API abuse detected (get PR), wait %v\n", wait)
}
if allowedThrN > 1 {
allowedThrN--
if ctx.GitHubDebug > 0 {
lib.Printf("Lower threads limit (get PR): %d/%d\n", nThreads, allowedThrN)
}
}
thrMutex.Unlock()
time.Sleep(wait)
}
continue
} else {
thrMutex.Lock()
if allowedThrN < maxThreads {
allowedThrN++
if ctx.GitHubDebug > 0 {
lib.Printf("Rise threads limit (get PR): %d/%d\n", nThreads, allowedThrN)
}
}
thrMutex.Unlock()
}
got = true
break
}
if !got {
if ctx.GHAPIErrorIsFatal {
lib.Fatalf("GetRateLimit call failed %d times while getting PR, aborting", ctx.MaxGHAPIRetry)
os.Exit(2)
} else {
lib.Printf("Error: GetRateLimit call failed %d times while getting PR, aborting\n", ctx.MaxGHAPIRetry)
ch <- false
return
}
}
if pr != nil {
prsMutex.Lock()
prs[cfg.IssueID] = *pr
prsMutex.Unlock()
}
}
}
}
if ctx.Debug > 0 {
lib.Printf("%s: [%v - %v] < %v: %v\n", orgRepo, minCreatedAt, maxCreatedAt, recentDt, minCreatedAt.Before(recentDt))
}
if minCreatedAt.Before(recentDt) {
break
}
// Handle paging
if response.NextPage == 0 {
break
}
opt.Page = response.NextPage
}
// Synchronize go routine
ch <- true
}(ch, orgRepo)
nThreads++
for nThreads >= allowedThrN {
<-ch
nThreads--
checked++
// Get RateLimits info
hint, _, rem, wait := lib.GetRateLimits(gctx, ctx, gc, true)
lib.ProgressInfo(checked, nRepos, dtStart, &lastTime, time.Duration(10)*time.Second, fmt.Sprintf("API points: %+v, resets in: %+v, hint: %d", rem, wait, hint))
}
}
// Usually all work happens on '<-ch'
if ctx.Debug > 0 {
lib.Printf("Final GHAPI threads join\n")
}
for nThreads > 0 {
<-ch
nThreads--
checked++
// Get RateLimits info
hint, _, rem, wait := lib.GetRateLimits(gctx, ctx, gc, true)
lib.ProgressInfo(checked, nRepos, dtStart, &lastTime, time.Duration(10)*time.Second, fmt.Sprintf("API points: %+v, resets in: %+v, hint: %d", rem, wait, hint))
}
// API calls
lib.Printf("GH Repo Events/PRs API calls: %d\n", apiCalls)
// Do final corrections
// manual sync: false
lib.SyncIssuesState(gctx, gc, ctx, c, issues, prs, false)
}
func syncLicenses(ctx *lib.Ctx) {
gctx, gcs := lib.GHClient(ctx)
c := lib.PgConn(ctx)
defer func() { lib.FatalOnError(c.Close()) }()
query := lib.RepoNamesQuery
if !ctx.ForceAPILicenses {
query += " and (license_key is null or license_key = '')"
}
repos := []string{}
repo := ""
rows := lib.QuerySQLWithErr(c, ctx, query)
defer func() { lib.FatalOnError(rows.Close()) }()
for rows.Next() {
lib.FatalOnError(rows.Scan(&repo))
repos = append(repos, repo)
}
lib.FatalOnError(rows.Err())
nRepos := len(repos)
lib.Printf("Checking license on %d repos\n", nRepos)
hint, _, rem, wait := lib.GetRateLimits(gctx, ctx, gcs, true)
allowed := 0
handleRate := func() (ok bool) {
if rem[hint] <= ctx.MinGHAPIPoints {
if wait[hint].Seconds() <= float64(ctx.MaxGHAPIWaitSeconds) {
if ctx.GitHubDebug > 0 {
lib.Printf("API limit reached while getting licenses data, waiting %v\n", wait[hint])
}
time.Sleep(time.Duration(1) * time.Second)
time.Sleep(wait[hint])
} else {
if ctx.GHAPIErrorIsFatal {
lib.Fatalf("API limit reached while getting licenses data, aborting, don't want to wait %v", wait[hint])
os.Exit(1)
} else {
lib.Printf("Error: API limit reached while getting licenses data, aborting, don't want to wait %v\n", wait[hint])
return
}
}
hint, _, rem, wait = lib.GetRateLimits(gctx, ctx, gcs, true)
}
allowed = rem[hint] / 10
ok = true
return
}
if !handleRate() {
return
}
thrN := lib.GetThreadsNum(ctx)
processed := 0
lastTime := time.Now()
dtStart := lastTime
freq := time.Duration(30) * time.Second
mtx := &sync.Mutex{}
found := 0
notFound := 0
abuses := 0
iter := func(abused bool) (ok bool) {
if !abused {
processed++
allowed--
} else {
allowed = 0
abuses++
}
if allowed <= 0 {
hint, _, rem, wait = lib.GetRateLimits(gctx, ctx, gcs, true)
if !handleRate() {
return
}
}
lib.ProgressInfo(processed, nRepos, dtStart, &lastTime, freq, fmt.Sprintf("API points: %+v, resets in: %+v, hint: %d", rem, wait, hint))
ok = true
return
}
getLicense := func(ch chan struct{}, orgRepo string) {
defer func() {
if ch != nil {
ch <- struct{}{}
}
}()
noLicense := func() {
query := fmt.Sprintf(
"update gha_repos set license_key = %s, license_name = %s, license_prob = %s, updated_at = %s where name = %s",
lib.NValue(1),
lib.NValue(2),
lib.NValue(3),
lib.NValue(4),
lib.NValue(5),
)
lib.ExecSQLWithErr(c, ctx, query, "not_found", "Not found", 0.0, time.Now(), orgRepo)
mtx.Lock()
notFound++
mtx.Unlock()
}
cl := gcs[hint]
ary := strings.Split(orgRepo, "/")
if len(ary) < 2 {
lib.Printf("WARNING: malformed repo name: '%s'\n", orgRepo)
return
}
org := ary[0]
repo := ary[1]
var license *github.RepositoryLicense
for {
lic, resp, err := cl.Repositories.License(gctx, org, repo)
if resp == nil {
lib.Printf("License API response is null for %s/%s, skipping\n", org, repo)
return
}
if resp.StatusCode == 404 {
lib.Printf("No license found for: %s/%s (404)\n", org, repo)
noLicense()
return
}
if resp.StatusCode >= 400 {
if resp.StatusCode == 403 {
lib.Printf("Licenses abuse detected on %s/%s, retrying\n", org, repo)
mtx.Lock()
if !iter(true) {
mtx.Unlock()
return
}
mtx.Unlock()
continue
} else {
lib.Printf("No license found for: %s/%s, skipping (%d)\n", org, repo, resp.StatusCode)
}
return
}
lib.FatalOnError(err)
if lic == nil {
lib.Printf("License is null for %s/%s, skipping\n", org, repo)
return
}
if lic.License == nil {
lib.Printf("No license found for: %s/%s (nil)\n", org, repo)
return
}
license = lic
break
}
if ctx.Debug > 0 {
lib.Printf("%s license:%+v\n", orgRepo, license.License)
}
query := fmt.Sprintf(
"update gha_repos set license_key = %s, license_name = %s, license_prob = %s, updated_at = %s where name = %s",
lib.NValue(1),
lib.NValue(2),
lib.NValue(3),
lib.NValue(4),
lib.NValue(5),
)
lib.ExecSQLWithErr(c, ctx, query, license.License.Key, license.License.Name, 100.0, time.Now(), orgRepo)
mtx.Lock()
found++
mtx.Unlock()
}
if thrN > 1 {
ch := make(chan struct{})
nThreads := 0
for _, repo := range repos {
go getLicense(ch, repo)
nThreads++
if nThreads == thrN {
<-ch
nThreads--
if !iter(false) {
return
}
}
}
for nThreads > 0 {
<-ch
nThreads--
if !iter(false) {
return
}
}
} else {
for _, repo := range repos {
getLicense(nil, repo)
if !iter(false) {
return
}
}
}
lib.Printf("Processed %d, found %d licenses, %d not found, abuses %d\n", processed, found, notFound, abuses)
}
func syncLangs(ctx *lib.Ctx) {
gctx, gcs := lib.GHClient(ctx)
c := lib.PgConn(ctx)
defer func() { lib.FatalOnError(c.Close()) }()
query := lib.RepoNamesQuery
if !ctx.ForceAPILangs {
query += " and name not in (select distinct repo_name from gha_repos_langs)"
}
repos := []string{}
repo := ""
rows := lib.QuerySQLWithErr(c, ctx, query)
defer func() { lib.FatalOnError(rows.Close()) }()
for rows.Next() {
lib.FatalOnError(rows.Scan(&repo))
repos = append(repos, repo)
}
lib.FatalOnError(rows.Err())
nRepos := len(repos)
lib.Printf("Checking programming languages on %d repos\n", nRepos)
hint, _, rem, wait := lib.GetRateLimits(gctx, ctx, gcs, true)
allowed := 0
handleRate := func() (ok bool) {
if rem[hint] <= ctx.MinGHAPIPoints {
if wait[hint].Seconds() <= float64(ctx.MaxGHAPIWaitSeconds) {
if ctx.GitHubDebug > 0 {
lib.Printf("API limit reached while getting programming languages data, waiting %v\n", wait[hint])
}
time.Sleep(time.Duration(1) * time.Second)
time.Sleep(wait[hint])
} else {
if ctx.GHAPIErrorIsFatal {
lib.Fatalf("API limit reached while getting programming languages data, aborting, don't want to wait %v", wait[hint])
os.Exit(1)
} else {
lib.Printf("Error: API limit reached while getting programming languages data, aborting, don't want to wait %v\n", wait[hint])
return
}
}
hint, _, rem, wait = lib.GetRateLimits(gctx, ctx, gcs, true)
}
allowed = rem[hint] / 10
ok = true
return
}
if !handleRate() {
return
}
thrN := lib.GetThreadsNum(ctx)
processed := 0
lastTime := time.Now()
dtStart := lastTime
freq := time.Duration(30) * time.Second
mtx := &sync.Mutex{}
found := 0
notFound := 0
abuses := 0
iter := func(abused bool) (ok bool) {
if !abused {
processed++
allowed--
} else {
allowed = 0
abuses++
}
if allowed <= 0 {
hint, _, rem, wait = lib.GetRateLimits(gctx, ctx, gcs, true)
if !handleRate() {
return
}
}
lib.ProgressInfo(processed, nRepos, dtStart, &lastTime, freq, fmt.Sprintf("API points: %+v, resets in: %+v, hint: %d", rem, wait, hint))
ok = true
return
}
getLangs := func(ch chan struct{}, orgRepo string) {
defer func() {
if ch != nil {
ch <- struct{}{}
}
}()
noLangs := func() {
lib.ExecSQLWithErr(c, ctx, lib.InsertIgnore("into gha_repos_langs(repo_name, lang_name, lang_loc, lang_perc) "+lib.NValues(4)), orgRepo, "unknown", 0, 0.0)
mtx.Lock()
notFound++
mtx.Unlock()
}
cl := gcs[hint]
ary := strings.Split(orgRepo, "/")
if len(ary) < 2 {
lib.Printf("WARNING: malformed repo name: '%s'\n", orgRepo)
return
}
org := ary[0]
repo := ary[1]
var langs map[string]int
when := time.Now()
for {
ls, resp, err := cl.Repositories.ListLanguages(gctx, org, repo)
if resp == nil {
lib.Printf("Languages API response is null for %s/%s, skipping\n", org, repo)
return
}
if resp.StatusCode == 404 {
lib.Printf("No programming languages found for: %s/%s (404)\n", org, repo)
noLangs()
return
}
if resp.StatusCode >= 400 {
if resp.StatusCode == 403 {
lib.Printf("Languages abuse detected on %s/%s, retrying\n", org, repo)
mtx.Lock()
if !iter(true) {
mtx.Unlock()
return
}
mtx.Unlock()
continue
} else {
lib.Printf("No languages found for: %s/%s, skipping (%d)\n", org, repo, resp.StatusCode)
}
return
}
lib.FatalOnError(err)
if len(ls) == 0 {
lib.Printf("No programming languages found for: %s/%s (0)\n", org, repo)
noLangs()
return
}
langs = ls
break
}
if ctx.Debug > 0 {
lib.Printf("%s languages: %+v\n", orgRepo, langs)
}
allLOC := 0
for _, loc := range langs {
allLOC += loc
}
if allLOC == 0 {
lib.Printf("All BOC sum to 0 for: %s/%s\n", org, repo)
noLangs()
return
}
lib.ExecSQLWithErr(c, ctx, "delete from gha_repos_langs where repo_name = "+lib.NValue(1), orgRepo)
for lang, loc := range langs {
perc := (float64(loc) * 100.0) / float64(allLOC)
lib.ExecSQLWithErr(c, ctx, "insert into gha_repos_langs(repo_name, lang_name, lang_loc, lang_perc, dt) "+lib.NValues(5), orgRepo, lang, loc, perc, when)
}
mtx.Lock()
found++
mtx.Unlock()
}
if thrN > 1 {
ch := make(chan struct{})
nThreads := 0
for _, repo := range repos {
go getLangs(ch, repo)
nThreads++
if nThreads == thrN {
<-ch
nThreads--
if !iter(false) {
return
}
}
}
for nThreads > 0 {
<-ch
nThreads--
if !iter(false) {
return
}
}
} else {
for _, repo := range repos {
getLangs(nil, repo)
if !iter(false) {
return
}
}
}
lib.Printf("Processed %d, found languages on %d repos, on %d not found, abuses: %d\n", processed, found, notFound, abuses)
}
func main() {
// Environment context parse
var ctx lib.Ctx
ctx.Init()
dtStart := time.Now()
// Create artificial events
if !ctx.SkipGHAPI {
if !ctx.SkipAPILicenses {
syncLicenses(&ctx)
}
if !ctx.SkipAPILangs {
syncLangs(&ctx)
}
if !ctx.SkipAPIEvents {
syncEvents(&ctx)
}
if !ctx.SkipAPICommits {
syncCommits(&ctx)
}
}
dtEnd := time.Now()
lib.Printf("Time: %v\n", dtEnd.Sub(dtStart))
}
|
[
"\"REPO\"",
"\"DTFROM\"",
"\"DTTO\"",
"\"DTFROM\"",
"\"DTTO\"",
"\"MILESTONE\"",
"\"ISSUE\""
] |
[] |
[
"DTTO",
"REPO",
"DTFROM",
"ISSUE",
"MILESTONE"
] |
[]
|
["DTTO", "REPO", "DTFROM", "ISSUE", "MILESTONE"]
|
go
| 5 | 0 | |
error-inject/inject.go
|
// +build debug
package inject
import (
"os"
"strconv"
"time"
"github.com/sirupsen/logrus"
)
var (
pingTimeout = true
)
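// Example (hypothetical values): a debug build started with
//   DEBUG_TIMEOUT=5 RPC_PING_TIMEOUT=3 DISABLE_PUNCH_HOLES=True
// injects 5s and 3s delays and disables punch holes.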
// DisablePunchHoles is used for disabling punch holes
func DisablePunchHoles() bool {
ok := os.Getenv("DISABLE_PUNCH_HOLES")
if ok == "True" {
return true
}
return false
}
// AddTimeout adds a delay into the code
func AddTimeout() {
timeout, _ := strconv.Atoi(os.Getenv("DEBUG_TIMEOUT"))
logrus.Infof("Add timeout of %vs for debug build", timeout)
time.Sleep(time.Duration(timeout) * time.Second)
}
// AddPingTimeout adds a delay to the ping response
func AddPingTimeout() {
if pingTimeout {
timeout, _ := strconv.Atoi(os.Getenv("RPC_PING_TIMEOUT"))
logrus.Infof("Add ping timeout of %vs for debug build", timeout)
time.Sleep(time.Duration(timeout) * time.Second)
pingTimeout = false
}
}
// AddPreloadTimeout adds a delay to preload
func AddPreloadTimeout() {
timeout, _ := strconv.Atoi(os.Getenv("PRELOAD_TIMEOUT"))
logrus.Infof("Add preload timeout of %vs for debug build", timeout)
time.Sleep(time.Duration(timeout) * time.Second)
}
// PanicAfterPrepareRebuild panics the replica just after prepare rebuild
func PanicAfterPrepareRebuild() {
ok := os.Getenv("PANIC_AFTER_PREPARE_REBUILD")
if ok == "TRUE" {
time.Sleep(2 * time.Second)
panic("panic replica after getting start signal")
}
}
// AddPunchHoleTimeout adds a delay while punching holes
func AddPunchHoleTimeout() {
timeout, _ := strconv.Atoi(os.Getenv("PUNCH_HOLE_TIMEOUT"))
logrus.Infof("Add punch hole timeout of %vs for debug build", timeout)
time.Sleep(time.Duration(timeout) * time.Second)
}
var UpdateLUNMapTimeoutTriggered bool
// AddUpdateLUNMapTimeout adds delay during UpdateLUNMap
func AddUpdateLUNMapTimeout() {
timeout, _ := strconv.Atoi(os.Getenv("UpdateLUNMap_TIMEOUT"))
logrus.Infof("AddUpdateLUNMap timeout of %vs for debug build", timeout)
UpdateLUNMapTimeoutTriggered = true
time.Sleep(time.Duration(timeout) * time.Second)
}
|
[
"\"DISABLE_PUNCH_HOLES\"",
"\"DEBUG_TIMEOUT\"",
"\"RPC_PING_TIMEOUT\"",
"\"PRELOAD_TIMEOUT\"",
"\"PANIC_AFTER_PREPARE_REBUILD\"",
"\"PUNCH_HOLE_TIMEOUT\"",
"\"UpdateLUNMap_TIMEOUT\""
] |
[] |
[
"PANIC_AFTER_PREPARE_REBUILD",
"DISABLE_PUNCH_HOLES",
"PRELOAD_TIMEOUT",
"DEBUG_TIMEOUT",
"PUNCH_HOLE_TIMEOUT",
"UpdateLUNMap_TIMEOUT",
"RPC_PING_TIMEOUT"
] |
[]
|
["PANIC_AFTER_PREPARE_REBUILD", "DISABLE_PUNCH_HOLES", "PRELOAD_TIMEOUT", "DEBUG_TIMEOUT", "PUNCH_HOLE_TIMEOUT", "UpdateLUNMap_TIMEOUT", "RPC_PING_TIMEOUT"]
|
go
| 7 | 0 | |
go/client/versionfix.go
|
// Copyright 2015 Keybase, Inc. All rights reserved. Use of
// this source code is governed by the included BSD license.
package client
import (
"fmt"
"io/ioutil"
"net"
"os"
"runtime"
"strconv"
"strings"
"time"
"github.com/blang/semver"
"github.com/keybase/client/go/install"
"github.com/keybase/client/go/libkb"
keybase1 "github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/client/go/status"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"golang.org/x/net/context"
)
func getPid(g *libkb.GlobalContext) (int, error) {
fn, err := g.Env.GetPidFile()
if err != nil {
return -1, err
}
data, err := ioutil.ReadFile(fn)
if err != nil {
return -1, err
}
pidString := strings.TrimSpace(string(data))
pid, err := strconv.ParseInt(pidString, 10, 64)
if err != nil {
return -1, err
}
return int(pid), nil
}
func killPid(pid int) error {
if pid < 0 {
return fmt.Errorf("invalid pid given to kill")
}
p, err := os.FindProcess(pid)
if err != nil {
return err
}
err = p.Signal(os.Kill)
return err
}
func FixVersionClash(g *libkb.GlobalContext, cl libkb.CommandLine) (err error) {
var cli keybase1.ConfigClient
var ctlCli keybase1.CtlClient
var serviceConfig keybase1.Config
var socket net.Conn
g.Log.Debug("+ FixVersionClash")
defer func() {
if socket != nil {
socket.Close()
socket = nil
}
g.Log.Debug("- FixVersionClash -> %v", err)
}()
// Make our own stack here, circumventing all of our libraries, so
// as not to introduce any incompatibilities with earlier services
// (like 1.0.8)
socket, err = g.SocketInfo.DialSocket()
if err != nil {
g.Log.Debug("| Failed to DialSocket, but ignoring error: %s\n", err)
return nil
}
xp := libkb.NewTransportFromSocket(g, socket, keybase1.NetworkSource_LOCAL)
srv := rpc.NewServer(xp, libkb.MakeWrapError(g))
gcli := rpc.NewClient(xp, libkb.NewContextifiedErrorUnwrapper(g), nil)
cli = keybase1.ConfigClient{Cli: gcli}
err = srv.Register(NewLogUIProtocol(g))
if err != nil {
return err
}
serviceConfig, err = cli.GetConfig(context.TODO(), 0)
if err != nil {
return err
}
g.Log.Debug("| Contacted service; got version: %s", serviceConfig.Version)
// We'll check and restart the service if there is a new version.
var semverClient, semverService semver.Version
cliVersion := libkb.VersionString()
if g.Env.GetRunMode() == libkb.DevelRunMode {
tmp := os.Getenv("KEYBASE_SET_VERSION")
if len(tmp) > 0 {
cliVersion = tmp
}
}
semverClient, err = semver.Make(cliVersion)
if err != nil {
return err
}
semverService, err = semver.Make(serviceConfig.Version)
if err != nil {
return err
}
g.Log.Debug("| version check %s v %s", semverClient, semverService)
if semverClient.EQ(semverService) {
g.Log.Debug("| versions check out")
return nil
} else if semverClient.LT(semverService) && semverClient.Major < semverService.Major {
return fmt.Errorf("Unexpected version clash; client is at v%s, which is significantly *less than* server at v%s",
semverClient, semverService)
}
// There's a common situation in development where the service is running
// from a production install under a watchdog/launchd/systemd, but the
// client is a binary you just built from your GOPATH. In this case,
// restarting the service (e.g. `systemctl --user restart keybase.service`)
// isn't going to help. Detect that situation by comparing the paths of the
// binaries involved, and print a warning instead of restarting. Note that
// older services don't send the BinaryRealpath field, so we have to check
// that it's not empty.
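// Example of the mismatch this catches (hypothetical paths):
//   service: /opt/keybase/keybase   client: /home/user/go/bin/keybase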
clientRealpath, err := libkb.CurrentBinaryRealpath()
if err != nil {
g.Log.Warning("Failed to get current realpath: %s", err)
} else if serviceConfig.BinaryRealpath != "" && serviceConfig.BinaryRealpath != clientRealpath {
g.Log.Warning("Service is running v%s, while client is running v%s.",
semverService, semverClient)
g.Log.Warning("Skipping restart, because their paths differ:")
g.Log.Warning("service: %s", serviceConfig.BinaryRealpath)
g.Log.Warning(" client: %s", clientRealpath)
return nil
}
g.Log.Warning("Restarting after upgrade; service is running v%s, while v%s is available",
semverService, semverClient)
origPid, err := getPid(g)
if err != nil {
g.Log.Warning("Failed to find pid for service: %v\n", err)
}
if serviceConfig.ForkType == keybase1.ForkType_LAUNCHD {
return restartLaunchdService(g, serviceConfig.Label, g.Env.GetServiceInfoPath())
}
ctlCli = keybase1.CtlClient{Cli: gcli}
err = ctlCli.Stop(context.TODO(), keybase1.StopArg{})
if err != nil && origPid >= 0 {
// A fallback approach. I haven't seen a need for it, but it can't really hurt.
// If we fail to restart via Stop() then revert to kill techniques.
g.Log.Warning("Error in Stopping %d via RPC: %v; trying fallback (kill via pidfile)", origPid, err)
time.Sleep(time.Second)
var newPid int
newPid, err = getPid(g)
if err != nil {
g.Log.Warning("No pid; shutdown must have worked (%v)", err)
} else if newPid != origPid {
g.Log.Warning("New service found with pid=%d; assuming restart", newPid)
return nil
} else {
if err = killPid(origPid); err != nil {
g.Log.Warning("Kill via pidfile failed: %v\n", err)
return err
}
g.Log.Warning("Successful kill() on pid=%d", origPid)
}
}
socket.Close()
socket = nil
time.Sleep(10 * time.Millisecond)
g.Log.Debug("Waiting for shutdown...")
time.Sleep(1 * time.Second)
if serviceConfig.ForkType == keybase1.ForkType_AUTO || serviceConfig.ForkType == keybase1.ForkType_SYSTEMD {
g.Log.Info("Restarting service...")
_, err = AutoForkServer(g, cl)
}
return err
}
func WarnOutdatedKBFS(g *libkb.GlobalContext, cl libkb.CommandLine) (err error) {
cli, err := GetConfigClient(g)
if err != nil {
return err
}
clientStatus, err := cli.GetClientStatus(context.TODO(), 0)
if err != nil {
return err
}
var kbfsClientVersion string
kbfs := status.GetFirstClient(clientStatus, keybase1.ClientType_KBFS)
if kbfs == nil {
g.Log.Debug("| KBFS not running; skip KBFS version check")
return nil
}
kbfsClientVersion = kbfs.Version
kbfsInstalledVersion, err := install.KBFSBundleVersion(g, "")
if err != nil {
return err
}
g.Log.Debug("| KBFS version check installed=%s v. client=%s", kbfsInstalledVersion, kbfsClientVersion)
kbfsClientSemver, err := semver.Make(kbfsClientVersion)
if err != nil {
return err
}
kbfsInstalledSemver, err := semver.Make(kbfsInstalledVersion)
if err != nil {
return err
}
if kbfsClientSemver.GT(kbfsInstalledSemver) {
g.Log.Debug("| KBFS client version greater than installed")
} else if kbfsClientSemver.EQ(kbfsInstalledSemver) {
g.Log.Debug("| KBFS versions check out")
} else if kbfsClientSemver.Major < kbfsInstalledSemver.Major {
return fmt.Errorf("Unexpected KBFS version clash; client is at v%s, which is significantly *less than* installed at v%s",
kbfsClientSemver, kbfsInstalledSemver)
} else {
g.Log.Warning("KBFS needs to restart; running version %s, but %s installed.", kbfsClientSemver, kbfsInstalledSemver)
if runtime.GOOS == "linux" {
mountDir, err := g.Env.GetMountDir()
g.Log.Debug("| KBFS mountdir %s", mountDir)
if err != nil {
return err
}
processes, err := install.LsofMount(mountDir, g.Log)
g.Log.Debug("| KBFS lsof err=%s", err)
g.Log.Debug("| KBFS lsof processes=%v", processes)
if err != nil || len(processes) == 0 {
g.Log.Warning("Run 'run_keybase' to restart Keybase services.")
} else {
g.Log.Warning("KBFS currently in use by the following processes:")
for _, process := range processes {
g.Log.Warning("- pid=%s, cmd=%s", process.PID, process.Command)
}
g.Log.Warning("Please terminate the above processes and then run 'run_keybase' to restart Keybase services safely.")
}
}
}
return nil
}
|
[
"\"KEYBASE_SET_VERSION\""
] |
[] |
[
"KEYBASE_SET_VERSION"
] |
[]
|
["KEYBASE_SET_VERSION"]
|
go
| 1 | 0 | |
modules/openapi-generator/src/main/java/org/openapitools/codegen/languages/AbstractKotlinCodegen.java
|
/*
* Copyright 2018 OpenAPI-Generator Contributors (https://openapi-generator.tech)
* Copyright 2018 SmartBear Software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openapitools.codegen.languages;
import io.swagger.v3.oas.models.media.ArraySchema;
import io.swagger.v3.oas.models.media.Schema;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.StringUtils;
import org.openapitools.codegen.CliOption;
import org.openapitools.codegen.CodegenConfig;
import org.openapitools.codegen.CodegenConstants;
import org.openapitools.codegen.CodegenModel;
import org.openapitools.codegen.DefaultCodegen;
import org.openapitools.codegen.utils.ModelUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.*;
import java.util.stream.Collectors;
import static org.openapitools.codegen.utils.StringUtils.*;
public abstract class AbstractKotlinCodegen extends DefaultCodegen implements CodegenConfig {
public static final String SERIALIZATION_LIBRARY_DESC = "What serialization library to use: 'moshi' (default), or 'gson'";
public enum SERIALIZATION_LIBRARY_TYPE {moshi, gson}
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractKotlinCodegen.class);
protected String artifactId;
protected String artifactVersion = "1.0.0";
protected String groupId = "org.openapitools";
protected String packageName = "org.openapitools";
protected String apiSuffix = "Api";
protected String sourceFolder = "src/main/kotlin";
protected String testFolder = "src/test/kotlin";
protected String apiDocPath = "docs/";
protected String modelDocPath = "docs/";
protected boolean parcelizeModels = false;
protected boolean serializableModel = false;
protected boolean needsDataClassBody = false;
protected boolean hasEnums = false;
protected CodegenConstants.ENUM_PROPERTY_NAMING_TYPE enumPropertyNaming = CodegenConstants.ENUM_PROPERTY_NAMING_TYPE.camelCase;
protected SERIALIZATION_LIBRARY_TYPE serializationLibrary = SERIALIZATION_LIBRARY_TYPE.moshi;
public AbstractKotlinCodegen() {
super();
supportsInheritance = true;
languageSpecificPrimitives = new HashSet<String>(Arrays.asList(
"kotlin.Byte",
"kotlin.ByteArray",
"kotlin.Short",
"kotlin.Int",
"kotlin.Long",
"kotlin.Float",
"kotlin.Double",
"kotlin.Boolean",
"kotlin.Char",
"kotlin.String",
"kotlin.Array",
"kotlin.collections.List",
"kotlin.collections.Map",
"kotlin.collections.Set"
));
// this includes hard reserved words defined by https://github.com/JetBrains/kotlin/blob/master/core/descriptors/src/org/jetbrains/kotlin/renderer/KeywordStringsGenerated.java
// as well as keywords from https://kotlinlang.org/docs/reference/keyword-reference.html
reservedWords = new HashSet<String>(Arrays.asList(
"abstract",
"annotation",
"as",
"break",
"case",
"catch",
"class",
"companion",
"const",
"constructor",
"continue",
"crossinline",
"data",
"delegate",
"do",
"else",
"enum",
"external",
"false",
"final",
"finally",
"for",
"fun",
"if",
"in",
"infix",
"init",
"inline",
"inner",
"interface",
"internal",
"is",
"it",
"lateinit",
"lazy",
"noinline",
"null",
"object",
"open",
"operator",
"out",
"override",
"package",
"private",
"protected",
"public",
"reified",
"return",
"sealed",
"super",
"suspend",
"tailrec",
"this",
"throw",
"true",
"try",
"typealias",
"typeof",
"val",
"var",
"vararg",
"when",
"while"
));
defaultIncludes = new HashSet<String>(Arrays.asList(
"kotlin.Byte",
"kotlin.ByteArray",
"kotlin.Short",
"kotlin.Int",
"kotlin.Long",
"kotlin.Float",
"kotlin.Double",
"kotlin.Boolean",
"kotlin.Char",
"kotlin.Array",
"kotlin.collections.List",
"kotlin.collections.Set",
"kotlin.collections.Map"
));
typeMapping = new HashMap<String, String>();
typeMapping.put("string", "kotlin.String");
typeMapping.put("boolean", "kotlin.Boolean");
typeMapping.put("integer", "kotlin.Int");
typeMapping.put("float", "kotlin.Float");
typeMapping.put("long", "kotlin.Long");
typeMapping.put("double", "kotlin.Double");
typeMapping.put("ByteArray", "kotlin.ByteArray");
typeMapping.put("number", "java.math.BigDecimal");
typeMapping.put("date-time", "java.time.LocalDateTime");
typeMapping.put("date", "java.time.LocalDate");
typeMapping.put("file", "java.io.File");
typeMapping.put("array", "kotlin.Array");
typeMapping.put("list", "kotlin.collections.List");
typeMapping.put("map", "kotlin.collections.Map");
typeMapping.put("object", "kotlin.Any");
typeMapping.put("binary", "kotlin.Array<kotlin.Byte>");
typeMapping.put("Date", "java.time.LocalDate");
typeMapping.put("DateTime", "java.time.LocalDateTime");
instantiationTypes.put("array", "kotlin.arrayOf");
instantiationTypes.put("list", "kotlin.arrayOf");
instantiationTypes.put("map", "kotlin.mapOf");
importMapping = new HashMap<String, String>();
importMapping.put("BigDecimal", "java.math.BigDecimal");
importMapping.put("UUID", "java.util.UUID");
importMapping.put("URI", "java.net.URI");
importMapping.put("File", "java.io.File");
importMapping.put("Date", "java.util.Date");
importMapping.put("Timestamp", "java.sql.Timestamp");
importMapping.put("DateTime", "java.time.LocalDateTime");
importMapping.put("LocalDateTime", "java.time.LocalDateTime");
importMapping.put("LocalDate", "java.time.LocalDate");
importMapping.put("LocalTime", "java.time.LocalTime");
specialCharReplacements.put(";", "Semicolon");
cliOptions.clear();
addOption(CodegenConstants.SOURCE_FOLDER, CodegenConstants.SOURCE_FOLDER_DESC, sourceFolder);
addOption(CodegenConstants.PACKAGE_NAME, "Generated artifact package name.", packageName);
addOption(CodegenConstants.API_SUFFIX, CodegenConstants.API_SUFFIX_DESC, apiSuffix);
addOption(CodegenConstants.GROUP_ID, "Generated artifact package's organization (i.e. maven groupId).", groupId);
addOption(CodegenConstants.ARTIFACT_ID, "Generated artifact id (name of jar).", artifactId);
addOption(CodegenConstants.ARTIFACT_VERSION, "Generated artifact's package version.", artifactVersion);
CliOption enumPropertyNamingOpt = new CliOption(CodegenConstants.ENUM_PROPERTY_NAMING, CodegenConstants.ENUM_PROPERTY_NAMING_DESC);
cliOptions.add(enumPropertyNamingOpt.defaultValue(enumPropertyNaming.name()));
CliOption serializationLibraryOpt = new CliOption(CodegenConstants.SERIALIZATION_LIBRARY, SERIALIZATION_LIBRARY_DESC);
cliOptions.add(serializationLibraryOpt.defaultValue(serializationLibrary.name()));
cliOptions.add(new CliOption(CodegenConstants.PARCELIZE_MODELS, CodegenConstants.PARCELIZE_MODELS_DESC));
cliOptions.add(new CliOption(CodegenConstants.SERIALIZABLE_MODEL, CodegenConstants.SERIALIZABLE_MODEL_DESC));
}
@Override
public String apiDocFileFolder() {
return (outputFolder + File.separator + apiDocPath).replace('/', File.separatorChar);
}
@Override
public String apiFileFolder() {
return (outputFolder + File.separator + sourceFolder + File.separator + apiPackage().replace('.', File.separatorChar)).replace('/', File.separatorChar);
}
@Override
public String apiTestFileFolder() {
return (outputFolder + File.separator + testFolder + File.separator + apiPackage().replace('.', File.separatorChar)).replace('/', File.separatorChar) ;
}
@Override
public String escapeQuotationMark(String input) {
// remove " to avoid code injection
return input.replace("\"", "");
}
@Override
public String escapeReservedWord(String name) {
// TODO: Allow enum escaping as an option (e.g. backticks vs append/prepend underscore vs match model property escaping).
return String.format(Locale.ROOT, "`%s`", name);
}
@Override
public String escapeUnsafeCharacters(String input) {
return input.replace("*/", "*_/").replace("/*", "/_*");
}
public CodegenConstants.ENUM_PROPERTY_NAMING_TYPE getEnumPropertyNaming() {
return this.enumPropertyNaming;
}
public SERIALIZATION_LIBRARY_TYPE getSerializationLibrary() {
return this.serializationLibrary;
}
/**
* Sets the naming convention for Kotlin enum properties
*
* @param enumPropertyNamingType The string representation of the naming convention, as defined by {@link org.openapitools.codegen.CodegenConstants.ENUM_PROPERTY_NAMING_TYPE}
*/
public void setEnumPropertyNaming(final String enumPropertyNamingType) {
try {
this.enumPropertyNaming = CodegenConstants.ENUM_PROPERTY_NAMING_TYPE.valueOf(enumPropertyNamingType);
} catch (IllegalArgumentException ex) {
StringBuilder sb = new StringBuilder(enumPropertyNamingType + " is an invalid enum property naming option. Please choose from:");
for (CodegenConstants.ENUM_PROPERTY_NAMING_TYPE t : CodegenConstants.ENUM_PROPERTY_NAMING_TYPE.values()) {
sb.append("\n ").append(t.name());
}
throw new RuntimeException(sb.toString());
}
}
/**
* Sets the serialization engine for Kotlin
*
* @param enumSerializationLibrary The string representation of the serialization library as defined by
* {@link org.openapitools.codegen.languages.AbstractKotlinCodegen.SERIALIZATION_LIBRARY_TYPE}
*/
public void setSerializationLibrary(final String enumSerializationLibrary) {
try {
this.serializationLibrary = SERIALIZATION_LIBRARY_TYPE.valueOf(enumSerializationLibrary);
} catch (IllegalArgumentException ex) {
StringBuilder sb = new StringBuilder(enumSerializationLibrary + " is an invalid serialization library option. Please choose from:");
for (SERIALIZATION_LIBRARY_TYPE t : SERIALIZATION_LIBRARY_TYPE.values()) {
sb.append("\n ").append(t.name());
}
throw new RuntimeException(sb.toString());
}
}
/**
* returns the swagger type for the property
*
* @param p Swagger property object
* @return string presentation of the type
**/
@Override
public String getSchemaType(Schema p) {
String openAPIType = super.getSchemaType(p);
String type;
// This maps, for example, long -> kotlin.Long, based on the typeMapping populated in this class's constructor
if (typeMapping.containsKey(openAPIType)) {
type = typeMapping.get(openAPIType);
if (languageSpecificPrimitives.contains(type)) {
return toModelName(type);
}
} else {
type = openAPIType;
}
return toModelName(type);
}
/**
* Output the type declaration of the property
*
* @param p Swagger Property object
* @return a string presentation of the property type
*/
@Override
public String getTypeDeclaration(Schema p) {
if (ModelUtils.isArraySchema(p)) {
return getArrayTypeDeclaration((ArraySchema) p);
} else if (ModelUtils.isMapSchema(p)) {
Schema inner = ModelUtils.getAdditionalProperties(p);
// Maps will be keyed only by primitive Kotlin string
return getSchemaType(p) + "<kotlin.String, " + getTypeDeclaration(inner) + ">";
}
return super.getTypeDeclaration(p);
}
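// Illustrative note (added, not part of the original generator source): with the default
// typeMapping above, an OpenAPI array of strings would typically be rendered as
// kotlin.Array<kotlin.String>, and a map with integer values as
// kotlin.collections.Map<kotlin.String, kotlin.Int>, since maps are always keyed by
// kotlin.String here.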
@Override
public String modelDocFileFolder() {
return (outputFolder + "/" + modelDocPath).replace('/', File.separatorChar);
}
@Override
public String modelFileFolder() {
return outputFolder + File.separator + sourceFolder + File.separator + modelPackage().replace('.', File.separatorChar);
}
@Override
public Map<String, Object> postProcessModels(Map<String, Object> objs) {
return postProcessModelsEnum(super.postProcessModels(objs));
}
@Override
public void processOpts() {
super.processOpts();
if (StringUtils.isEmpty(System.getenv("KOTLIN_POST_PROCESS_FILE"))) {
LOGGER.info("Environment variable KOTLIN_POST_PROCESS_FILE not defined so the Kotlin code may not be properly formatted. To define it, try 'export KOTLIN_POST_PROCESS_FILE=\"/usr/local/bin/ktlint -F\"' (Linux/Mac)");
LOGGER.info("NOTE: To enable file post-processing, 'enablePostProcessFile' must be set to `true` (--enable-post-process-file for CLI).");
}
if (additionalProperties.containsKey(CodegenConstants.ENUM_PROPERTY_NAMING)) {
setEnumPropertyNaming((String) additionalProperties.get(CodegenConstants.ENUM_PROPERTY_NAMING));
}
if (additionalProperties.containsKey(CodegenConstants.SERIALIZATION_LIBRARY)) {
setSerializationLibrary((String) additionalProperties.get(CodegenConstants.SERIALIZATION_LIBRARY));
additionalProperties.put(this.serializationLibrary.name(), true);
}
else {
additionalProperties.put(this.serializationLibrary.name(), true);
}
if (additionalProperties.containsKey(CodegenConstants.SOURCE_FOLDER)) {
this.setSourceFolder((String) additionalProperties.get(CodegenConstants.SOURCE_FOLDER));
} else {
additionalProperties.put(CodegenConstants.SOURCE_FOLDER, sourceFolder);
}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_NAME)) {
this.setPackageName((String) additionalProperties.get(CodegenConstants.PACKAGE_NAME));
if (!additionalProperties.containsKey(CodegenConstants.MODEL_PACKAGE))
this.setModelPackage(packageName + ".models");
if (!additionalProperties.containsKey(CodegenConstants.API_PACKAGE))
this.setApiPackage(packageName + ".apis");
} else {
additionalProperties.put(CodegenConstants.PACKAGE_NAME, packageName);
}
if (additionalProperties.containsKey(CodegenConstants.API_SUFFIX)) {
this.setApiSuffix((String) additionalProperties.get(CodegenConstants.API_SUFFIX));
}
if (additionalProperties.containsKey(CodegenConstants.ARTIFACT_ID)) {
this.setArtifactId((String) additionalProperties.get(CodegenConstants.ARTIFACT_ID));
} else {
additionalProperties.put(CodegenConstants.ARTIFACT_ID, artifactId);
}
if (additionalProperties.containsKey(CodegenConstants.GROUP_ID)) {
this.setGroupId((String) additionalProperties.get(CodegenConstants.GROUP_ID));
} else {
additionalProperties.put(CodegenConstants.GROUP_ID, groupId);
}
if (additionalProperties.containsKey(CodegenConstants.ARTIFACT_VERSION)) {
this.setArtifactVersion((String) additionalProperties.get(CodegenConstants.ARTIFACT_VERSION));
} else {
additionalProperties.put(CodegenConstants.ARTIFACT_VERSION, artifactVersion);
}
if (additionalProperties.containsKey(CodegenConstants.INVOKER_PACKAGE)) {
LOGGER.warn(CodegenConstants.INVOKER_PACKAGE + " with " + this.getName() + " generator is ignored. Use " + CodegenConstants.PACKAGE_NAME + ".");
}
if (additionalProperties.containsKey(CodegenConstants.SERIALIZABLE_MODEL)) {
this.setSerializableModel(Boolean.valueOf((String) additionalProperties.get(CodegenConstants.SERIALIZABLE_MODEL)));
} else {
additionalProperties.put(CodegenConstants.SERIALIZABLE_MODEL, serializableModel);
}
if (additionalProperties.containsKey(CodegenConstants.PARCELIZE_MODELS)) {
this.setParcelizeModels(Boolean.valueOf((String) additionalProperties.get(CodegenConstants.PARCELIZE_MODELS)));
} else {
additionalProperties.put(CodegenConstants.PARCELIZE_MODELS, parcelizeModels);
}
additionalProperties.put(CodegenConstants.NEEDS_DATACLASS_BODY, this.hasEnums || serializableModel);
additionalProperties.put(CodegenConstants.API_PACKAGE, apiPackage());
additionalProperties.put(CodegenConstants.MODEL_PACKAGE, modelPackage());
additionalProperties.put("apiDocPath", apiDocPath);
additionalProperties.put("modelDocPath", modelDocPath);
}
public void setArtifactId(String artifactId) {
this.artifactId = artifactId;
}
public void setArtifactVersion(String artifactVersion) {
this.artifactVersion = artifactVersion;
}
public void setGroupId(String groupId) {
this.groupId = groupId;
}
public void setPackageName(String packageName) {
this.packageName = packageName;
}
public void setApiSuffix(String apiSuffix) {
this.apiSuffix = apiSuffix;
}
public void setSourceFolder(String sourceFolder) {
this.sourceFolder = sourceFolder;
}
public void setTestFolder(String testFolder) {
this.testFolder = testFolder;
}
public Boolean getParcelizeModels() {
return parcelizeModels;
}
public void setParcelizeModels(Boolean parcelizeModels) {
this.parcelizeModels = parcelizeModels;
}
public boolean isSerializableModel() {
return serializableModel;
}
public void setSerializableModel(boolean serializableModel) {
this.serializableModel = serializableModel;
}
public boolean isNeedsDataClassBody() {
return needsDataClassBody;
}
public void setNeedsDataClassBody(boolean needsDataClassBody) {
this.needsDataClassBody = needsDataClassBody;
}
/**
* Return the sanitized variable name for enum
*
* @param value enum variable name
* @param datatype data type
* @return the sanitized variable name for enum
*/
@Override
public String toEnumVarName(String value, String datatype) {
String modified;
if (value.length() == 0) {
modified = "EMPTY";
} else {
modified = value;
modified = sanitizeKotlinSpecificNames(modified);
}
switch (getEnumPropertyNaming()) {
case original:
// NOTE: This is provided as a last-case allowance, but will still result in reserved words being escaped.
modified = value;
break;
case camelCase:
// NOTE: Removes hyphens and underscores
modified = camelize(modified, true);
break;
case PascalCase:
// NOTE: Removes hyphens and underscores
String result = camelize(modified);
modified = titleCase(result);
break;
case snake_case:
// NOTE: Removes hyphens
modified = underscore(modified);
break;
case UPPERCASE:
modified = modified.toUpperCase(Locale.ROOT);
break;
}
if (reservedWords.contains(modified)) {
return escapeReservedWord(modified);
}
// NOTE: another sanitize because camelize can create an invalid name
return sanitizeKotlinSpecificNames(modified);
}
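// Illustrative examples for toEnumVarName above (added, not from the original source):
// with PascalCase naming, an enum value "available_now" would typically become
// "AvailableNow"; with UPPERCASE naming it becomes "AVAILABLE_NOW"; with `original`
// naming, a value that is also a reserved word (e.g. "open") is returned backtick-escaped.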
@Override
public String toInstantiationType(Schema p) {
if (ModelUtils.isArraySchema(p)) {
return getArrayTypeDeclaration((ArraySchema) p);
}
return super.toInstantiationType(p);
}
@Override
public String toApiName(String name) {
if (name.length() == 0) {
return "DefaultApi";
}
return (this.apiSuffix.isEmpty() ? camelize(name) : camelize(name) + this.apiSuffix);
}
/**
* Return the fully-qualified "Model" name for import
*
* @param name the name of the "Model"
* @return the fully-qualified "Model" name for import
*/
@Override
public String toModelImport(String name) {
// toModelImport is called while processing operations, but DefaultCodegen doesn't
// define imports correctly with fully qualified primitives and models as defined in this generator.
if (needToImport(name)) {
return super.toModelImport(name);
}
return name;
}
/**
* Output the proper model name (capitalized).
* If the name already belongs to the Kotlin or Java type system, it won't be renamed.
*
* @param name the name of the model
* @return capitalized model name
*/
@Override
public String toModelName(final String name) {
// Allow for explicitly configured kotlin.* and java.* types
if (name.startsWith("kotlin.") || name.startsWith("java.")) {
return name;
}
// If importMapping contains name, assume this is a legitimate model name.
if (importMapping.containsKey(name)) {
return importMapping.get(name);
}
String modifiedName = name.replaceAll("\\.", "");
String sanitizedName = sanitizeKotlinSpecificNames(modifiedName);
String nameWithPrefixSuffix = sanitizedName;
if (!StringUtils.isEmpty(modelNamePrefix)) {
// add '_' so that model name can be camelized correctly
nameWithPrefixSuffix = modelNamePrefix + "_" + nameWithPrefixSuffix;
}
if (!StringUtils.isEmpty(modelNameSuffix)) {
// add '_' so that model name can be camelized correctly
nameWithPrefixSuffix = nameWithPrefixSuffix + "_" + modelNameSuffix;
}
// Camelize name of nested properties
modifiedName = camelize(nameWithPrefixSuffix);
// model name cannot use reserved keyword, e.g. return
if (isReservedWord(modifiedName)) {
final String modelName = "Model" + modifiedName;
LOGGER.warn(modifiedName + " (reserved word) cannot be used as model name. Renamed to " + modelName);
return modelName;
}
// model name starts with number
if (modifiedName.matches("^\\d.*")) {
final String modelName = "Model" + modifiedName; // e.g. 200Response => Model200Response (after camelize)
LOGGER.warn(name + " (model name starts with number) cannot be used as model name. Renamed to " + modelName);
return modelName;
}
return titleCase(modifiedName);
}
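// Illustrative examples for toModelName above (added, not from the original source):
// a schema named "api_response" would typically become "ApiResponse", a schema named
// "200_response" becomes "Model200Response" because the camelized name starts with a
// digit, and names already qualified with "kotlin." or "java." are returned untouched.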
/**
* Return the operation ID (method name)
*
* @param operationId operation ID
* @return the sanitized method name
*/
@Override
public String toOperationId(String operationId) {
// throw exception if method name is empty
if (StringUtils.isEmpty(operationId))
throw new RuntimeException("Empty method/operation name (operationId) not allowed");
operationId = camelize(sanitizeName(operationId), true);
// method name cannot use reserved keyword, e.g. return
if (isReservedWord(operationId)) {
String newOperationId = camelize("call_" + operationId, true);
LOGGER.warn(operationId + " (reserved word) cannot be used as method name. Renamed to " + newOperationId);
return newOperationId;
}
// operationId starts with a number
if (operationId.matches("^\\d.*")) {
String newOperationId = camelize("call_" + operationId, true);
LOGGER.warn(operationId + " (starting with a number) cannot be used as method name. Renamed to " + newOperationId);
operationId = newOperationId;
}
return operationId;
}
@Override
public String toModelFilename(String name) {
// Should be the same as the model name
return toModelName(name);
}
/**
* Provides a strongly typed declaration for simple arrays of some type and arrays of arrays of some type.
*
* @param arr Array schema
* @return type declaration of array
*/
private String getArrayTypeDeclaration(ArraySchema arr) {
// TODO: collection type here should be fully qualified namespace to avoid model conflicts
// This supports arrays of arrays.
String arrayType = typeMapping.get("array");
StringBuilder instantiationType = new StringBuilder(arrayType);
Schema items = arr.getItems();
String nestedType = getTypeDeclaration(items);
additionalProperties.put("nestedType", nestedType);
// TODO: We may want to differentiate here between generics and primitive arrays.
instantiationType.append("<").append(nestedType).append(">");
return instantiationType.toString();
}
/**
* Sanitize against Kotlin specific naming conventions, which may differ from those required by {@link DefaultCodegen#sanitizeName}.
*
* @param name string to be sanitized
* @return sanitized string
*/
private String sanitizeKotlinSpecificNames(final String name) {
String word = name;
for (Map.Entry<String, String> specialCharacters : specialCharReplacements.entrySet()) {
word = replaceSpecialCharacters(word, specialCharacters);
}
// Fallback, replace unknowns with underscore.
word = word.replaceAll("\\W+", "_");
if (word.matches("\\d.*")) {
word = "_" + word;
}
// _, __, and ___ are reserved in Kotlin. Treat all names with only underscores consistently, regardless of count.
if (word.matches("^_*$")) {
word = word.replaceAll("\\Q_\\E", "Underscore");
}
return word;
}
private String replaceSpecialCharacters(String word, Map.Entry<String, String> specialCharacters) {
String specialChar = specialCharacters.getKey();
String replacementChar = specialCharacters.getValue();
// Underscore is the only special character we'll allow
if (!specialChar.equals("_") && word.contains(specialChar)) {
return replaceCharacters(word, specialChar, replacementChar);
}
return word;
}
private String replaceCharacters(String word, String oldValue, String newValue) {
if (!word.contains(oldValue)) {
return word;
}
if (word.equals(oldValue)) {
return newValue;
}
int i = word.indexOf(oldValue);
String start = word.substring(0, i);
String end = recurseOnEndOfWord(word, oldValue, newValue, i);
return start + newValue + end;
}
private String recurseOnEndOfWord(String word, String oldValue, String newValue, int lastReplacedValue) {
String end = word.substring(lastReplacedValue + 1);
if (!end.isEmpty()) {
end = titleCase(end);
end = replaceCharacters(end, oldValue, newValue);
}
return end;
}
private String titleCase(final String input) {
return input.substring(0, 1).toUpperCase(Locale.ROOT) + input.substring(1);
}
@Override
protected boolean isReservedWord(String word) {
// We want case-sensitive escaping, to avoid unnecessary backtick-escaping.
return reservedWords.contains(word);
}
/**
* Check the type to see if it needs import the library/module/package
*
* @param type name of the type
* @return true if the library/module/package of the corresponding type needs to be imported
*/
@Override
protected boolean needToImport(String type) {
// provides extra protection against improperly trying to import language primitives and java types
boolean imports = !type.startsWith("kotlin.") && !type.startsWith("java.") && !defaultIncludes.contains(type) && !languageSpecificPrimitives.contains(type);
return imports;
}
@Override
public CodegenModel fromModel(String name, Schema schema) {
CodegenModel m = super.fromModel(name, schema);
m.optionalVars = m.optionalVars.stream().distinct().collect(Collectors.toList());
m.allVars.stream().filter(p -> !m.vars.contains(p)).forEach(p -> p.isInherited = true);
this.hasEnums = m.hasEnums;
return m;
}
@Override
public String toEnumValue(String value, String datatype) {
if ("kotlin.Int".equals(datatype) || "kotlin.Long".equals(datatype)) {
return value;
} else if ("kotlin.Double".equals(datatype)) {
if (value.contains(".")) {
return value;
} else {
return value + ".0"; // Float and double must have .0
}
} else if ("kotlin.Float".equals(datatype)) {
return value + "f";
} else {
return "\"" + escapeText(value) + "\"";
}
}
@Override
public boolean isDataTypeString(final String dataType) {
return "String".equals(dataType) || "kotlin.String".equals(dataType);
}
@Override
public String toParamName(String name) {
// to avoid conflicts with 'callback' parameter for async call
if ("callback".equals(name)) {
return "paramCallback";
}
// should be the same as variable name
return toVarName(name);
}
@Override
public String toVarName(String name) {
// sanitize name
name = sanitizeKotlinSpecificNames(name);
name = sanitizeName(name, "\\W-[\\$]");
if (name.toLowerCase(Locale.ROOT).matches("^_*class$")) {
return "propertyClass";
}
if ("_".equals(name)) {
name = "_u";
}
// if the name is all upper case (letters, digits, underscores), leave it as-is
if (name.matches("^[A-Z0-9_]*$")) {
return name;
}
if (startsWithTwoUppercaseLetters(name)) {
name = name.substring(0, 2).toLowerCase(Locale.ROOT) + name.substring(2);
}
// If name contains special chars -> replace them.
if ((name.chars().anyMatch(character -> specialCharReplacements.keySet().contains("" + ((char) character))))) {
List<String> allowedCharacters = new ArrayList<>();
allowedCharacters.add("_");
allowedCharacters.add("$");
name = escape(name, specialCharReplacements, allowedCharacters, "_");
}
// camelize (lower first character) the variable name
// pet_id => petId
name = camelize(name, true);
// for reserved word or word starting with number or containing dollar symbol, escape it
if (isReservedWord(name) || name.matches("(^\\d.*)|(.*[$].*)")) {
name = escapeReservedWord(name);
}
return name;
}
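// Illustrative examples for toVarName above (added, not from the original source):
// "pet_id" would typically become "petId", a property named "class" becomes
// "propertyClass", and a reserved word such as "fun" is returned backtick-escaped.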
@Override
public String toRegularExpression(String pattern) {
return escapeText(pattern);
}
private boolean startsWithTwoUppercaseLetters(String name) {
boolean startsWithTwoUppercaseLetters = false;
if (name.length() > 1) {
startsWithTwoUppercaseLetters = name.substring(0, 2).equals(name.substring(0, 2).toUpperCase(Locale.ROOT));
}
return startsWithTwoUppercaseLetters;
}
@Override
public void postProcessFile(File file, String fileType) {
if (file == null) {
return;
}
String kotlinPostProcessFile = System.getenv("KOTLIN_POST_PROCESS_FILE");
if (StringUtils.isEmpty(kotlinPostProcessFile)) {
return; // skip if KOTLIN_POST_PROCESS_FILE env variable is not defined
}
// only process files with kt extension
if ("kt".equals(FilenameUtils.getExtension(file.toString()))) {
String command = kotlinPostProcessFile + " " + file.toString();
try {
Process p = Runtime.getRuntime().exec(command);
p.waitFor();
int exitValue = p.exitValue();
if (exitValue != 0) {
LOGGER.error("Error running the command ({}). Exit value: {}", command, exitValue);
} else {
LOGGER.info("Successfully executed: " + command);
}
} catch (Exception e) {
LOGGER.error("Error running the command ({}). Exception: {}", command, e.getMessage());
}
}
}
@Override
public String toDefaultValue(Schema p) {
if (ModelUtils.isBooleanSchema(p)) {
if (p.getDefault() != null) {
return p.getDefault().toString();
}
} else if (ModelUtils.isDateSchema(p)) {
// TODO
} else if (ModelUtils.isDateTimeSchema(p)) {
// TODO
} else if (ModelUtils.isNumberSchema(p)) {
if (p.getDefault() != null) {
return p.getDefault().toString();
}
} else if (ModelUtils.isIntegerSchema(p)) {
if (p.getDefault() != null) {
return p.getDefault().toString();
}
} else if (ModelUtils.isURISchema(p)) {
if (p.getDefault() != null) {
return "URI.create(\"" + p.getDefault() + "\")";
}
} else if (ModelUtils.isStringSchema(p)) {
if (p.getDefault() != null) {
return "\"" + p.getDefault() + "\"";
}
}
return null;
}
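// Illustrative behaviour of toDefaultValue above (added, not from the original source):
// a boolean default of true is emitted as "true", an integer default of 5 as "5", a URI
// default is wrapped in URI.create(...), a string default is emitted as a quoted Kotlin
// string literal, and date/date-time defaults are not handled yet (see the TODOs above).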
}
|
[
"\"KOTLIN_POST_PROCESS_FILE\"",
"\"KOTLIN_POST_PROCESS_FILE\""
] |
[] |
[
"KOTLIN_POST_PROCESS_FILE"
] |
[]
|
["KOTLIN_POST_PROCESS_FILE"]
|
java
| 1 | 0 | |
pkg/controller/api_test.go
|
/*
Copyright 2016 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"reflect"
"strings"
"testing"
"time"
uuid "github.com/satori/go.uuid"
"go.uber.org/zap"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
fv1 "github.com/srcmesh/kubefaas/pkg/apis/core/v1"
"github.com/srcmesh/kubefaas/pkg/controller/client"
"github.com/srcmesh/kubefaas/pkg/controller/client/rest"
"github.com/srcmesh/kubefaas/pkg/crd"
ferror "github.com/srcmesh/kubefaas/pkg/error"
"github.com/srcmesh/kubefaas/pkg/cli/cmd"
)
var (
g struct {
cmd.CommandActioner
}
testNS = metav1.NamespaceDefault
)
func panicIf(err error) {
if err != nil {
log.Panicf("err: %v", err)
}
}
func assert(c bool, msg string) {
if !c {
log.Fatalf("assert failed: %v", msg)
}
}
func assertNameReuseFailure(err error, name string) {
assert(err != nil, "recreating "+name+" with same name must fail")
fe, ok := err.(ferror.Error)
assert(ok, "error must be a kubefaas Error")
assert(fe.Code == ferror.ErrorNameExists, "error must be a name exists error")
}
func assertNotFoundFailure(err error, name string) {
assert(err != nil, "requesting a non-existent "+name+" must fail")
fe, ok := err.(ferror.Error)
assert(ok, "error must be a kubefaas Error")
if fe.Code != ferror.ErrorNotFound {
log.Fatalf("error must be a not found error: %v", fe)
}
}
func assertCronSpecFails(err error) {
assert(err != nil, "using an invalid cron spec must fail")
ok := strings.Contains(err.Error(), "not a valid cron spec")
assert(ok, "invalid cron spec must fail")
}
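// Note (added): the three helpers above capture the common API error checks used by the
// tests below -- creating a resource twice should yield ErrorNameExists, fetching a
// missing resource should yield ErrorNotFound, and an invalid cron spec should be
// rejected at creation time.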
func TestFunctionApi(t *testing.T) {
testFunc := &fv1.Function{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: testNS,
},
Spec: fv1.FunctionSpec{
Environment: fv1.EnvironmentReference{
Name: "nodejs",
Namespace: testNS,
},
Package: fv1.FunctionPackageRef{
FunctionName: "xxx",
PackageRef: fv1.PackageRef{
Namespace: testNS,
Name: "xxx",
ResourceVersion: "12345",
},
},
},
}
_, err := g.Client().V1().Function().Get(&metav1.ObjectMeta{
Name: testFunc.ObjectMeta.Name,
Namespace: testNS,
})
assertNotFoundFailure(err, "function")
m, err := g.Client().V1().Function().Create(testFunc)
panicIf(err)
defer func() {
err := g.Client().V1().Function().Delete(m)
panicIf(err)
}()
_, err = g.Client().V1().Function().Create(testFunc)
assertNameReuseFailure(err, "function")
testFunc.ObjectMeta.ResourceVersion = m.ResourceVersion
testFunc.Spec.Package.FunctionName = "yyy"
_, err = g.Client().V1().Function().Update(testFunc)
panicIf(err)
testFunc.ObjectMeta.ResourceVersion = ""
testFunc.ObjectMeta.Name = "bar"
m2, err := g.Client().V1().Function().Create(testFunc)
panicIf(err)
defer g.Client().V1().Function().Delete(m2)
funcs, err := g.Client().V1().Function().List(testNS)
panicIf(err)
assert(len(funcs) == 2, fmt.Sprintf("created two functions, but found %v", len(funcs)))
funcs_url := g.Client().ServerURL() + "/v2/functions"
resp, err := http.Get(funcs_url)
panicIf(err)
defer resp.Body.Close()
assert(resp.StatusCode == 200, "http get status code on /v2/functions")
found := false
for _, b := range resp.Header["Content-Type"] {
if b == "application/json; charset=utf-8" {
found = true
}
}
assert(found, "incorrect response content type")
}
func TestHTTPTriggerApi(t *testing.T) {
testTrigger := &fv1.HTTPTrigger{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: testNS,
},
Spec: fv1.HTTPTriggerSpec{
Method: http.MethodGet,
RelativeURL: "/hello",
FunctionReference: fv1.FunctionReference{
Type: fv1.FunctionReferenceTypeFunctionName,
Name: "foo",
},
},
}
_, err := g.Client().V1().HTTPTrigger().Get(&metav1.ObjectMeta{
Name: testTrigger.ObjectMeta.Name,
Namespace: testNS,
})
assertNotFoundFailure(err, "httptrigger")
m, err := g.Client().V1().HTTPTrigger().Create(testTrigger)
panicIf(err)
defer g.Client().V1().HTTPTrigger().Delete(m)
_, err = g.Client().V1().HTTPTrigger().Create(testTrigger)
assertNameReuseFailure(err, "httptrigger")
tr, err := g.Client().V1().HTTPTrigger().Get(m)
panicIf(err)
assert(testTrigger.Spec.Method == tr.Spec.Method &&
testTrigger.Spec.RelativeURL == tr.Spec.RelativeURL &&
testTrigger.Spec.FunctionReference.Type == tr.Spec.FunctionReference.Type &&
testTrigger.Spec.FunctionReference.Name == tr.Spec.FunctionReference.Name, "trigger should match after reading")
testTrigger.ObjectMeta.ResourceVersion = m.ResourceVersion
testTrigger.Spec.RelativeURL = "/hi"
_, err = g.Client().V1().HTTPTrigger().Update(testTrigger)
panicIf(err)
testTrigger.ObjectMeta.ResourceVersion = ""
testTrigger.ObjectMeta.Name = "yyy"
_, err = g.Client().V1().HTTPTrigger().Create(testTrigger)
assert(err != nil, "duplicate trigger should not be allowed")
testTrigger.Spec.RelativeURL = "/hi2"
m2, err := g.Client().V1().HTTPTrigger().Create(testTrigger)
panicIf(err)
defer g.Client().V1().HTTPTrigger().Delete(m2)
ts, err := g.Client().V1().HTTPTrigger().List(testNS)
panicIf(err)
assert(len(ts) == 2, fmt.Sprintf("created two triggers, but found %v", len(ts)))
}
func TestEnvironmentApi(t *testing.T) {
testEnv := &fv1.Environment{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: testNS,
},
Spec: fv1.EnvironmentSpec{
Version: 1,
Runtime: fv1.Runtime{
Image: "gcr.io/xyz",
},
Resources: v1.ResourceRequirements{},
},
}
_, err := g.Client().V1().Environment().Get(&metav1.ObjectMeta{
Name: testEnv.ObjectMeta.Name,
Namespace: testNS,
})
assertNotFoundFailure(err, "environment")
m, err := g.Client().V1().Environment().Create(testEnv)
panicIf(err)
defer g.Client().V1().Environment().Delete(m)
_, err = g.Client().V1().Environment().Create(testEnv)
assertNameReuseFailure(err, "environment")
e, err := g.Client().V1().Environment().Get(m)
panicIf(err)
assert(reflect.DeepEqual(testEnv.Spec, e.Spec), "env should match after reading")
testEnv.ObjectMeta.ResourceVersion = m.ResourceVersion
testEnv.Spec.Runtime.Image = "another-img"
_, err = g.Client().V1().Environment().Update(testEnv)
panicIf(err)
testEnv.ObjectMeta.ResourceVersion = ""
testEnv.ObjectMeta.Name = "bar"
m2, err := g.Client().V1().Environment().Create(testEnv)
panicIf(err)
defer g.Client().V1().Environment().Delete(m2)
ts, err := g.Client().V1().Environment().List(testNS)
panicIf(err)
assert(len(ts) == 2, fmt.Sprintf("created two envs, but found %v", len(ts)))
}
func TestWatchApi(t *testing.T) {
testWatch := &fv1.KubernetesWatchTrigger{
ObjectMeta: metav1.ObjectMeta{
Name: "xxx",
Namespace: testNS,
},
Spec: fv1.KubernetesWatchTriggerSpec{
Namespace: "default",
Type: "pod",
FunctionReference: fv1.FunctionReference{
Type: fv1.FunctionReferenceTypeFunctionName,
Name: "foo",
},
},
}
_, err := g.Client().V1().KubeWatcher().Get(&metav1.ObjectMeta{
Name: testWatch.ObjectMeta.Name,
Namespace: testNS,
})
assertNotFoundFailure(err, "watch")
m, err := g.Client().V1().KubeWatcher().Create(testWatch)
panicIf(err)
defer g.Client().V1().KubeWatcher().Delete(m)
_, err = g.Client().V1().KubeWatcher().Create(testWatch)
assertNameReuseFailure(err, "watch")
w, err := g.Client().V1().KubeWatcher().Get(m)
panicIf(err)
assert(testWatch.Spec.Namespace == w.Spec.Namespace &&
testWatch.Spec.Type == w.Spec.Type &&
testWatch.Spec.FunctionReference.Type == w.Spec.FunctionReference.Type &&
testWatch.Spec.FunctionReference.Name == w.Spec.FunctionReference.Name, "watch should match after reading")
testWatch.ObjectMeta.Name = "yyy"
m2, err := g.Client().V1().KubeWatcher().Create(testWatch)
panicIf(err)
defer g.Client().V1().KubeWatcher().Delete(m2)
ws, err := g.Client().V1().KubeWatcher().List(testNS)
panicIf(err)
assert(len(ws) == 2, fmt.Sprintf("created two watches, but found %v", len(ws)))
}
func TestTimeTriggerApi(t *testing.T) {
testTrigger := &fv1.TimeTrigger{
ObjectMeta: metav1.ObjectMeta{
Name: "xxx",
Namespace: testNS,
},
Spec: fv1.TimeTriggerSpec{
Cron: "0 30 * * * *",
FunctionReference: fv1.FunctionReference{
Type: fv1.FunctionReferenceTypeFunctionName,
Name: "asdf",
},
},
}
_, err := g.Client().V1().TimeTrigger().Get(&metav1.ObjectMeta{Name: testTrigger.ObjectMeta.Name})
assertNotFoundFailure(err, "trigger")
m, err := g.Client().V1().TimeTrigger().Create(testTrigger)
panicIf(err)
defer g.Client().V1().TimeTrigger().Delete(m)
_, err = g.Client().V1().TimeTrigger().Create(testTrigger)
assertNameReuseFailure(err, "trigger")
tr, err := g.Client().V1().TimeTrigger().Get(m)
panicIf(err)
assert(testTrigger.Spec.Cron == tr.Spec.Cron &&
testTrigger.Spec.FunctionReference.Type == tr.Spec.FunctionReference.Type &&
testTrigger.Spec.FunctionReference.Name == tr.Spec.FunctionReference.Name, "trigger should match after reading")
testTrigger.ObjectMeta.ResourceVersion = m.ResourceVersion
testTrigger.Spec.Cron = "@hourly"
_, err = g.Client().V1().TimeTrigger().Update(testTrigger)
panicIf(err)
testTrigger.ObjectMeta.ResourceVersion = ""
testTrigger.ObjectMeta.Name = "yyy"
testTrigger.Spec.Cron = "Not valid cron spec"
_, err = g.Client().V1().TimeTrigger().Create(testTrigger)
assertCronSpecFails(err)
ts, err := g.Client().V1().TimeTrigger().List(testNS)
panicIf(err)
assert(len(ts) == 1, fmt.Sprintf("expected one valid time trigger (the second create used an invalid cron spec), but found %v", len(ts)))
}
func TestMain(m *testing.M) {
flag.Parse()
// skip test if no cluster available for testing
kubeconfig := os.Getenv("KUBECONFIG")
if len(kubeconfig) == 0 {
log.Println("Skipping test, no kubernetes cluster")
return
}
_, kubeClient, _, err := crd.GetKubernetesClient()
panicIf(err)
// Use a randomly generated testNS so that multiple CI builds can run concurrently without clashing.
testNS = uuid.NewV4().String()
kubeClient.CoreV1().Namespaces().Create(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNS,
},
})
defer kubeClient.CoreV1().Namespaces().Delete(testNS, nil)
logger, err := zap.NewDevelopment()
panicIf(err)
go Start(logger, 8888, true)
time.Sleep(5 * time.Second)
restClient := rest.NewRESTClient("http://localhost:8888")
// TODO: use fake rest client for offline spec generation
cmd.SetClientset(client.MakeClientset(restClient))
resp, err := http.Get("http://localhost:8888/")
panicIf(err)
assert(resp.StatusCode == 200, "http get status code on root")
found := false
for _, b := range resp.Header["Content-Type"] {
if b == "application/json; charset=utf-8" {
found = true
}
}
assert(found, "incorrect response content type")
_, err = ioutil.ReadAll(resp.Body)
panicIf(err)
os.Exit(m.Run())
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
distsql/request_builder_test.go
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package distsql
import (
"os"
"testing"
. "github.com/pingcap/check"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tipb/go-tipb"
)
var _ = Suite(&testSuite{})
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
TestingT(t)
}
type testSuite struct {
sctx sessionctx.Context
}
func (s *testSuite) SetUpSuite(c *C) {
ctx := mock.NewContext()
ctx.GetSessionVars().StmtCtx = &stmtctx.StatementContext{
MemTracker: memory.NewTracker(-1, -1),
DiskTracker: disk.NewTracker(-1, -1),
}
ctx.Store = &mock.Store{
Client: &mock.Client{
MockResponse: &mockResponse{
ctx: ctx,
batch: 1,
total: 2,
},
},
}
s.sctx = ctx
}
func (s *testSuite) TearDownSuite(c *C) {
}
func (s *testSuite) SetUpTest(c *C) {
testleak.BeforeTest()
ctx := s.sctx.(*mock.Context)
store := ctx.Store.(*mock.Store)
store.Client = &mock.Client{
MockResponse: &mockResponse{
ctx: ctx,
batch: 1,
total: 2,
},
}
}
func (s *testSuite) TearDownTest(c *C) {
testleak.AfterTest(c)()
}
type handleRange struct {
start int64
end int64
}
func (s *testSuite) getExpectedRanges(tid int64, hrs []*handleRange) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(hrs))
for _, hr := range hrs {
low := codec.EncodeInt(nil, hr.start)
high := codec.EncodeInt(nil, hr.end)
high = []byte(kv.Key(high).PrefixNext())
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
return krs
}
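// Note (added): each handleRange above is inclusive on both ends; the end handle is
// included by taking PrefixNext of its encoded value before building the row key, so the
// resulting kv.KeyRange runs from EncodeRowKey(tid, start) to just past
// EncodeRowKey(tid, end).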
func (s *testSuite) TestTableHandlesToKVRanges(c *C) {
handles := []int64{0, 2, 3, 4, 5, 10, 11, 100, 9223372036854775806, 9223372036854775807}
// Build expected key ranges.
hrs := make([]*handleRange, 0, len(handles))
hrs = append(hrs, &handleRange{start: 0, end: 0})
hrs = append(hrs, &handleRange{start: 2, end: 5})
hrs = append(hrs, &handleRange{start: 10, end: 11})
hrs = append(hrs, &handleRange{start: 100, end: 100})
hrs = append(hrs, &handleRange{start: 9223372036854775806, end: 9223372036854775807})
// Build key ranges.
expect := s.getExpectedRanges(1, hrs)
actual := TableHandlesToKVRanges(1, handles)
// Compare key ranges and expected key ranges.
c.Assert(len(actual), Equals, len(expect))
for i := range actual {
c.Assert(actual[i].StartKey, DeepEquals, expect[i].StartKey)
c.Assert(actual[i].EndKey, DeepEquals, expect[i].EndKey)
}
}
func (s *testSuite) TestTableRangesToKVRanges(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual := TableRangesToKVRanges(13, ranges, nil)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestIndexRangesToKVRanges(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
}
actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 12, 15, ranges, nil)
c.Assert(err, IsNil)
for i := range actual {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestRequestBuilder1(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual, err := (&RequestBuilder{}).SetTableRanges(12, ranges, nil).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
},
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder2(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual, err := (&RequestBuilder{}).SetIndexRanges(new(stmtctx.StatementContext), 12, 15, ranges).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
},
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder3(c *C) {
handles := []int64{0, 2, 3, 4, 5, 10, 11, 100}
actual, err := (&RequestBuilder{}).SetTableHandles(15, handles).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
},
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder4(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
}
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetStreaming(true).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: keyRanges,
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
Streaming: true,
NotFillCache: false,
SyncLog: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder5(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
}
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetAnalyzeRequest(&tipb.AnalyzeReq{}).
SetKeepOrder(true).
SetConcurrency(15).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 104,
StartTs: 0x0,
Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0},
KeyRanges: keyRanges,
KeepOrder: true,
Desc: false,
Concurrency: 15,
IsolationLevel: kv.RC,
Priority: 1,
NotFillCache: true,
SyncLog: false,
Streaming: false,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder6(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x00, 0x01},
EndKey: kv.Key{0x02, 0x03},
},
}
concurrency := 10
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetChecksumRequest(&tipb.ChecksumRequest{}).
SetConcurrency(concurrency).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 105,
StartTs: 0x0,
Data: []uint8{0x10, 0x0, 0x18, 0x0},
KeyRanges: keyRanges,
KeepOrder: false,
Desc: false,
Concurrency: concurrency,
IsolationLevel: 0,
Priority: 0,
NotFillCache: true,
SyncLog: false,
Streaming: false,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder7(c *C) {
for _, replicaRead := range []kv.ReplicaReadType{
kv.ReplicaReadLeader,
kv.ReplicaReadFollower,
kv.ReplicaReadMixed,
} {
vars := variable.NewSessionVars()
vars.SetReplicaRead(replicaRead)
concurrency := 10
actual, err := (&RequestBuilder{}).
SetFromSessionVars(vars).
SetConcurrency(concurrency).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 0,
StartTs: 0x0,
KeepOrder: false,
Desc: false,
Concurrency: concurrency,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: replicaRead,
}
c.Assert(actual, DeepEquals, expect)
}
}
func (s *testSuite) TestRequestBuilder8(c *C) {
sv := variable.NewSessionVars()
sv.SnapshotInfoschema = infoschema.MockInfoSchemaWithSchemaVer(nil, 10000)
actual, err := (&RequestBuilder{}).
SetFromSessionVars(sv).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 0,
StartTs: 0x0,
Data: []uint8(nil),
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
MemTracker: (*memory.Tracker)(nil),
ReplicaRead: 0x1,
SchemaVar: 10000,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestTableRangesToKVRangesWithFbs(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(4)},
},
}
hist := statistics.NewHistogram(1, 30, 30, 0, types.NewFieldType(mysql.TypeLonglong), chunk.InitialCapacity, 0)
for i := 0; i < 10; i++ {
hist.Bounds.AppendInt64(0, int64(i))
hist.Bounds.AppendInt64(0, int64(i+2))
hist.Buckets = append(hist.Buckets, statistics.Bucket{Repeat: 10, Count: int64(i + 30)})
}
fb := statistics.NewQueryFeedback(0, hist, 0, false)
lower, upper := types.NewIntDatum(2), types.NewIntDatum(3)
fb.Feedback = []statistics.Feedback{
{Lower: &lower, Upper: &upper, Count: 1, Repeat: 1},
}
actual := TableRangesToKVRanges(0, ranges, fb)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestIndexRangesToKVRangesWithFbs(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(4)},
},
}
hist := statistics.NewHistogram(1, 30, 30, 0, types.NewFieldType(mysql.TypeLonglong), chunk.InitialCapacity, 0)
for i := 0; i < 10; i++ {
hist.Bounds.AppendInt64(0, int64(i))
hist.Bounds.AppendInt64(0, int64(i+2))
hist.Buckets = append(hist.Buckets, statistics.Bucket{Repeat: 10, Count: int64(i + 30)})
}
fb := statistics.NewQueryFeedback(0, hist, 0, false)
lower, upper := types.NewIntDatum(2), types.NewIntDatum(3)
fb.Feedback = []statistics.Feedback{
{Lower: &lower, Upper: &upper, Count: 1, Repeat: 1},
}
actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 0, 0, ranges, fb)
c.Assert(err, IsNil)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
|
[
"\"log_level\""
] |
[] |
[
"log_level"
] |
[]
|
["log_level"]
|
go
| 1 | 0 | |
cli/edit.go
|
package cli
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"strings"
"github.com/aws/aws-sdk-go/service/s3"
myS3 "github.com/tsub/s3-edit/cli/s3"
"github.com/tsub/s3-edit/config"
)
// Edit downloads a file from S3, opens it in the default editor, and writes the edited content back.
func Edit(path myS3.Path, params *config.AWSParams) {
svc := s3.New(params.Session)
object := myS3.GetObject(svc, path)
tempDirPath, tempfilePath := createTempfile(path, object.Body)
defer os.RemoveAll(tempDirPath)
editedBody := editFile(tempfilePath)
object.Body = []byte(editedBody)
myS3.PutObject(svc, path, object)
}
func createTempfile(path myS3.Path, body []byte) (tempDirPath string, tempfilePath string) {
tempDirPath, err := ioutil.TempDir("/tmp", "s3-edit")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
keys := strings.Split(path.Key, "/")
fileName := keys[len(keys)-1]
tempfilePath = tempDirPath + "/" + fileName
if err := ioutil.WriteFile(tempfilePath, body, os.ModePerm); err != nil {
fmt.Println(err)
os.Exit(1)
}
return
}
func editFile(path string) string {
command := getDefaultEditor() + " " + path
cmd := exec.Command("sh", "-c", command)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
cmd.Stdin = os.Stdin
if err := cmd.Run(); err != nil {
fmt.Println(err)
os.Exit(1)
}
changedFile, err := ioutil.ReadFile(path)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
return string(changedFile[:])
}
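// Note (added): running the editor through "sh -c" means $EDITOR values that include
// flags (for example "code --wait") work as expected, at the cost of the shell also
// interpreting the temp file path.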
func getDefaultEditor() string {
editor := os.Getenv("EDITOR")
if editor == "" {
return "vi"
}
return editor
}
|
[
"\"EDITOR\""
] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
go
| 1 | 0 | |
opentamp/robo_wiping.py
|
import os
import sys
import time
import numpy as np
import pybullet as P
import robosuite
import robosuite.utils.transform_utils as robo_T
from robosuite.controllers import load_controller_config
from scipy.spatial.transform import Rotation
import opentamp.core.util_classes.transform_utils as T
import main
from opentamp.core.parsing import parse_domain_config, parse_problem_config
from opentamp.core.util_classes.openrave_body import *
from opentamp.core.util_classes.transform_utils import *
from opentamp.core.util_classes.viewer import PyBulletViewer
from pma import backtrack_ll_solver_gurobi as bt_ll
from pma.hl_solver import *
from pma.pr_graph import *
from pma.robosuite_solver import RobotSolver
from sco_py.expr import *
import random
random.seed(23)
REF_QUAT = np.array([0, 0, -0.7071, -0.7071])
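# theta_error computes a sign-consistent orientation error (3-vector) between the
# current and target quaternions, so the correction does not flip direction when
# the two quaternions differ only by an overall sign.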
def theta_error(cur_quat, next_quat):
sign1 = np.sign(cur_quat[np.argmax(np.abs(cur_quat))])
sign2 = np.sign(next_quat[np.argmax(np.abs(next_quat))])
next_quat = np.array(next_quat)
cur_quat = np.array(cur_quat)
angle = -(sign1 * sign2) * robo_T.get_orientation_error(
sign1 * next_quat, sign2 * cur_quat
)
return angle
# controller_config = load_controller_config(default_controller="OSC_POSE")
# controller_config = load_controller_config(default_controller="JOINT_VELOCITY")
# controller_config['control_delta'] = False
# controller_config['kp'] = 500
# controller_config['kp'] = [750, 750, 500, 5000, 5000, 5000]
ctrl_mode = "JOINT_POSITION"
true_mode = "JOINT"
# ctrl_mode = 'OSC_POSE'
# true_mode = 'IK'
controller_config = load_controller_config(default_controller=ctrl_mode)
if ctrl_mode.find("JOINT") >= 0:
controller_config["kp"] = [7500, 6500, 6500, 6500, 6500, 6500, 12000]
controller_config["output_max"] = 0.2
controller_config["output_min"] = -0.2
else:
controller_config["kp"] = 5000 # [8000, 8000, 8000, 4000, 4000, 4000]
controller_config["input_max"] = 0.2 # [0.05, 0.05, 0.05, 4, 4, 4]
controller_config["input_min"] = -0.2 # [-0.05, -0.05, -0.05, -4, -4, -4]
controller_config["output_max"] = 0.02 # [0.1, 0.1, 0.1, 2, 2, 2]
controller_config["output_min"] = -0.02 # [-0.1, -0.1, -0.1, -2, -2, -2]
visual = len(os.environ.get("DISPLAY", "")) > 0
has_render = visual
obj_mode = 2
env = robosuite.make(
"Wipe",
robots=["Sawyer"], # load a Sawyer robot
controller_configs=controller_config, # each arm is controlled using OSC
has_renderer=has_render, # on-screen rendering
render_camera="frontview", # visualize the "frontview" camera
has_offscreen_renderer=(not has_render), # no off-screen rendering
control_freq=50, # 50 hz control for applied actions
horizon=200, # each episode terminates after 200 steps
use_object_obs=True, # no observations needed
use_camera_obs=False, # no observations needed
ignore_done=True,
reward_shaping=True,
initialization_noise={'magnitude': 0., 'type': 'gaussian'},
camera_widths=128,
camera_heights=128,
)
obs, _, _, _ = env.step(np.zeros(7)) # Step a null action to 'boot' the environment.
# wipe_centroid_pose = obs['wipe_centroid']
# Get the locations of all dirt particles
dirt_locs = np.zeros((env.num_markers, 3))
for i, marker in enumerate(env.model.mujoco_arena.markers):
marker_pos = np.array(env.sim.data.body_xpos[env.sim.model.body_name2id(marker.root_body)])
dirt_locs[i,:] = marker_pos
# First, we reset the environment and then manually set the joint positions to their
# initial positions and all the joint velocities and accelerations to 0.
obs = env.reset()
jnts = env.sim.data.qpos[:7]
for _ in range(40):
env.step(np.zeros(7))
env.sim.data.qpos[:7] = jnts
env.sim.forward()
env.sim.data.qvel[:] = 0
env.sim.data.qacc[:] = 0
env.sim.forward()
bt_ll.DEBUG = True
openrave_bodies = None
domain_fname = os.getcwd() + "/opentamp/domains/robot_wiping_domain/right_wipe_onlytable.domain"
prob = os.getcwd() + "/opentamp/domains/robot_wiping_domain/probs/simple_move_onlytable_prob.prob"
d_c = main.parse_file_to_dict(domain_fname)
domain = parse_domain_config.ParseDomainConfig.parse(d_c)
hls = FFSolver(d_c)
p_c = main.parse_file_to_dict(prob)
visual = len(os.environ.get('DISPLAY', '')) > 0
problem = parse_problem_config.ParseProblemConfig.parse(p_c, domain, None, use_tf=True, sess=None, visual=visual)
params = problem.init_state.params
body_ind = env.mjpy_model.body_name2id("robot0_base")
# Resetting the initial state to specific values
params["sawyer"].pose[:, 0] = env.sim.data.body_xpos[body_ind]
jnts = params["sawyer"].geom.jnt_names["right"]
jnts = ["robot0_" + jnt for jnt in jnts]
jnt_vals = []
sawyer_inds = []
for jnt in jnts:
jnt_adr = env.mjpy_model.joint_name2id(jnt)
jnt_ind = env.mjpy_model.jnt_qposadr[jnt_adr]
sawyer_inds.append(jnt_ind)
jnt_vals.append(env.sim.data.qpos[jnt_ind])
params["sawyer"].right[:, 0] = jnt_vals
params["sawyer"].openrave_body.set_pose(params["sawyer"].pose[:, 0])
params["sawyer"].openrave_body.set_dof({"right": params["sawyer"].right[:, 0]})
info = params["sawyer"].openrave_body.fwd_kinematics("right")
params["sawyer"].right_ee_pos[:, 0] = info["pos"]
params["sawyer"].right_ee_pos[:, 0] = T.quaternion_to_euler(info["quat"], "xyzw")
goal = "(RobotAt sawyer region_pose5_5)"
# goal = "(InContactRobotTable sawyer table)"
# goal = "(WipedSurface sawyer) (InContactRobotTable sawyer table)"
solver = RobotSolver()
plan, descr = p_mod_abs(
hls, solver, domain, problem, goal=goal, debug=True, n_resamples=10
)
if len(sys.argv) > 1 and sys.argv[1] == "end":
sys.exit(0)
if plan is None:
print("Could not find plan; terminating.")
sys.exit(1)
sawyer = plan.params["sawyer"]
cmds = []
for t in range(plan.horizon):
rgrip = sawyer.right_gripper[0, t]
if true_mode.find("JOINT") >= 0:
act = np.r_[sawyer.right[:, t]]
else:
pos, euler = sawyer.right_ee_pos[:, t], sawyer.right_ee_rot[:, t]
quat = np.array(T.euler_to_quaternion(euler, "xyzw"))
# angle = robosuite.utils.transform_utils.quat2axisangle(quat)
rgrip = sawyer.right_gripper[0, t]
act = np.r_[pos, quat]
# act = np.r_[pos, angle, [-rgrip]]
# act = np.r_[sawyer.right[:,t], [-rgrip]]
cmds.append(act)
grip_ind = env.mjpy_model.site_name2id("gripper0_grip_site")
hand_ind = env.mjpy_model.body_name2id("robot0_right_hand")
env.reset()
env.sim.data.qpos[:7] = params["sawyer"].right[:, 0]
env.sim.data.qacc[:] = 0
env.sim.data.qvel[:] = 0
env.sim.forward()
rot_ref = T.euler_to_quaternion(params["sawyer"].right_ee_rot[:, 0], "xyzw")
for _ in range(40):
env.step(np.zeros(7))
env.sim.data.qpos[:7] = params["sawyer"].right[:, 0]
env.sim.forward()
nsteps = 60
cur_ind = 0
tol = 1e-3
true_lb, true_ub = plan.params["sawyer"].geom.get_joint_limits("right")
factor = (np.array(true_ub) - np.array(true_lb)) / 5
ref_jnts = env.sim.data.qpos[:7]
ref_jnts = np.array([0, -np.pi / 4, 0, np.pi / 4, 0, np.pi / 2, 0])
for act in plan.actions:
t = act.active_timesteps[0]
plan.params["sawyer"].right[:, t] = env.sim.data.qpos[:7]
grip = env.sim.data.qpos[7:9].copy()
failed_preds = plan.get_failed_preds(active_ts=(t, t), priority=3, tol=tol)
oldqfrc = env.sim.data.qfrc_applied[:]
oldxfrc = env.sim.data.xfrc_applied[:]
oldacc = env.sim.data.qacc[:]
oldvel = env.sim.data.qvel[:]
oldwarm = env.sim.data.qacc_warmstart[:]
oldctrl = env.sim.data.ctrl[:]
# failed_preds = [p for p in failed_preds if (p[1]._rollout or not type(p[1].expr) is EqExpr)]
print("FAILED:", t, failed_preds, act.name)
old_state = env.sim.get_state()
# env.sim.reset()
# env.sim.data.qpos[:7] = plan.params['sawyer'].right[:,t]
# env.sim.data.qpos[cereal_ind:cereal_ind+3] = plan.params['cereal'].pose[:,t]
# env.sim.data.qpos[cereal_ind+3:cereal_ind+7] = cereal_quat
# env.sim.data.qpos[7:9] = grip
# env.sim.data.qacc[:] = 0. #oldacc
# env.sim.data.qacc_warmstart[:] = 0.#oldwarm
# env.sim.data.qvel[:] = 0.
# env.sim.data.ctrl[:] = 0.#oldctrl
# env.sim.data.qfrc_applied[:] = 0.#oldqfrc
# env.sim.data.xfrc_applied[:] = 0.#oldxfrc
# env.sim.forward()
# env.sim.set_state(old_state)
# env.sim.forward()
sawyer = plan.params["sawyer"]
for t in range(act.active_timesteps[0], act.active_timesteps[1]):
base_act = cmds[cur_ind]
cur_ind += 1
print("TIME:", t)
init_jnts = env.sim.data.qpos[:7]
if ctrl_mode.find("JOINT") >= 0 and true_mode.find("JOINT") < 0:
cur_jnts = env.sim.data.qpos[:7]
if t < plan.horizon:
targ_pos, targ_rot = (
sawyer.right_ee_pos[:, t + 1],
sawyer.right_ee_rot[:, t + 1],
)
else:
targ_pos, targ_rot = (
sawyer.right_ee_pos[:, t],
sawyer.right_ee_rot[:, t],
)
lb = env.sim.data.qpos[:7] - factor
ub = env.sim.data.qpos[:7] + factor
sawyer.openrave_body.set_dof({"right": np.zeros(7)})
sawyer.openrave_body.set_dof({"right": ref_jnts})
targ_jnts = sawyer.openrave_body.get_ik_from_pose(
targ_pos, targ_rot, "right", bnds=(lb, ub)
)
base_act = np.r_[targ_jnts, base_act[-1]]
true_act = base_act.copy()
if ctrl_mode.find("JOINT") >= 0:
targ_jnts = base_act[:7] # + env.sim.data.qpos[:7]
for n in range(nsteps):
act = base_act.copy()
act[:7] = targ_jnts - env.sim.data.qpos[:7]
obs = env.step(act)
end_jnts = env.sim.data.qpos[:7]
ee_to_sim_discrepancy = (
env.sim.data.site_xpos[grip_ind] - sawyer.right_ee_pos[:, t]
)
print(
"EE PLAN VS SIM:",
ee_to_sim_discrepancy,
t,
)
# if ee_to_sim_discrepancy[2] > 0.01:
# from IPython import embed; embed()
# print('\n\n\n')
else:
targ = base_act[3:7]
cur = env.sim.data.body_xquat[hand_ind]
cur = np.array([cur[1], cur[2], cur[3], cur[0]])
truerot = Rotation.from_quat(targ)
currot = Rotation.from_quat(cur)
base_angle = (truerot * currot.inv()).as_rotvec()
# base_angle = robosuite.utils.transform_utils.get_orientation_error(sign*targ, cur)
rot = Rotation.from_rotvec(base_angle)
targrot = (rot * currot).as_quat()
# print('TARGETS:', targ, targrot)
for n in range(nsteps):
act = base_act.copy()
act[:3] -= env.sim.data.site_xpos[grip_ind]
# act[:3] *= 1e2
cur = env.sim.data.body_xquat[hand_ind]
cur = np.array([cur[1], cur[2], cur[3], cur[0]])
# targ = act[3:7]
sign = np.sign(targ[np.argmax(np.abs(targrot))])
cur_sign = np.sign(targ[np.argmax(np.abs(cur))])
targ = targrot
# if sign != cur_sign:
# sign = -1.
# else:
# sign = 1.
rotmult = 1e0 # 1e1
##angle = 5e2*theta_error(cur, targ) #robosuite.utils.transform_utils.get_orientation_error(sign*targ, cur)
# angle = robosuite.utils.transform_utils.get_orientation_error(sign*targ, cur)
# rot = Rotation.from_rotvec(angle)
# currot = Rotation.from_quat(cur)
angle = (
-rotmult
* sign
* cur_sign
* robosuite.utils.transform_utils.get_orientation_error(
sign * targrot, cur_sign * cur
)
)
# a = np.linalg.norm(angle)
# if a > 2*np.pi:
# angle = (a - 2*np.pi) * angle / a
act = np.r_[act[:3], angle, act[-1:]]
# act[3:6] -= robosuite.utils.transform_utils.quat2axisangle(cur)
# act[:7] = (act[:7] - np.array([env.sim.data.qpos[ind] for ind in sawyer_inds]))
obs = env.step(act)
print('EE PLAN VS SIM:', env.sim.data.site_xpos[grip_ind]-sawyer.right_ee_pos[:,t], t, env.reward())
if has_render: env.render()
plan.params['sawyer'].right[:,t] = env.sim.data.qpos[:7]
|
[] |
[] |
[
"DISPLAY"
] |
[]
|
["DISPLAY"]
|
python
| 1 | 0 | |
pkg/command/cf/tail.go
|
package cf
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"os"
"regexp"
"strings"
"text/template"
"time"
"unicode/utf8"
"code.cloudfoundry.org/cli/plugin"
logcache "code.cloudfoundry.org/go-log-cache"
logcache_v1 "code.cloudfoundry.org/go-log-cache/rpc/logcache_v1"
"code.cloudfoundry.org/go-loggregator/v8/rpc/loggregator_v2"
"github.com/blang/semver"
flags "github.com/jessevdk/go-flags"
)
const (
timeFormat = "2006-01-02T15:04:05.00-0700"
)
// Command is the interface to implement plugin commands
type Command func(ctx context.Context, cli plugin.CliConnection, args []string, c HTTPClient, log Logger, w io.Writer)
// Logger is used for outputting log-cache results and errors
type Logger interface {
Fatalf(format string, args ...interface{})
Printf(format string, args ...interface{})
}
// HTTPClient is the client used for HTTP requests
type HTTPClient interface {
Do(req *http.Request) (*http.Response, error)
}
type TailOption func(*tailOptions)
func WithTailNoHeaders() TailOption {
return func(o *tailOptions) {
o.noHeaders = true
}
}
// Tail will fetch the logs for a given application guid and write them to
// stdout.
func Tail(
ctx context.Context,
cli plugin.CliConnection,
args []string,
c HTTPClient,
log Logger,
w io.Writer,
opts ...TailOption,
) {
o, err := newTailOptions(cli, args, log)
if err != nil {
log.Fatalf("%s", err)
}
for _, opt := range opts {
opt(&o)
}
sourceID := o.guid
formatter := newFormatter(o.providedName, o.follow, formatterKindFromOptions(o), log, o.outputTemplate, o.newLineReplacer)
lw := lineWriter{w: w}
defer func() {
if value, ok := formatter.flush(); ok {
lw.Write(value)
}
}()
logCacheAddr := os.Getenv("LOG_CACHE_ADDR")
if logCacheAddr == "" {
hasAPI, err := cli.HasAPIEndpoint()
if err != nil {
log.Fatalf("%s", err)
}
if !hasAPI {
log.Fatalf("No API endpoint targeted.")
}
tokenURL, err := cli.ApiEndpoint()
if err != nil {
log.Fatalf("%s", err)
}
user, err := cli.Username()
if err != nil {
log.Fatalf("%s", err)
}
org, err := cli.GetCurrentOrg()
if err != nil {
log.Fatalf("%s", err)
}
space, err := cli.GetCurrentSpace()
if err != nil {
log.Fatalf("%s", err)
}
logCacheAddr = strings.Replace(tokenURL, "api", "log-cache", 1)
headerPrinter := formatter.appHeader
if o.isService {
headerPrinter = formatter.serviceHeader
}
if sourceID == "" {
// not an app or service, use generic header
headerPrinter = formatter.sourceHeader
}
if !o.noHeaders {
header, ok := headerPrinter(o.providedName, org.Name, space.Name, user)
if ok {
lw.Write(header)
lw.Write("")
}
}
}
filterAndFormat := func(e *loggregator_v2.Envelope) (string, bool) {
if !typeFilter(e, o) {
return "", false
}
return formatter.formatEnvelope(e)
}
tokenClient := &tokenHTTPClient{
c: c,
tokenFunc: func() string { return "" },
}
if strings.ToLower(os.Getenv("LOG_CACHE_SKIP_AUTH")) != "true" {
tokenClient.tokenFunc = func() string {
token, err := cli.AccessToken()
if err != nil {
log.Fatalf("Unable to get Access Token: %s", err)
}
return token
}
}
client := logcache.NewClient(logCacheAddr, logcache.WithHTTPClient(tokenClient))
checkFeatureVersioning(client, ctx, log, o.nameFilter)
if sourceID == "" {
// fall back to provided name
sourceID = o.providedName
}
walkStartTime := time.Now().Add(-5 * time.Second).UnixNano()
if o.lines > 0 {
envelopes, err := client.Read(
context.Background(),
sourceID,
o.startTime,
logcache.WithEndTime(o.endTime),
logcache.WithEnvelopeTypes(o.envelopeType),
logcache.WithLimit(o.lines),
logcache.WithDescending(),
logcache.WithNameFilter(o.nameFilter),
)
if err != nil && !o.follow {
log.Fatalf("%s", err)
}
// we get envelopes in descending order but want to print them ascending
for i := len(envelopes) - 1; i >= 0; i-- {
walkStartTime = envelopes[i].Timestamp + 1
if formatted, ok := filterAndFormat(envelopes[i]); ok {
lw.Write(formatted)
}
}
}
if o.follow {
logcache.Walk(
ctx,
sourceID,
logcache.Visitor(func(envelopes []*loggregator_v2.Envelope) bool {
for _, e := range envelopes {
if formatted, ok := filterAndFormat(e); ok {
lw.Write(formatted)
}
}
return true
}),
client.Read,
logcache.WithWalkStartTime(time.Unix(0, walkStartTime)),
logcache.WithWalkEnvelopeTypes(o.envelopeType),
logcache.WithWalkBackoff(logcache.NewAlwaysRetryBackoff(250*time.Millisecond)),
logcache.WithWalkNameFilter(o.nameFilter),
)
}
}
type lineWriter struct {
w io.Writer
}
func (w *lineWriter) Write(line string) error {
line = strings.TrimSuffix(line, "\n") + "\n"
_, err := w.w.Write([]byte(line))
return err
}
const (
envelopeClassAny envelopeClass = iota
envelopeClassMetric
envelopeClassLog
)
type envelopeClass int
type tailOptions struct {
startTime time.Time
endTime time.Time
envelopeType logcache_v1.EnvelopeType
envelopeClass envelopeClass
lines int
follow bool
guid string
isService bool
providedName string
outputTemplate *template.Template
jsonOutput bool
tokenRefreshInterval time.Duration
nameFilter string
noHeaders bool
newLineReplacer rune
}
type tailOptionFlags struct {
StartTime int64 `long:"start-time"`
EndTime int64 `long:"end-time"`
EnvelopeType string `long:"envelope-type" short:"t"`
Lines uint `long:"lines" short:"n" default:"10"`
Follow bool `long:"follow" short:"f"`
OutputFormat string `long:"output-format" short:"o"`
JSONOutput bool `long:"json"`
EnvelopeClass string `long:"envelope-class" short:"c"`
NewLine string `long:"new-line" optional:"true" optional-value:"\\u2028"`
NameFilter string `long:"name-filter"`
}
func newTailOptions(cli plugin.CliConnection, args []string, log Logger) (tailOptions, error) {
opts := tailOptionFlags{
EndTime: time.Now().UnixNano(),
}
args, err := flags.ParseArgs(&opts, args)
if err != nil {
return tailOptions{}, err
}
if len(args) != 1 {
return tailOptions{}, fmt.Errorf("Expected 1 argument, got %d.", len(args))
}
if opts.JSONOutput && opts.OutputFormat != "" {
return tailOptions{}, errors.New("Cannot use output-format and json flags together")
}
if opts.EnvelopeType != "" && opts.EnvelopeClass != "" {
return tailOptions{}, errors.New("--envelope-type cannot be used with --envelope-class")
}
if opts.EnvelopeClass != "" {
opts.EnvelopeType = "ANY"
}
var outputTemplate *template.Template
if opts.OutputFormat != "" {
outputTemplate, err = parseOutputFormat(opts.OutputFormat)
if err != nil {
log.Fatalf("%s", err)
}
}
id, isService := getGUID(args[0], cli, log)
o := tailOptions{
startTime: time.Unix(0, opts.StartTime),
endTime: time.Unix(0, opts.EndTime),
envelopeType: translateEnvelopeType(opts.EnvelopeType, log),
lines: int(opts.Lines),
guid: id,
isService: isService,
providedName: args[0],
follow: opts.Follow,
outputTemplate: outputTemplate,
jsonOutput: opts.JSONOutput,
tokenRefreshInterval: 5 * time.Minute,
nameFilter: opts.NameFilter,
envelopeClass: toEnvelopeClass(opts.EnvelopeClass),
}
if opts.NewLine != "" {
o.newLineReplacer, err = parseNewLineArgument(opts.NewLine)
if err != nil {
log.Fatalf("%s", err)
}
}
return o, o.validate()
}
func toEnvelopeClass(class string) envelopeClass {
switch strings.ToUpper(class) {
case "METRICS":
return envelopeClassMetric
case "LOGS":
return envelopeClassLog
case "ANY":
return envelopeClassAny
default:
return envelopeClassAny
}
}
func formatterKindFromOptions(o tailOptions) formatterKind {
if o.jsonOutput {
return jsonFormat
}
if o.outputTemplate != nil {
return templateFormat
}
return prettyFormat
}
func typeFilter(e *loggregator_v2.Envelope, o tailOptions) bool {
if o.envelopeClass == envelopeClassAny {
return true
}
switch e.Message.(type) {
case *loggregator_v2.Envelope_Counter, *loggregator_v2.Envelope_Gauge, *loggregator_v2.Envelope_Timer:
return o.envelopeClass == envelopeClassMetric
case *loggregator_v2.Envelope_Log, *loggregator_v2.Envelope_Event:
return o.envelopeClass == envelopeClassLog
}
return false
}
func (o tailOptions) validate() error {
if o.startTime.After(o.endTime) && o.endTime != time.Unix(0, 0) {
return errors.New("Invalid date/time range. Ensure your start time is prior or equal the end time.")
}
if o.lines > 1000 || o.lines < 0 {
return errors.New("Lines cannot be greater than 1000.")
}
_, err := regexp.Compile(o.nameFilter)
if err != nil {
return errors.New(fmt.Sprintf("Invalid name filter '%s'. Ensure your name-filter is a valid regex.", o.nameFilter))
}
return nil
}
func parseOutputFormat(f string) (*template.Template, error) {
templ := template.New("OutputFormat")
_, err := templ.Parse(f)
if err != nil {
return nil, err
}
return templ, nil
}
func translateEnvelopeType(t string, log Logger) logcache_v1.EnvelopeType {
t = strings.ToUpper(t)
switch t {
case "ANY", "":
return logcache_v1.EnvelopeType_ANY
case "LOG":
return logcache_v1.EnvelopeType_LOG
case "COUNTER":
return logcache_v1.EnvelopeType_COUNTER
case "GAUGE":
return logcache_v1.EnvelopeType_GAUGE
case "TIMER":
return logcache_v1.EnvelopeType_TIMER
case "EVENT":
return logcache_v1.EnvelopeType_EVENT
default:
log.Fatalf("--envelope-type must be LOG, COUNTER, GAUGE, TIMER, EVENT or ANY")
// Unreachable: log.Fatalf halts execution, but the compiler doesn't know
// that, so return a value to satisfy it.
return logcache_v1.EnvelopeType_ANY
}
}
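// getGUID resolves name first as an application and, failing that, as a
// service instance; the boolean reports whether the GUID belongs to a service.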
func getGUID(name string, cli plugin.CliConnection, log Logger) (string, bool) {
var id string
if id = getAppGUID(name, cli, log); id == "" {
return getServiceGUID(name, cli, log), true
}
return id, false
}
func getAppGUID(appName string, cli plugin.CliConnection, log Logger) string {
r, err := cli.CliCommandWithoutTerminalOutput(
"app",
appName,
"--guid",
)
if err != nil {
if err.Error() != "App "+appName+" not found" {
log.Printf("%s", err)
}
return ""
}
return strings.Join(r, "")
}
func getServiceGUID(serviceName string, cli plugin.CliConnection, log Logger) string {
r, err := cli.CliCommandWithoutTerminalOutput(
"service",
serviceName,
"--guid",
)
if err != nil {
if err.Error() != "Service instance "+serviceName+" not found" {
log.Printf("%s", err)
}
return ""
}
return strings.Join(r, "")
}
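// parseNewLineArgument accepts a single character (e.g. "|") or a unicode
// escape such as "\u2028"; a blank value falls back to U+2028 (line separator).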
func parseNewLineArgument(s string) (rune, error) {
if strings.TrimSpace(s) == "" {
return '\u2028', nil
}
if utf8.RuneCountInString(s) == 1 {
r, _ := utf8.DecodeRuneInString(s)
return r, nil
}
s = strings.ToLower(s)
if strings.HasPrefix(s, "\\u") {
var r rune
_, err := fmt.Sscanf(s, "\\u%x", &r)
if err != nil {
return 0, err
}
return r, nil
}
return 0, errors.New("--new-line argument must be single unicode character or in the format \\uXXXXX")
}
func checkFeatureVersioning(client *logcache.Client, ctx context.Context, log Logger, nameFilter string) {
version, _ := client.LogCacheVersion(ctx)
if nameFilter != "" {
nameFilterVersion, _ := semver.Parse("2.1.0")
if version.LT(nameFilterVersion) {
log.Fatalf("Use of --name-filter requires minimum log-cache version 2.1.0")
}
}
}
type backoff struct {
logcache.AlwaysDoneBackoff
logger Logger
}
func newBackoff(log Logger) backoff {
return backoff{logger: log}
}
func (b backoff) OnErr(err error) bool {
b.logger.Fatalf("%s", err)
return b.AlwaysDoneBackoff.OnErr(err)
}
type tokenHTTPClient struct {
c HTTPClient
tokenFunc func() string
}
func (c *tokenHTTPClient) Do(req *http.Request) (*http.Response, error) {
accessToken := c.tokenFunc()
if len(accessToken) > 0 {
req.Header.Set("Authorization", accessToken)
}
return c.c.Do(req)
}
|
[
"\"LOG_CACHE_ADDR\"",
"\"LOG_CACHE_SKIP_AUTH\""
] |
[] |
[
"LOG_CACHE_SKIP_AUTH",
"LOG_CACHE_ADDR"
] |
[]
|
["LOG_CACHE_SKIP_AUTH", "LOG_CACHE_ADDR"]
|
go
| 2 | 0 | |
cairis/test/test_GoalAPI.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from urllib import quote
import jsonpickle
from cairis.core.Goal import Goal
from cairis.core.GoalEnvironmentProperties import GoalEnvironmentProperties
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
import os
from cairis.mio.ModelImport import importModelFile
__author__ = 'Robin Quetin, Shamal Faily'
class GoalAPITests(CairisDaemonTestCase):
@classmethod
def setUpClass(cls):
importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/NeuroGrid/NeuroGrid.xml',1,'test')
def setUp(self):
# region Class fields
self.logger = logging.getLogger(__name__)
self.existing_goal_name = 'Multi-Factor Authentication'
self.existing_category = 'Maintain'
self.existing_environment_name_1 = 'Stroke'
self.existing_environment_name_2 = 'Psychosis'
self.goal_class = Goal.__module__+'.'+Goal.__name__
self.to_delete_ids = []
# endregion
def test_get_all(self):
method = 'test_get_all'
rv = self.app.get('/api/goals?session_id=test')
goals = jsonpickle.decode(rv.data)
self.assertIsNotNone(goals, 'No results after deserialization')
self.assertIsInstance(goals, dict, 'The result is not a dictionary as expected')
self.assertGreater(len(goals), 0, 'No goals in the dictionary')
self.logger.info('[%s] Goals found: %d', method, len(goals))
goal = goals.values()[0]
self.logger.info('[%s] First goal: %s [%d]\n', method, goal['theName'], goal['theId'])
def test_get_by_name(self):
method = 'test_get_by_name'
url = '/api/goals/name/%s?session_id=test' % quote(self.existing_goal_name)
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
self.logger.debug('[%s] Response data: %s', method, rv.data)
goal = jsonpickle.decode(rv.data)
self.assertIsNotNone(goal, 'No results after deserialization')
self.logger.info('[%s] Goal: %s [%d]\n', method, goal['theName'], goal['theId'])
def test_delete(self):
method = 'test_delete'
url = '/api/goals/name/%s?session_id=test' % quote(self.prepare_new_goal().theName)
new_goal_body = self.prepare_json()
self.app.delete(url)
self.logger.info('[%s] Object to delete: %s', method, new_goal_body)
self.app.post('/api/goals', content_type='application/json', data=new_goal_body)
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.delete(url)
self.logger.info('[%s] Response data: %s', method, rv.data)
self.assertIsNotNone(rv.data, 'No response')
json_resp = jsonpickle.decode(rv.data)
self.assertIsInstance(json_resp, dict, 'The response cannot be converted to a dictionary')
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s\n', method, message)
def test_post(self):
method = 'test_post'
url = '/api/goals'
self.logger.info('[%s] URL: %s', method, url)
new_goal_body = self.prepare_json()
self.app.delete('/api/goals/name/%s?session_id=test' % quote(self.prepare_new_goal().theName))
rv = self.app.post(url, content_type='application/json', data=new_goal_body)
self.logger.debug('[%s] Response data: %s', method, rv.data)
json_resp = jsonpickle.decode(rv.data)
self.assertIsNotNone(json_resp, 'No results after deserialization')
env_id = json_resp.get('goal_id', None)
self.assertIsNotNone(env_id, 'No goal ID returned')
self.assertGreater(env_id, 0, 'Invalid goal ID returned [%d]' % env_id)
self.logger.info('[%s] Goal ID: %d\n', method, env_id)
rv = self.app.delete('/api/goals/name/%s?session_id=test' % quote(self.prepare_new_goal().theName))
def test_put(self):
method = 'test_put'
url = '/api/goals'
self.logger.info('[%s] URL: %s', method, url)
new_goal_body = self.prepare_json()
rv = self.app.delete('/api/goals/name/%s?session_id=test' % quote(self.prepare_new_goal().theName))
rv = self.app.post(url, content_type='application/json', data=new_goal_body)
self.logger.debug('[%s] Response data: %s', method, rv.data)
json_resp = jsonpickle.decode(rv.data)
self.assertIsNotNone(json_resp, 'No results after deserialization')
env_id = json_resp.get('goal_id', None)
self.assertIsNotNone(env_id, 'No goal ID returned')
self.assertGreater(env_id, 0, 'Invalid goal ID returned [%d]' % env_id)
self.logger.info('[%s] Goal ID: %d', method, env_id)
goal_to_update = self.prepare_new_goal()
goal_to_update.theName = 'Edited test goal'
goal_to_update.theId = env_id
upd_env_body = self.prepare_json(goal=goal_to_update)
rv = self.app.put('/api/goals/name/%s?session_id=test' % quote(self.prepare_new_goal().theName), data=upd_env_body, content_type='application/json')
self.assertIsNotNone(rv.data, 'No response')
json_resp = jsonpickle.decode(rv.data)
self.assertIsNotNone(json_resp)
self.assertIsInstance(json_resp, dict)
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s', method, message)
self.assertGreater(message.find('successfully updated'), -1, 'The goal was not successfully updated')
rv = self.app.get('/api/goals/name/%s?session_id=test' % quote(goal_to_update.theName))
upd_goal = jsonpickle.decode(rv.data)
self.assertIsNotNone(upd_goal, 'Unable to decode JSON data')
self.logger.debug('[%s] Response data: %s', method, rv.data)
self.logger.info('[%s] Goal: %s [%d]\n', method, upd_goal['theName'], upd_goal['theId'])
rv = self.app.delete('/api/goals/name/%s?session_id=test' % quote(goal_to_update.theName))
def prepare_new_goal(self):
new_goal_refinements = [
[
"PreventUnauthorised Certificate Access",
"goal",
"or",
"No",
"None"
]
]
new_subgoal_refinements = [
[
"PreventUnauthorised Certificate Access",
"goal",
"or",
"No",
"None"
]
]
new_goal_props = [
GoalEnvironmentProperties(
environmentName=self.existing_environment_name_1,
lbl='Test 1',
definition='This is a first test property',
category=self.existing_category,
priority='Medium',
fitCriterion='None',
issue='None',
goalRefinements=new_goal_refinements,
subGoalRefinements=new_subgoal_refinements,
concs=[],cas=[]
),
GoalEnvironmentProperties(
environmentName=self.existing_environment_name_2,
lbl='Test 2',
definition='This is a second test property',
category=self.existing_category,
priority='Low',
fitCriterion='None',
issue='Test issue',
goalRefinements=new_goal_refinements,
subGoalRefinements=new_subgoal_refinements,
concs=[],cas=[]
)
]
new_goal = Goal(
goalId=-1,
goalName='Test goal',
goalOrig='',
tags=['test', 'test123'],
environmentProperties=[]
)
new_goal.theEnvironmentProperties = new_goal_props
new_goal.theEnvironmentDictionary = {}
new_goal.theGoalPropertyDictionary = {}
delattr(new_goal, 'theEnvironmentDictionary')
delattr(new_goal, 'theGoalPropertyDictionary')
return new_goal
def prepare_dict(self, goal=None):
if goal is None:
goal = self.prepare_new_goal()
else:
assert isinstance(goal, Goal)
return {
'session_id': 'test',
'object': goal,
}
def prepare_json(self, data_dict=None, goal=None):
if data_dict is None:
data_dict = self.prepare_dict(goal=goal)
else:
assert isinstance(data_dict, dict)
new_goal_body = jsonpickle.encode(data_dict, unpicklable=False)
self.logger.info('JSON data: %s', new_goal_body)
return new_goal_body
|
[] |
[] |
[
"CAIRIS_SRC"
] |
[]
|
["CAIRIS_SRC"]
|
python
| 1 | 0 | |
test/utils.go
|
//go:build integration
// +build integration
/*
Copyright 2020 Adevinta
*/
package test
import (
"errors"
"fmt"
"os"
"os/exec"
"path"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" // postgres driver
)
var (
dbPort = "5432" // default port, can be overridden via the TEST_DB_PORT environment variable
defaultFlywayVersion = "8"
)
const (
// DB Conn. str template.
dbConnStrFmt = "host=%s port=%s user=%s password=%s dbname=%s sslmode=%s"
// Test DB config.
dbHost = "127.0.0.1"
dbUser = "vulndb_test"
dbPass = "vulndb_test"
dbName = "vulndb_test"
dbSSLMode = "disable"
// Relative db dir path.
dbDirPath = "../db"
// Flyway commands.
flywayClean = "clean"
flywayMigrate = "migrate"
// Query statements.
sourcesStmt = "SELECT id as source_id, * FROM sources WHERE instance = :instance"
targetsStmt = "SELECT id as target_id, * FROM targets WHERE identifier = :identifier"
issuesStmt = "SELECT id as issue_id, * FROM issues WHERE summary = :summary AND description = :description"
findingsStmt = "SELECT id as finding_id, * FROM findings WHERE issue_id = :issue_id AND target_id = :target_id and affected_resource = :affected_resource"
fEventsStmt = "SELECT * FROM finding_events WHERE finding_id = :finding_id AND time = :time"
fExposuresStmt = "SELECT * FROM finding_exposures WHERE finding_id = :finding_id AND found_at = :found_at"
)
func init() {
if envDBPort := os.Getenv("TEST_DB_PORT"); envDBPort != "" {
dbPort = envDBPort
}
}
// dbConnStr returns the connection string for the Postgres test DB.
func dbConnStr() string {
return fmt.Sprintf(dbConnStrFmt, dbHost, dbPort, dbUser, dbPass, dbName, dbSSLMode)
}
// db returns a new sqlx DB.
func db() (*sqlx.DB, error) {
return sqlx.Connect("postgres", dbConnStr())
}
// resetDB cleans the current vulndb database
// in the test postgres container and loads the
// initial schema again.
func resetDB() error {
return runFlywayCmd(dbDirPath, flywayClean, flywayMigrate)
}
// runFlywayCmd runs the specified flyway cmd against the
// test postgres container.
func runFlywayCmd(dbDirPath string, flywayCommand ...string) error {
wd, err := os.Getwd()
if err != nil {
return err
}
dir := path.Join(wd, dbDirPath)
flywayVersion := defaultFlywayVersion
if value, ok := os.LookupEnv("FLYWAY_VERSION"); ok {
flywayVersion = value
}
cmdName := "docker"
cmdArgs := []string{
"run",
"--net=host",
"--rm",
"-v",
dir + ":/flyway/sql",
"flyway/flyway:" + flywayVersion + "-alpine",
"-q",
"-community",
"-user=" + dbUser,
"-password=" + dbPass,
"-url=jdbc:postgresql://" + dbHost + ":" + dbPort + "/" + dbName,
"-baselineOnMigrate=true",
}
cmdArgs = append(cmdArgs, flywayCommand...)
cmd := exec.Command(cmdName, cmdArgs...)
cmd.Env = os.Environ()
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Error executing flyway command, command output:\n%s.\n Error:\n %s", output, err)
}
return nil
}
// fetchDBData retrieves data from the test db.
// Because DB IDs are autogenerated, we cannot query by them,
// so we have to retrieve data based on other attributes.
// E.g.: Issue{summary, description}, target{identifier}, etc.
func fetchDBData(table string, args map[string]interface{}, db *sqlx.DB) (map[string]interface{}, error) {
var stmt string
switch table {
case sourcesTable:
stmt = sourcesStmt
case targetsTable:
stmt = targetsStmt
case issuesTable:
stmt = issuesStmt
case findingsTable:
stmt = findingsStmt
case fEventsTable:
stmt = fEventsStmt
case fExposuresTable:
stmt = fExposuresStmt
}
if stmt == "" {
return nil, errors.New("Table not supported")
}
nstmt, err := db.PrepareNamed(stmt)
if err != nil {
return nil, err
}
rows, err := nstmt.Queryx(args)
if err != nil {
return nil, err
}
data := make(map[string]interface{})
if rows.Next() {
if err = rows.MapScan(data); err != nil {
return nil, err
}
}
if rows.Next() {
return nil, errors.New(fmt.Sprintf("Multiple results. Table %v. Args %v. Stmt %v", table, args, stmt))
}
return data, nil
}
|
[
"\"TEST_DB_PORT\""
] |
[] |
[
"TEST_DB_PORT"
] |
[]
|
["TEST_DB_PORT"]
|
go
| 1 | 0 | |
src/runtime/gc_test.go
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"fmt"
"os"
"reflect"
"runtime"
"runtime/debug"
"sync"
"sync/atomic"
"testing"
"time"
"unsafe"
)
func TestGcSys(t *testing.T) {
if os.Getenv("GOGC") == "off" {
t.Skip("skipping test; GOGC=off in environment")
}
if runtime.GOOS == "windows" {
t.Skip("skipping test; GOOS=windows http://golang.org/issue/27156")
}
if runtime.GOOS == "linux" && runtime.GOARCH == "arm64" {
t.Skip("skipping test; GOOS=linux GOARCH=arm64 https://github.com/golang/go/issues/27636")
}
got := runTestProg(t, "testprog", "GCSys")
want := "OK\n"
if got != want {
t.Fatalf("expected %q, but got %q", want, got)
}
}
func TestGcDeepNesting(t *testing.T) {
type T [2][2][2][2][2][2][2][2][2][2]*int
a := new(T)
// Prevent the compiler from applying escape analysis.
// This makes sure new(T) is allocated on heap, not on the stack.
t.Logf("%p", a)
a[0][0][0][0][0][0][0][0][0][0] = new(int)
*a[0][0][0][0][0][0][0][0][0][0] = 13
runtime.GC()
if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
t.Fail()
}
}
func TestGcMapIndirection(t *testing.T) {
defer debug.SetGCPercent(debug.SetGCPercent(1))
runtime.GC()
type T struct {
a [256]int
}
m := make(map[T]T)
for i := 0; i < 2000; i++ {
var a T
a.a[0] = i
m[a] = T{}
}
}
func TestGcArraySlice(t *testing.T) {
type X struct {
buf [1]byte
nextbuf []byte
next *X
}
var head *X
for i := 0; i < 10; i++ {
p := &X{}
p.buf[0] = 42
p.next = head
if head != nil {
p.nextbuf = head.buf[:]
}
head = p
runtime.GC()
}
for p := head; p != nil; p = p.next {
if p.buf[0] != 42 {
t.Fatal("corrupted heap")
}
}
}
func TestGcRescan(t *testing.T) {
type X struct {
c chan error
nextx *X
}
type Y struct {
X
nexty *Y
p *int
}
var head *Y
for i := 0; i < 10; i++ {
p := &Y{}
p.c = make(chan error)
if head != nil {
p.nextx = &head.X
}
p.nexty = head
p.p = new(int)
*p.p = 42
head = p
runtime.GC()
}
for p := head; p != nil; p = p.nexty {
if *p.p != 42 {
t.Fatal("corrupted heap")
}
}
}
func TestGcLastTime(t *testing.T) {
ms := new(runtime.MemStats)
t0 := time.Now().UnixNano()
runtime.GC()
t1 := time.Now().UnixNano()
runtime.ReadMemStats(ms)
last := int64(ms.LastGC)
if t0 > last || last > t1 {
t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
}
pause := ms.PauseNs[(ms.NumGC+255)%256]
// Due to timer granularity, pause can actually be 0 on windows
// or on virtualized environments.
if pause == 0 {
t.Logf("last GC pause was 0")
} else if pause > 10e9 {
t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
}
}
var hugeSink interface{}
func TestHugeGCInfo(t *testing.T) {
// The test ensures that the compiler can chew these huge types even on the weakest machines.
// The types are not allocated at runtime.
if hugeSink != nil {
// 400MB on 32-bit, 4TB on 64-bit.
const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
hugeSink = new([n]*byte)
hugeSink = new([n]uintptr)
hugeSink = new(struct {
x float64
y [n]*byte
z []string
})
hugeSink = new(struct {
x float64
y [n]uintptr
z []string
})
}
}
func TestPeriodicGC(t *testing.T) {
if runtime.GOARCH == "wasm" {
t.Skip("no sysmon on wasm yet")
}
// Make sure we're not in the middle of a GC.
runtime.GC()
var ms1, ms2 runtime.MemStats
runtime.ReadMemStats(&ms1)
// Make periodic GC run continuously.
orig := *runtime.ForceGCPeriod
*runtime.ForceGCPeriod = 0
// Let some periodic GCs happen. In a heavily loaded system,
// it's possible these will be delayed, so this is designed to
// succeed quickly if things are working, but to give it some
// slack if things are slow.
var numGCs uint32
const want = 2
for i := 0; i < 200 && numGCs < want; i++ {
time.Sleep(5 * time.Millisecond)
// Test that periodic GC actually happened.
runtime.ReadMemStats(&ms2)
numGCs = ms2.NumGC - ms1.NumGC
}
*runtime.ForceGCPeriod = orig
if numGCs < want {
t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
}
}
func BenchmarkSetTypePtr(b *testing.B) {
benchSetType(b, new(*byte))
}
func BenchmarkSetTypePtr8(b *testing.B) {
benchSetType(b, new([8]*byte))
}
func BenchmarkSetTypePtr16(b *testing.B) {
benchSetType(b, new([16]*byte))
}
func BenchmarkSetTypePtr32(b *testing.B) {
benchSetType(b, new([32]*byte))
}
func BenchmarkSetTypePtr64(b *testing.B) {
benchSetType(b, new([64]*byte))
}
func BenchmarkSetTypePtr126(b *testing.B) {
benchSetType(b, new([126]*byte))
}
func BenchmarkSetTypePtr128(b *testing.B) {
benchSetType(b, new([128]*byte))
}
func BenchmarkSetTypePtrSlice(b *testing.B) {
benchSetType(b, make([]*byte, 1<<10))
}
type Node1 struct {
Value [1]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode1(b *testing.B) {
benchSetType(b, new(Node1))
}
func BenchmarkSetTypeNode1Slice(b *testing.B) {
benchSetType(b, make([]Node1, 32))
}
type Node8 struct {
Value [8]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode8(b *testing.B) {
benchSetType(b, new(Node8))
}
func BenchmarkSetTypeNode8Slice(b *testing.B) {
benchSetType(b, make([]Node8, 32))
}
type Node64 struct {
Value [64]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode64(b *testing.B) {
benchSetType(b, new(Node64))
}
func BenchmarkSetTypeNode64Slice(b *testing.B) {
benchSetType(b, make([]Node64, 32))
}
type Node64Dead struct {
Left, Right *byte
Value [64]uintptr
}
func BenchmarkSetTypeNode64Dead(b *testing.B) {
benchSetType(b, new(Node64Dead))
}
func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
benchSetType(b, make([]Node64Dead, 32))
}
type Node124 struct {
Value [124]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode124(b *testing.B) {
benchSetType(b, new(Node124))
}
func BenchmarkSetTypeNode124Slice(b *testing.B) {
benchSetType(b, make([]Node124, 32))
}
type Node126 struct {
Value [126]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode126(b *testing.B) {
benchSetType(b, new(Node126))
}
func BenchmarkSetTypeNode126Slice(b *testing.B) {
benchSetType(b, make([]Node126, 32))
}
type Node128 struct {
Value [128]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode128(b *testing.B) {
benchSetType(b, new(Node128))
}
func BenchmarkSetTypeNode128Slice(b *testing.B) {
benchSetType(b, make([]Node128, 32))
}
type Node130 struct {
Value [130]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode130(b *testing.B) {
benchSetType(b, new(Node130))
}
func BenchmarkSetTypeNode130Slice(b *testing.B) {
benchSetType(b, make([]Node130, 32))
}
type Node1024 struct {
Value [1024]uintptr
Left, Right *byte
}
func BenchmarkSetTypeNode1024(b *testing.B) {
benchSetType(b, new(Node1024))
}
func BenchmarkSetTypeNode1024Slice(b *testing.B) {
benchSetType(b, make([]Node1024, 32))
}
func benchSetType(b *testing.B, x interface{}) {
v := reflect.ValueOf(x)
t := v.Type()
switch t.Kind() {
case reflect.Ptr:
b.SetBytes(int64(t.Elem().Size()))
case reflect.Slice:
b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
}
b.ResetTimer()
runtime.BenchSetType(b.N, x)
}
func BenchmarkAllocation(b *testing.B) {
type T struct {
x, y *byte
}
ngo := runtime.GOMAXPROCS(0)
work := make(chan bool, b.N+ngo)
result := make(chan *T)
for i := 0; i < b.N; i++ {
work <- true
}
for i := 0; i < ngo; i++ {
work <- false
}
for i := 0; i < ngo; i++ {
go func() {
var x *T
for <-work {
for i := 0; i < 1000; i++ {
x = &T{}
}
}
result <- x
}()
}
for i := 0; i < ngo; i++ {
<-result
}
}
func TestPrintGC(t *testing.T) {
if testing.Short() {
t.Skip("Skipping in short mode")
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
done := make(chan bool)
go func() {
for {
select {
case <-done:
return
default:
runtime.GC()
}
}
}()
for i := 0; i < 1e4; i++ {
func() {
defer print("")
}()
}
close(done)
}
func testTypeSwitch(x interface{}) error {
switch y := x.(type) {
case nil:
// ok
case error:
return y
}
return nil
}
func testAssert(x interface{}) error {
if y, ok := x.(error); ok {
return y
}
return nil
}
func testAssertVar(x interface{}) error {
var y, ok = x.(error)
if ok {
return y
}
return nil
}
var a bool
//go:noinline
func testIfaceEqual(x interface{}) {
if x == "abc" {
a = true
}
}
func TestPageAccounting(t *testing.T) {
// Grow the heap in small increments. This used to drop the
// pages-in-use count below zero because of a rounding
// mismatch (golang.org/issue/15022).
const blockSize = 64 << 10
blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
for i := range blocks {
blocks[i] = new([blockSize]byte)
}
// Check that the running page count matches reality.
pagesInUse, counted := runtime.CountPagesInUse()
if pagesInUse != counted {
t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
}
}
func TestReadMemStats(t *testing.T) {
base, slow := runtime.ReadMemStatsSlow()
if base != slow {
logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
t.Fatal("memstats mismatch")
}
}
func TestUnscavHugePages(t *testing.T) {
// Allocate 20 MiB and immediately free it a few times to increase
// the chance that unscavHugePages isn't zero and that some kind of
// accounting had to happen in the runtime.
for j := 0; j < 3; j++ {
var large [][]byte
for i := 0; i < 5; i++ {
large = append(large, make([]byte, runtime.PhysHugePageSize))
}
runtime.KeepAlive(large)
runtime.GC()
}
base, slow := runtime.UnscavHugePagesSlow()
if base != slow {
logDiff(t, "unscavHugePages", reflect.ValueOf(base), reflect.ValueOf(slow))
t.Fatal("unscavHugePages mismatch")
}
}
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
typ := got.Type()
switch typ.Kind() {
case reflect.Array, reflect.Slice:
if got.Len() != want.Len() {
t.Logf("len(%s): got %v, want %v", prefix, got, want)
return
}
for i := 0; i < got.Len(); i++ {
logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
}
case reflect.Struct:
for i := 0; i < typ.NumField(); i++ {
gf, wf := got.Field(i), want.Field(i)
logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
}
case reflect.Map:
t.Fatal("not implemented: logDiff for map")
default:
if got.Interface() != want.Interface() {
t.Logf("%s: got %v, want %v", prefix, got, want)
}
}
}
func BenchmarkReadMemStats(b *testing.B) {
var ms runtime.MemStats
const heapSize = 100 << 20
x := make([]*[1024]byte, heapSize/1024)
for i := range x {
x[i] = new([1024]byte)
}
hugeSink = x
b.ResetTimer()
for i := 0; i < b.N; i++ {
runtime.ReadMemStats(&ms)
}
hugeSink = nil
}
func TestUserForcedGC(t *testing.T) {
// Test that runtime.GC() triggers a GC even if GOGC=off.
defer debug.SetGCPercent(debug.SetGCPercent(-1))
var ms1, ms2 runtime.MemStats
runtime.ReadMemStats(&ms1)
runtime.GC()
runtime.ReadMemStats(&ms2)
if ms1.NumGC == ms2.NumGC {
t.Fatalf("runtime.GC() did not trigger GC")
}
if ms1.NumForcedGC == ms2.NumForcedGC {
t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
}
}
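// writeBarrierBenchmark runs f while a background goroutine keeps the GC (and
// therefore the write barrier) continuously active, so f is measured with the
// write barrier enabled.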
func writeBarrierBenchmark(b *testing.B, f func()) {
runtime.GC()
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)
// Keep GC running continuously during the benchmark, which in
// turn keeps the write barrier on continuously.
var stop uint32
done := make(chan bool)
go func() {
for atomic.LoadUint32(&stop) == 0 {
runtime.GC()
}
close(done)
}()
defer func() {
atomic.StoreUint32(&stop, 1)
<-done
}()
b.ResetTimer()
f()
b.StopTimer()
}
func BenchmarkWriteBarrier(b *testing.B) {
if runtime.GOMAXPROCS(-1) < 2 {
// We don't want GC to take our time.
b.Skip("need GOMAXPROCS >= 2")
}
// Construct a large tree both so the GC runs for a while and
// so we have a data structure to manipulate the pointers of.
type node struct {
l, r *node
}
var wbRoots []*node
var mkTree func(level int) *node
mkTree = func(level int) *node {
if level == 0 {
return nil
}
n := &node{mkTree(level - 1), mkTree(level - 1)}
if level == 10 {
// Seed GC with enough early pointers so it
// doesn't start termination barriers when it
// only has the top of the tree.
wbRoots = append(wbRoots, n)
}
return n
}
const depth = 22 // 64 MB
root := mkTree(22)
writeBarrierBenchmark(b, func() {
var stack [depth]*node
tos := -1
// There are two write barriers per iteration, so i+=2.
for i := 0; i < b.N; i += 2 {
if tos == -1 {
stack[0] = root
tos = 0
}
// Perform one step of reversing the tree.
n := stack[tos]
if n.l == nil {
tos--
} else {
n.l, n.r = n.r, n.l
stack[tos] = n.l
stack[tos+1] = n.r
tos++
}
if i%(1<<12) == 0 {
// Avoid non-preemptible loops (see issue #10958).
runtime.Gosched()
}
}
})
runtime.KeepAlive(wbRoots)
}
func BenchmarkBulkWriteBarrier(b *testing.B) {
if runtime.GOMAXPROCS(-1) < 2 {
// We don't want GC to take our time.
b.Skip("need GOMAXPROCS >= 2")
}
// Construct a large set of objects we can copy around.
const heapSize = 64 << 20
type obj [16]*byte
ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
for i := range ptrs {
ptrs[i] = new(obj)
}
writeBarrierBenchmark(b, func() {
const blockSize = 1024
var pos int
for i := 0; i < b.N; i += blockSize {
// Rotate block.
block := ptrs[pos : pos+blockSize]
first := block[0]
copy(block, block[1:])
block[blockSize-1] = first
pos += blockSize
if pos+blockSize > len(ptrs) {
pos = 0
}
runtime.Gosched()
}
})
runtime.KeepAlive(ptrs)
}
func BenchmarkScanStackNoLocals(b *testing.B) {
var ready sync.WaitGroup
teardown := make(chan bool)
for j := 0; j < 10; j++ {
ready.Add(1)
go func() {
x := 100000
countpwg(&x, &ready, teardown)
}()
}
ready.Wait()
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StartTimer()
runtime.GC()
runtime.GC()
b.StopTimer()
}
close(teardown)
}
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
if *n == 0 {
ready.Done()
<-teardown
return
}
*n--
countpwg(n, ready, teardown)
}
|
[
"\"GOGC\""
] |
[] |
[
"GOGC"
] |
[]
|
["GOGC"]
|
go
| 1 | 0 | |
PhysicsTools/HeppyCore/python/utils/dataset.py
|
#!/usr/bin/env python
import os
import pprint
import re
import pickle
import sys
from castorBaseDir import castorBaseDir
import eostools as castortools
import fnmatch
import six
class IntegrityCheckError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class BaseDataset( object ):
### def __init__(self, name, user, pattern='.*root', run_range=None):
def __init__(self, name, user, pattern='.*root', run_range=None, dbsInstance=None):
self.name = name
self.user = user
self.pattern = pattern
self.run_range = run_range
### MM
self.dbsInstance = dbsInstance
### MM
self.primaryDatasetEntries = -1
self.report = None
self.buildListOfFiles( self.pattern )
self.extractFileSizes()
self.buildListOfBadFiles()
self.primaryDatasetEntries = self.getPrimaryDatasetEntries()
def buildListOfFiles( self, pattern ):
self.files = []
def extractFileSizes(self):
'''Get the file size for each file,
from the eos ls -l command.'''
self.filesAndSizes = {}
def buildListOfBadFiles(self):
self.good_files = []
self.bad_files = {}
def printInfo(self):
print 'sample : ' + self.name
print 'user : ' + self.user
def getPrimaryDatasetEntries(self):
return self.primaryDatasetEntries
def printFiles(self, abspath=True, info=True):
# import pdb; pdb.set_trace()
if self.files == None:
self.buildListOfFiles(self.pattern)
for file in self.files:
status = 'OK'
if file in self.bad_files:
status = self.bad_files[file]
elif file not in self.good_files:
status = 'UNKNOWN'
fileNameToPrint = file
if abspath == False:
fileNameToPrint = os.path.basename(file)
if info:
size=self.filesAndSizes.get(file,'UNKNOWN').rjust(10)
# if size is not None:
# size = size.rjust(10)
print status.ljust(10), size, \
'\t', fileNameToPrint
else:
print fileNameToPrint
print 'PrimaryDatasetEntries: %d' % self.primaryDatasetEntries
def listOfFiles(self):
'''Returns all files, even the bad ones.'''
return self.files
def listOfGoodFiles(self):
'''Returns all files flagged as good in the integrity
check text output; files not listed there are also
considered good.'''
self.good_files = []
for file in self.files:
if file not in self.bad_files:
self.good_files.append( file )
return self.good_files
def listOfGoodFilesWithPrescale(self, prescale):
"""Takes the list of good files and selects a random sample
from them according to the prescale factor.
E.g. a prescale of 10 will select 1 in 10 files."""
good_files = self.listOfGoodFiles()
if prescale < 2:
return self.good_files
#the number of files to select from the dataset
num_files = int( (len(good_files)/(1.0*prescale)) + 0.5)
if num_files < 1:
num_files = 1
if num_files > len(good_files):
num_files = len(good_files)
#pick unique good files randomly
import random
subset = set()
while len(subset) < num_files:
#pick a random file from the list
choice = random.choice(good_files)
slen = len(subset)
#add to the set
subset.add(choice)
#if this was a unique file remove so we don't get
#very slow corner cases where prescale is small
if len(subset) > slen:
good_files.remove(choice)
assert len(subset)==num_files,'The number of files does not match'
return [f for f in subset]
class CMSDataset( BaseDataset ):
def __init__(self, name, run_range = None):
super(CMSDataset, self).__init__( name, 'CMS', run_range=run_range)
def buildListOfFilesDBS(self, pattern, begin=-1, end=-1):
print 'buildListOfFilesDBS',begin,end
sampleName = self.name.rstrip('/')
query, qwhat = sampleName, "dataset"
if "#" in sampleName: qwhat = "block"
if self.run_range is not None and self.run_range != (-1,-1):
if self.run_range[0] == self.run_range[1]:
query += " run=%s" % self.run_range[0]
else:
print "WARNING: queries with run ranges are slow in DAS"
query += " run between [%s,%s]" % ( self.run_range[0],self.run_range[1] )
dbs='das_client.py --query="file %s=%s"'%(qwhat,query)
if begin >= 0:
dbs += ' --index %d' % begin
if end >= 0:
dbs += ' --limit %d' % (end-begin+1)
else:
dbs += ' --limit 0'
print 'dbs\t: %s' % dbs
dbsOut = os.popen(dbs)
files = []
for line in dbsOut:
if line.find('/store')==-1:
continue
line = line.rstrip()
# print 'line',line
files.append(line)
return files
def buildListOfFiles(self, pattern='.*root'):
runs = (-1,-1)
if self.run_range is not None:
runs = self.run_range
num_files=self.findPrimaryDatasetNumFiles(self.name.rstrip('/'),
runs[0],runs[1])
limit = 10000
if num_files > limit:
num_steps = int(num_files/limit)+1
self.files = []
for i in xrange(num_steps):
DBSFiles=self.buildListOfFilesDBS(pattern,
i*limit,
((i+1)*limit)-1)
self.files.extend(DBSFiles)
else:
self.files = self.buildListOfFilesDBS(pattern)
@staticmethod
def findPrimaryDatasetEntries(dataset, runmin, runmax):
query, qwhat = dataset, "dataset"
if "#" in dataset: qwhat = "block"
if runmin >0 or runmax > 0:
if runmin == runmax:
query = "%s run=%d" % (query,runmin)
else:
print "WARNING: queries with run ranges are slow in DAS"
query = "%s run between [%d, %d]" % (query,runmin if runmin > 0 else 1, runmax if runmax > 0 else 999999)
dbs='das_client.py --query="summary %s=%s"'%(qwhat,query)
dbsOut = os.popen(dbs).readlines()
entries = []
for line in dbsOut:
line = line.replace('\n','')
if "nevents" in line:
entries.append(int(line.split(":")[1]))
if entries:
return sum(entries)
return -1
@staticmethod
def findPrimaryDatasetNumFiles(dataset, runmin, runmax):
query, qwhat = dataset, "dataset"
if "#" in dataset: qwhat = "block"
if runmin >0 or runmax > 0:
if runmin == runmax:
query = "%s run=%d" % (query,runmin)
else:
print "WARNING: queries with run ranges are slow in DAS"
query = "%s run between [%d, %d]" % (query,runmin if runmin > 0 else 1, runmax if runmax > 0 else 999999)
dbs='das_client.py --query="summary %s=%s"'%(qwhat,query)
dbsOut = os.popen(dbs).readlines()
entries = []
for line in dbsOut:
line = line.replace('\n','')
if "nfiles" in line:
entries.append(int(line.split(":")[1]))
if entries:
return sum(entries)
return -1
def getPrimaryDatasetEntries(self):
runmin = -1
runmax = -1
if self.run_range is not None:
runmin = self.run_range[0]
runmax = self.run_range[1]
return self.findPrimaryDatasetEntries(self.name, runmin, runmax)
class LocalDataset( BaseDataset ):
def __init__(self, name, basedir, pattern):
self.basedir = basedir
super(LocalDataset, self).__init__( name, 'LOCAL', pattern)
def buildListOfFiles(self, pattern='.*root'):
pat = re.compile( pattern )
sampleName = self.name.rstrip('/')
self.dir = ''.join( [os.path.abspath(self.basedir),
sampleName ] )
self.files = []
for file in sorted(os.listdir( self.dir )):
if pat.match( file ) is not None:
self.files.append( '/'.join([self.dir, file]) )
# print file
class EOSDataset(BaseDataset):
'''A dataset located in any given eos directory'''
def __init__(self, name, basedir, pattern):
self.castorDir = '/'.join([basedir, name])
if not castortools.isEOSDir(self.castorDir):
raise ValueError('directory should be a directory on EOS.')
super(EOSDataset, self).__init__( name, 'EOS', pattern)
def buildListOfFiles(self, pattern='.*root'):
self.files = castortools.matchingFiles( self.castorDir, pattern )
class Dataset( BaseDataset ):
def __init__(self, name, user, pattern='.*root'):
self.lfnDir = castorBaseDir(user) + name
self.castorDir = castortools.lfnToCastor( self.lfnDir )
self.maskExists = False
self.report = None
super(Dataset, self).__init__(name, user, pattern)
def buildListOfFiles(self, pattern='.*root'):
'''fills list of files, taking all root files matching the pattern in the castor dir'''
self.files = castortools.matchingFiles( self.castorDir, pattern )
def buildListOfBadFiles(self):
'''fills the list of bad files from the IntegrityCheck log.
When the integrity check file is not available,
files are considered as good.'''
mask = "IntegrityCheck"
self.bad_files = {}
self.good_files = []
file_mask = castortools.matchingFiles(self.castorDir, '^%s_.*\.txt$' % mask)
if file_mask:
# here to avoid circular dependency
from edmIntegrityCheck import PublishToFileSystem
p = PublishToFileSystem(mask)
report = p.get(self.castorDir)
if report is not None and report:
self.maskExists = True
self.report = report
dup = report.get('ValidDuplicates',{})
for name, status in six.iteritems(report['Files']):
# print name, status
if not status[0]:
self.bad_files[name] = 'MarkedBad'
elif name in dup:
self.bad_files[name] = 'ValidDup'
else:
self.good_files.append( name )
else:
raise IntegrityCheckError( "ERROR: IntegrityCheck log file IntegrityCheck_XXXXXXXXXX.txt not found" )
def extractFileSizes(self):
'''Get the file size for each file, from the eos ls -l command.'''
# EOS command does not work in tier3
lsout = castortools.runXRDCommand(self.castorDir,'dirlist')[0]
lsout = lsout.split('\n')
self.filesAndSizes = {}
for entry in lsout:
values = entry.split()
if( len(values) != 5):
continue
# using full abs path as a key.
file = '/'.join([self.lfnDir, values[4].split("/")[-1]])
size = values[1]
self.filesAndSizes[file] = size
def printInfo(self):
print 'sample : ' + self.name
print 'LFN : ' + self.lfnDir
print 'Castor path : ' + self.castorDir
def getPrimaryDatasetEntries(self):
if self.report is not None and self.report:
return int(self.report.get('PrimaryDatasetEntries',-1))
return -1
### MM
class PrivateDataset ( BaseDataset ):
def __init__(self, name, dbsInstance=None):
super(PrivateDataset, self).__init__(name, 'PRIVATE', dbsInstance=dbsInstance)
def buildListOfFilesDBS(self, name, dbsInstance):
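        # Query DAS for the /store paths of all files in the dataset on the given DBS instance.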
entries = self.findPrimaryDatasetNumFiles(name, dbsInstance, -1, -1)
files = []
dbs = 'das_client.py --query="file dataset=%s instance=prod/%s" --limit=%s' % (name, dbsInstance, entries)
dbsOut = os.popen(dbs)
for line in dbsOut:
if line.find('/store')==-1:
continue
line = line.rstrip()
# print 'line',line
files.append(line)
#return ['root://eoscms//eos/cms%s' % f for f in files]
return files
def buildListOfFiles(self, pattern='.*root'):
self.files = self.buildListOfFilesDBS(self.name, self.dbsInstance)
@staticmethod
def findPrimaryDatasetEntries(dataset, dbsInstance, runmin, runmax):
query, qwhat = dataset, "dataset"
if "#" in dataset: qwhat = "block"
if runmin >0 or runmax > 0:
if runmin == runmax:
query = "%s run=%d" % (query,runmin)
else:
print "WARNING: queries with run ranges are slow in DAS"
query = "%s run between [%d, %d]" % (query,runmin if runmin > 0 else 1, runmax if runmax > 0 else 999999)
dbs='das_client.py --query="summary %s=%s instance=prod/%s"'%(qwhat, query, dbsInstance)
dbsOut = os.popen(dbs).readlines()
entries = []
for line in dbsOut:
line = line.replace('\n','')
if "nevents" in line:
entries.append(int(line.split(":")[1]))
if entries:
return sum(entries)
return -1
@staticmethod
def findPrimaryDatasetNumFiles(dataset, dbsInstance, runmin, runmax):
query, qwhat = dataset, "dataset"
if "#" in dataset: qwhat = "block"
if runmin >0 or runmax > 0:
if runmin == runmax:
query = "%s run=%d" % (query,runmin)
else:
print "WARNING: queries with run ranges are slow in DAS"
query = "%s run between [%d, %d]" % (query,runmin if runmin > 0 else 1, runmax if runmax > 0 else 999999)
dbs='das_client.py --query="summary %s=%s instance=prod/%s"'%(qwhat, query, dbsInstance)
dbsOut = os.popen(dbs).readlines()
entries = []
for line in dbsOut:
line = line.replace('\n','')
if "nfiles" in line:
entries.append(int(line.split(":")[1]))
if entries:
return sum(entries)
return -1
def getPrimaryDatasetEntries(self):
runmin = -1
runmax = -1
if self.run_range is not None:
runmin = self.run_range[0]
runmax = self.run_range[1]
return self.findPrimaryDatasetEntries(self.name, self.dbsInstance, runmin, runmax)
### MM
def getDatasetFromCache( cachename ) :
cachedir = '/'.join( [os.environ['HOME'],'.cmgdataset'])
pckfile = open( cachedir + "/" + cachename )
dataset = pickle.load(pckfile)
return dataset
def writeDatasetToCache( cachename, dataset ):
cachedir = '/'.join( [os.environ['HOME'],'.cmgdataset'])
if not os.path.exists(cachedir):
os.mkdir(cachedir)
pckfile = open( cachedir + "/" + cachename, 'w')
pickle.dump(dataset, pckfile)
def createDataset( user, dataset, pattern, readcache=False,
basedir = None, run_range = None):
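    # Build the appropriate dataset object (CMS, LOCAL, EOS or a user's castor dataset),
    # optionally reading from / writing to the ~/.cmgdataset cache.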
def cacheFileName(data, user, pattern):
return '{user}%{name}%{pattern}.pck'.format( user = user, name = data.replace('/','_'), pattern = pattern)
def writeCache(dataset):
writeDatasetToCache( cacheFileName(dataset.name, dataset.user, dataset.pattern), dataset )
def readCache(data, user, pattern):
return getDatasetFromCache( cacheFileName(data, user, pattern) )
if readcache:
try:
data = readCache(dataset, user, pattern)
except IOError:
readcache = False
if not readcache:
if user == 'CMS':
data = CMSDataset( dataset , run_range = run_range)
info = False
elif user == 'LOCAL':
data = LocalDataset( dataset, basedir, pattern)
info = False
elif user == 'EOS':
data = EOSDataset(dataset, basedir, pattern)
info = False
else:
data = Dataset( dataset, user, pattern)
writeCache(data)
## if user == 'CMS':
## data = CMSDataset( dataset )
## elif user == 'LOCAL':
## if basedir is None:
## basedir = os.environ['CMGLOCALBASEDIR']
## data = LocalDataset( dataset, basedir, pattern )
## else:
## data = Dataset( user, dataset, pattern )
return data
### MM
def createMyDataset( user, dataset, pattern, dbsInstance, readcache=False):
cachedir = '/'.join( [os.environ['HOME'],'.cmgdataset'])
def cacheFileName(data, user, dbsInstance, pattern):
cf = data.replace('/','_')
name = '{dir}/{user}%{dbsInstance}%{name}%{pattern}.pck'.format(
dir = cachedir,
user = user,
dbsInstance = dbsInstance,
name = cf,
pattern = pattern)
return name
def writeCache(dataset):
if not os.path.exists(cachedir):
os.mkdir(cachedir)
cachename = cacheFileName(dataset.name,
dataset.user,
dataset.dbsInstance,
dataset.pattern)
pckfile = open( cachename, 'w')
pickle.dump(dataset, pckfile)
def readCache(data, user, dbsInstance, pattern):
cachename = cacheFileName(data, user, dbsInstance, pattern)
pckfile = open( cachename)
dataset = pickle.load(pckfile)
#print 'reading cache'
return dataset
if readcache:
try:
data = readCache(dataset, user, dbsInstance, pattern)
except IOError:
readcache = False
if not readcache:
if user == 'PRIVATE':
data = PrivateDataset( dataset, dbsInstance )
info = False
writeCache(data)
return data
### MM
|
[] |
[] |
[
"CMGLOCALBASEDIR",
"HOME"
] |
[]
|
["CMGLOCALBASEDIR", "HOME"]
|
python
| 2 | 0 | |
scripts/perf/nvmf/run_nvmf.py
|
#!/usr/bin/env python3
from json.decoder import JSONDecodeError
import os
import re
import sys
import argparse
import json
import zipfile
import threading
import subprocess
import itertools
import configparser
import time
import uuid
from collections import OrderedDict
import paramiko
import pandas as pd
import rpc
import rpc.client
from common import *
class Server:
def __init__(self, name, general_config, server_config):
self.name = name
self.username = general_config["username"]
self.password = general_config["password"]
self.transport = general_config["transport"].lower()
self.nic_ips = server_config["nic_ips"]
self.mode = server_config["mode"]
self.irq_scripts_dir = "/usr/src/local/mlnx-tools/ofed_scripts"
if "irq_scripts_dir" in server_config and server_config["irq_scripts_dir"]:
self.irq_scripts_dir = server_config["irq_scripts_dir"]
self.local_nic_info = []
self._nics_json_obj = {}
self.svc_restore_dict = {}
self.sysctl_restore_dict = {}
self.tuned_restore_dict = {}
self.governor_restore = ""
self.tuned_profile = ""
self.enable_adq = False
self.adq_priority = None
if "adq_enable" in server_config and server_config["adq_enable"]:
self.enable_adq = server_config["adq_enable"]
self.adq_priority = 1
if "tuned_profile" in server_config:
self.tuned_profile = server_config["tuned_profile"]
if not re.match("^[A-Za-z0-9]*$", name):
self.log_print("Please use a name which contains only letters or numbers")
sys.exit(1)
def log_print(self, msg):
print("[%s] %s" % (self.name, msg), flush=True)
def get_uncommented_lines(self, lines):
return [line for line in lines if line and not line.startswith('#')]
def get_nic_name_by_ip(self, ip):
if not self._nics_json_obj:
nics_json_obj = self.exec_cmd(["ip", "-j", "address", "show"])
self._nics_json_obj = list(filter(lambda x: x["addr_info"], json.loads(nics_json_obj)))
for nic in self._nics_json_obj:
for addr in nic["addr_info"]:
if ip in addr["local"]:
return nic["ifname"]
def set_local_nic_info_helper(self):
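        # No-op in the base class; Target and Initiator override this to return "lshw -json" output.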
pass
def set_local_nic_info(self, pci_info):
def extract_network_elements(json_obj):
nic_list = []
if isinstance(json_obj, list):
for x in json_obj:
nic_list.extend(extract_network_elements(x))
elif isinstance(json_obj, dict):
if "children" in json_obj:
nic_list.extend(extract_network_elements(json_obj["children"]))
if "class" in json_obj.keys() and "network" in json_obj["class"]:
nic_list.append(json_obj)
return nic_list
self.local_nic_info = extract_network_elements(pci_info)
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
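        # No-op in the base class; Target runs commands locally, Initiator runs them over SSH.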
return ""
def configure_system(self):
self.configure_services()
self.configure_sysctl()
self.configure_tuned()
self.configure_cpu_governor()
self.configure_irq_affinity()
def configure_adq(self):
if self.mode == "kernel":
self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
return
self.adq_load_modules()
self.adq_configure_nic()
def adq_load_modules(self):
self.log_print("Modprobing ADQ-related Linux modules...")
adq_module_deps = ["sch_mqprio", "act_mirred", "cls_flower"]
for module in adq_module_deps:
try:
self.exec_cmd(["sudo", "modprobe", module])
self.log_print("%s loaded!" % module)
except CalledProcessError as e:
self.log_print("ERROR: failed to load module %s" % module)
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
def adq_configure_tc(self):
self.log_print("Configuring ADQ Traffic classes and filters...")
if self.mode == "kernel":
self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
return
num_queues_tc0 = 2 # 2 is minimum number of queues for TC0
num_queues_tc1 = self.num_cores
port_param = "dst_port" if isinstance(self, Target) else "src_port"
port = "4420"
xps_script_path = os.path.join(self.spdk_dir, "scripts", "perf", "nvmf", "set_xps_rxqs")
for nic_ip in self.nic_ips:
nic_name = self.get_nic_name_by_ip(nic_ip)
tc_qdisc_map_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name,
"root", "mqprio", "num_tc", "2", "map", "0", "1",
"queues", "%s@0" % num_queues_tc0,
"%s@%s" % (num_queues_tc1, num_queues_tc0),
"hw", "1", "mode", "channel"]
self.log_print(" ".join(tc_qdisc_map_cmd))
self.exec_cmd(tc_qdisc_map_cmd)
time.sleep(5)
tc_qdisc_ingress_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name, "ingress"]
self.log_print(" ".join(tc_qdisc_ingress_cmd))
self.exec_cmd(tc_qdisc_ingress_cmd)
tc_filter_cmd = ["sudo", "tc", "filter", "add", "dev", nic_name,
"protocol", "ip", "ingress", "prio", "1", "flower",
"dst_ip", "%s/32" % nic_ip, "ip_proto", "tcp", port_param, port,
"skip_sw", "hw_tc", "1"]
self.log_print(" ".join(tc_filter_cmd))
self.exec_cmd(tc_filter_cmd)
# show tc configuration
self.log_print("Show tc configuration for %s NIC..." % nic_name)
tc_disk_out = self.exec_cmd(["sudo", "tc", "qdisc", "show", "dev", nic_name])
tc_filter_out = self.exec_cmd(["sudo", "tc", "filter", "show", "dev", nic_name, "ingress"])
self.log_print("%s" % tc_disk_out)
self.log_print("%s" % tc_filter_out)
# Ethtool coalesce settings must be applied after configuring traffic classes
self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-rx", "off", "rx-usecs", "0"])
self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-tx", "off", "tx-usecs", "500"])
self.log_print("Running set_xps_rxqs script for %s NIC..." % nic_name)
xps_cmd = ["sudo", xps_script_path, nic_name]
self.log_print(xps_cmd)
self.exec_cmd(xps_cmd)
def adq_configure_nic(self):
self.log_print("Configuring NIC port settings for ADQ testing...")
# Reload the driver first, to make sure any previous settings are re-set.
try:
self.exec_cmd(["sudo", "rmmod", "ice"])
self.exec_cmd(["sudo", "modprobe", "ice"])
except CalledProcessError as e:
self.log_print("ERROR: failed to reload ice module!")
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
for nic in nic_names:
self.log_print(nic)
try:
self.exec_cmd(["sudo", "ethtool", "-K", nic,
"hw-tc-offload", "on"]) # Enable hardware TC offload
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
"channel-inline-flow-director", "on"]) # Enable Intel Flow Director
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic, "fw-lldp-agent", "off"]) # Disable LLDP
                # As a temporary workaround for ADQ, channel packet inspection optimization is turned on during connection establishment.
# Then turned off before fio ramp_up expires in ethtool_after_fio_ramp().
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
"channel-pkt-inspect-optimize", "on"])
except CalledProcessError as e:
self.log_print("ERROR: failed to configure NIC port using ethtool!")
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
self.log_print("Please update your NIC driver and firmware versions and try again.")
self.log_print(self.exec_cmd(["sudo", "ethtool", "-k", nic]))
self.log_print(self.exec_cmd(["sudo", "ethtool", "--show-priv-flags", nic]))
def configure_services(self):
self.log_print("Configuring active services...")
svc_config = configparser.ConfigParser(strict=False)
# Below list is valid only for RHEL / Fedora systems and might not
# contain valid names for other distributions.
svc_target_state = {
"firewalld": "inactive",
"irqbalance": "inactive",
"lldpad.service": "inactive",
"lldpad.socket": "inactive"
}
for service in svc_target_state:
out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
out = "\n".join(["[%s]" % service, out])
svc_config.read_string(out)
if "LoadError" in svc_config[service] and "not found" in svc_config[service]["LoadError"]:
continue
service_state = svc_config[service]["ActiveState"]
self.log_print("Current state of %s service is %s" % (service, service_state))
self.svc_restore_dict.update({service: service_state})
if service_state != "inactive":
self.log_print("Disabling %s. It will be restored after the test has finished." % service)
self.exec_cmd(["sudo", "systemctl", "stop", service])
def configure_sysctl(self):
self.log_print("Tuning sysctl settings...")
busy_read = 0
if self.enable_adq and self.mode == "spdk":
busy_read = 1
sysctl_opts = {
"net.core.busy_poll": 0,
"net.core.busy_read": busy_read,
"net.core.somaxconn": 4096,
"net.core.netdev_max_backlog": 8192,
"net.ipv4.tcp_max_syn_backlog": 16384,
"net.core.rmem_max": 268435456,
"net.core.wmem_max": 268435456,
"net.ipv4.tcp_mem": "268435456 268435456 268435456",
"net.ipv4.tcp_rmem": "8192 1048576 33554432",
"net.ipv4.tcp_wmem": "8192 1048576 33554432",
"net.ipv4.route.flush": 1,
"vm.overcommit_memory": 1,
}
for opt, value in sysctl_opts.items():
self.sysctl_restore_dict.update({opt: self.exec_cmd(["sysctl", "-n", opt]).strip()})
self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())
def configure_tuned(self):
if not self.tuned_profile:
self.log_print("WARNING: Tuned profile not set in configuration file. Skipping configuration.")
return
self.log_print("Configuring tuned-adm profile to %s." % self.tuned_profile)
service = "tuned"
tuned_config = configparser.ConfigParser(strict=False)
out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
out = "\n".join(["[%s]" % service, out])
tuned_config.read_string(out)
tuned_state = tuned_config[service]["ActiveState"]
self.svc_restore_dict.update({service: tuned_state})
if tuned_state != "inactive":
profile = self.exec_cmd(["cat", "/etc/tuned/active_profile"]).strip()
profile_mode = self.exec_cmd(["cat", "/etc/tuned/profile_mode"]).strip()
self.tuned_restore_dict = {
"profile": profile,
"mode": profile_mode
}
self.exec_cmd(["sudo", "systemctl", "start", service])
self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_profile])
self.log_print("Tuned profile set to %s." % self.exec_cmd(["cat", "/etc/tuned/active_profile"]))
def configure_cpu_governor(self):
self.log_print("Setting CPU governor to performance...")
# This assumes that there is the same CPU scaling governor on each CPU
self.governor_restore = self.exec_cmd(["cat", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"]).strip()
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "performance"])
def configure_irq_affinity(self):
self.log_print("Setting NIC irq affinity for NICs...")
irq_script_path = os.path.join(self.irq_scripts_dir, "set_irq_affinity.sh")
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
for nic in nic_names:
irq_cmd = ["sudo", irq_script_path, nic]
self.log_print(irq_cmd)
self.exec_cmd(irq_cmd, change_dir=self.irq_scripts_dir)
def restore_services(self):
self.log_print("Restoring services...")
for service, state in self.svc_restore_dict.items():
cmd = "stop" if state == "inactive" else "start"
self.exec_cmd(["sudo", "systemctl", cmd, service])
def restore_sysctl(self):
self.log_print("Restoring sysctl settings...")
for opt, value in self.sysctl_restore_dict.items():
self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())
def restore_tuned(self):
self.log_print("Restoring tuned-adm settings...")
if not self.tuned_restore_dict:
return
if self.tuned_restore_dict["mode"] == "auto":
self.exec_cmd(["sudo", "tuned-adm", "auto_profile"])
self.log_print("Reverted tuned-adm to auto_profile.")
else:
self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_restore_dict["profile"]])
self.log_print("Reverted tuned-adm to %s profile." % self.tuned_restore_dict["profile"])
def restore_governor(self):
self.log_print("Restoring CPU governor setting...")
if self.governor_restore:
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", self.governor_restore])
self.log_print("Reverted CPU governor to %s." % self.governor_restore)
class Target(Server):
def __init__(self, name, general_config, target_config):
super(Target, self).__init__(name, general_config, target_config)
# Defaults
self.enable_sar = False
self.sar_delay = 0
self.sar_interval = 0
self.sar_count = 0
self.enable_pcm = False
self.pcm_dir = ""
self.pcm_delay = 0
self.pcm_interval = 0
self.pcm_count = 0
self.enable_bandwidth = 0
self.bandwidth_count = 0
self.enable_dpdk_memory = False
self.dpdk_wait_time = 0
self.enable_zcopy = False
self.scheduler_name = "static"
self.null_block = 0
self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
self.subsystem_info_list = []
if "null_block_devices" in target_config:
self.null_block = target_config["null_block_devices"]
if "sar_settings" in target_config:
self.enable_sar, self.sar_delay, self.sar_interval, self.sar_count = target_config["sar_settings"]
if "pcm_settings" in target_config:
self.enable_pcm = True
self.pcm_dir, self.pcm_delay, self.pcm_interval, self.pcm_count = target_config["pcm_settings"]
if "enable_bandwidth" in target_config:
self.enable_bandwidth, self.bandwidth_count = target_config["enable_bandwidth"]
if "enable_dpdk_memory" in target_config:
self.enable_dpdk_memory, self.dpdk_wait_time = target_config["enable_dpdk_memory"]
if "scheduler_settings" in target_config:
self.scheduler_name = target_config["scheduler_settings"]
if "zcopy_settings" in target_config:
self.enable_zcopy = target_config["zcopy_settings"]
if "results_dir" in target_config:
self.results_dir = target_config["results_dir"]
self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))
self.set_local_nic_info(self.set_local_nic_info_helper())
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.zip_spdk_sources(self.spdk_dir, "/tmp/spdk.zip")
self.configure_system()
if self.enable_adq:
self.configure_adq()
self.sys_config()
def set_local_nic_info_helper(self):
return json.loads(self.exec_cmd(["lshw", "-json"]))
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
stderr_opt = None
if stderr_redirect:
stderr_opt = subprocess.STDOUT
if change_dir:
old_cwd = os.getcwd()
os.chdir(change_dir)
self.log_print("Changing directory to %s" % change_dir)
out = check_output(cmd, stderr=stderr_opt).decode(encoding="utf-8")
if change_dir:
os.chdir(old_cwd)
self.log_print("Changing directory to %s" % old_cwd)
return out
def zip_spdk_sources(self, spdk_dir, dest_file):
self.log_print("Zipping SPDK source directory")
fh = zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED)
for root, directories, files in os.walk(spdk_dir, followlinks=True):
for file in files:
fh.write(os.path.relpath(os.path.join(root, file)))
fh.close()
self.log_print("Done zipping")
def read_json_stats(self, file):
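        # Parse an aggregated fio JSON result file and return read/write IOPS, bandwidth and latency stats (latencies in microseconds).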
with open(file, "r") as json_data:
data = json.load(json_data)
            job_pos = 0  # job_pos = 0 because using aggregated results
# Check if latency is in nano or microseconds to choose correct dict key
def get_lat_unit(key_prefix, dict_section):
# key prefix - lat, clat or slat.
# dict section - portion of json containing latency bucket in question
# Return dict key to access the bucket and unit as string
for k, _ in dict_section.items():
if k.startswith(key_prefix):
return k, k.split("_")[1]
def get_clat_percentiles(clat_dict_leaf):
if "percentile" in clat_dict_leaf:
p99_lat = float(clat_dict_leaf["percentile"]["99.000000"])
p99_9_lat = float(clat_dict_leaf["percentile"]["99.900000"])
p99_99_lat = float(clat_dict_leaf["percentile"]["99.990000"])
p99_999_lat = float(clat_dict_leaf["percentile"]["99.999000"])
return [p99_lat, p99_9_lat, p99_99_lat, p99_999_lat]
else:
# Latest fio versions do not provide "percentile" results if no
# measurements were done, so just return zeroes
return [0, 0, 0, 0]
read_iops = float(data["jobs"][job_pos]["read"]["iops"])
read_bw = float(data["jobs"][job_pos]["read"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["read"][clat_key])
if "ns" in lat_unit:
read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
if "ns" in clat_unit:
read_p99_lat = read_p99_lat / 1000
read_p99_9_lat = read_p99_9_lat / 1000
read_p99_99_lat = read_p99_99_lat / 1000
read_p99_999_lat = read_p99_999_lat / 1000
write_iops = float(data["jobs"][job_pos]["write"]["iops"])
write_bw = float(data["jobs"][job_pos]["write"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["write"][clat_key])
if "ns" in lat_unit:
write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
if "ns" in clat_unit:
write_p99_lat = write_p99_lat / 1000
write_p99_9_lat = write_p99_9_lat / 1000
write_p99_99_lat = write_p99_99_lat / 1000
write_p99_999_lat = write_p99_999_lat / 1000
return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat,
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat,
write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat,
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat]
def parse_results(self, results_dir, csv_file):
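        # Average fio JSON results per initiator, combine them across all initiators for each job, and write the aggregate rows to a CSV summary file.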
files = os.listdir(results_dir)
fio_files = filter(lambda x: ".fio" in x, files)
json_files = [x for x in files if ".json" in x]
headers = ["read_iops", "read_bw", "read_avg_lat_us", "read_min_lat_us", "read_max_lat_us",
"read_p99_lat_us", "read_p99.9_lat_us", "read_p99.99_lat_us", "read_p99.999_lat_us",
"write_iops", "write_bw", "write_avg_lat_us", "write_min_lat_us", "write_max_lat_us",
"write_p99_lat_us", "write_p99.9_lat_us", "write_p99.99_lat_us", "write_p99.999_lat_us"]
aggr_headers = ["iops", "bw", "avg_lat_us", "min_lat_us", "max_lat_us",
"p99_lat_us", "p99.9_lat_us", "p99.99_lat_us", "p99.999_lat_us"]
header_line = ",".join(["Name", *headers])
aggr_header_line = ",".join(["Name", *aggr_headers])
# Create empty results file
with open(os.path.join(results_dir, csv_file), "w") as fh:
fh.write(aggr_header_line + "\n")
rows = set()
for fio_config in fio_files:
self.log_print("Getting FIO stats for %s" % fio_config)
job_name, _ = os.path.splitext(fio_config)
# Look in the filename for rwmixread value. Function arguments do
# not have that information.
# TODO: Improve this function by directly using workload params instead
# of regexing through filenames.
if "read" in job_name:
rw_mixread = 1
elif "write" in job_name:
rw_mixread = 0
else:
rw_mixread = float(re.search(r"m_(\d+)", job_name).group(1)) / 100
# If "_CPU" exists in name - ignore it
# Initiators for the same job could have different num_cores parameter
job_name = re.sub(r"_\d+CPU", "", job_name)
job_result_files = [x for x in json_files if x.startswith(job_name)]
self.log_print("Matching result files for current fio config:")
for j in job_result_files:
self.log_print("\t %s" % j)
# There may have been more than 1 initiator used in test, need to check that
# Result files are created so that string after last "_" separator is server name
inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
inits_avg_results = []
for i in inits_names:
self.log_print("\tGetting stats for initiator %s" % i)
# There may have been more than 1 test run for this job, calculate average results for initiator
i_results = [x for x in job_result_files if i in x]
i_results_filename = re.sub(r"run_\d+_", "", i_results[0].replace("json", "csv"))
separate_stats = []
for r in i_results:
try:
stats = self.read_json_stats(os.path.join(results_dir, r))
separate_stats.append(stats)
self.log_print(stats)
except JSONDecodeError as e:
                        self.log_print("ERROR: Failed to parse %s results! Results might be incomplete!" % r)
init_results = [sum(x) for x in zip(*separate_stats)]
init_results = [x / len(separate_stats) for x in init_results]
inits_avg_results.append(init_results)
self.log_print("\tAverage results for initiator %s" % i)
self.log_print(init_results)
with open(os.path.join(results_dir, i_results_filename), "w") as fh:
fh.write(header_line + "\n")
fh.write(",".join([job_name, *["{0:.3f}".format(x) for x in init_results]]) + "\n")
# Sum results of all initiators running this FIO job.
            # Latency results are an average of latencies from across all initiators.
inits_avg_results = [sum(x) for x in zip(*inits_avg_results)]
inits_avg_results = OrderedDict(zip(headers, inits_avg_results))
for key in inits_avg_results:
if "lat" in key:
inits_avg_results[key] /= len(inits_names)
# Aggregate separate read/write values into common labels
# Take rw_mixread into consideration for mixed read/write workloads.
aggregate_results = OrderedDict()
for h in aggr_headers:
read_stat, write_stat = [float(value) for key, value in inits_avg_results.items() if h in key]
if "lat" in h:
_ = rw_mixread * read_stat + (1 - rw_mixread) * write_stat
else:
_ = read_stat + write_stat
aggregate_results[h] = "{0:.3f}".format(_)
rows.add(",".join([job_name, *aggregate_results.values()]))
# Save results to file
for row in rows:
with open(os.path.join(results_dir, csv_file), "a") as fh:
fh.write(row + "\n")
self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))
def measure_sar(self, results_dir, sar_file_name):
self.log_print("Waiting %d delay before measuring SAR stats" % self.sar_delay)
cpu_number = os.cpu_count()
sar_idle_sum = 0
time.sleep(self.sar_delay)
out = self.exec_cmd(["sar", "-P", "ALL", "%s" % self.sar_interval, "%s" % self.sar_count])
with open(os.path.join(results_dir, sar_file_name), "w") as fh:
for line in out.split("\n"):
if "Average" in line:
if "CPU" in line:
self.log_print("Summary CPU utilization from SAR:")
self.log_print(line)
elif "all" in line:
self.log_print(line)
else:
sar_idle_sum += float(line.split()[7])
fh.write(out)
sar_cpu_usage = cpu_number * 100 - sar_idle_sum
with open(os.path.join(results_dir, sar_file_name), "a") as f:
f.write("Total CPU used: " + str(sar_cpu_usage))
def ethtool_after_fio_ramp(self, fio_ramp_time):
time.sleep(fio_ramp_time//2)
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
for nic in nic_names:
self.log_print(nic)
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
"channel-pkt-inspect-optimize", "off"]) # Disable channel packet inspection optimization
def measure_pcm_memory(self, results_dir, pcm_file_name):
time.sleep(self.pcm_delay)
cmd = ["%s/pcm-memory.x" % self.pcm_dir, "%s" % self.pcm_interval, "-csv=%s/%s" % (results_dir, pcm_file_name)]
pcm_memory = subprocess.Popen(cmd)
time.sleep(self.pcm_count)
pcm_memory.terminate()
def measure_pcm(self, results_dir, pcm_file_name):
time.sleep(self.pcm_delay)
cmd = ["%s/pcm.x" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count, "-csv=%s/%s" % (results_dir, pcm_file_name)]
subprocess.run(cmd)
df = pd.read_csv(os.path.join(results_dir, pcm_file_name), header=[0, 1])
df = df.rename(columns=lambda x: re.sub(r'Unnamed:[\w\s]*$', '', x))
skt = df.loc[:, df.columns.get_level_values(1).isin({'UPI0', 'UPI1', 'UPI2'})]
skt_pcm_file_name = "_".join(["skt", pcm_file_name])
skt.to_csv(os.path.join(results_dir, skt_pcm_file_name), index=False)
def measure_pcm_power(self, results_dir, pcm_power_file_name):
time.sleep(self.pcm_delay)
out = self.exec_cmd(["%s/pcm-power.x" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count])
with open(os.path.join(results_dir, pcm_power_file_name), "w") as fh:
fh.write(out)
def measure_network_bandwidth(self, results_dir, bandwidth_file_name):
self.log_print("INFO: starting network bandwidth measure")
self.exec_cmd(["bwm-ng", "-o", "csv", "-F", "%s/%s" % (results_dir, bandwidth_file_name),
"-a", "1", "-t", "1000", "-c", str(self.bandwidth_count)])
def measure_dpdk_memory(self, results_dir):
self.log_print("INFO: waiting to generate DPDK memory usage")
time.sleep(self.dpdk_wait_time)
self.log_print("INFO: generating DPDK memory usage")
rpc.env.env_dpdk_get_mem_stats
os.rename("/tmp/spdk_mem_dump.txt", "%s/spdk_mem_dump.txt" % (results_dir))
def sys_config(self):
self.log_print("====Kernel release:====")
self.log_print(os.uname().release)
self.log_print("====Kernel command line:====")
with open('/proc/cmdline') as f:
cmdline = f.readlines()
self.log_print('\n'.join(self.get_uncommented_lines(cmdline)))
self.log_print("====sysctl conf:====")
with open('/etc/sysctl.conf') as f:
sysctl = f.readlines()
self.log_print('\n'.join(self.get_uncommented_lines(sysctl)))
self.log_print("====Cpu power info:====")
self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
self.log_print("====zcopy settings:====")
self.log_print("zcopy enabled: %s" % (self.enable_zcopy))
self.log_print("====Scheduler settings:====")
self.log_print("SPDK scheduler: %s" % (self.scheduler_name))
class Initiator(Server):
def __init__(self, name, general_config, initiator_config):
super(Initiator, self).__init__(name, general_config, initiator_config)
# Required fields
self.ip = initiator_config["ip"]
self.target_nic_ips = initiator_config["target_nic_ips"]
# Defaults
self.cpus_allowed = None
self.cpus_allowed_policy = "shared"
self.spdk_dir = "/tmp/spdk"
self.fio_bin = "/usr/src/fio/fio"
self.nvmecli_bin = "nvme"
self.cpu_frequency = None
self.subsystem_info_list = []
if "spdk_dir" in initiator_config:
self.spdk_dir = initiator_config["spdk_dir"]
if "fio_bin" in initiator_config:
self.fio_bin = initiator_config["fio_bin"]
if "nvmecli_bin" in initiator_config:
self.nvmecli_bin = initiator_config["nvmecli_bin"]
if "cpus_allowed" in initiator_config:
self.cpus_allowed = initiator_config["cpus_allowed"]
if "cpus_allowed_policy" in initiator_config:
self.cpus_allowed_policy = initiator_config["cpus_allowed_policy"]
if "cpu_frequency" in initiator_config:
self.cpu_frequency = initiator_config["cpu_frequency"]
if os.getenv('SPDK_WORKSPACE'):
self.spdk_dir = os.getenv('SPDK_WORKSPACE')
self.ssh_connection = paramiko.SSHClient()
self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
self.exec_cmd(["sudo", "rm", "-rf", "%s/nvmf_perf" % self.spdk_dir])
self.exec_cmd(["mkdir", "-p", "%s" % self.spdk_dir])
self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.copy_spdk("/tmp/spdk.zip")
self.set_local_nic_info(self.set_local_nic_info_helper())
self.set_cpu_frequency()
self.configure_system()
if self.enable_adq:
self.configure_adq()
self.sys_config()
def set_local_nic_info_helper(self):
return json.loads(self.exec_cmd(["lshw", "-json"]))
def __del__(self):
self.ssh_connection.close()
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
if change_dir:
cmd = ["cd", change_dir, ";", *cmd]
# In case one of the command elements contains whitespace and is not
        # already quoted (e.g. when calling sysctl), quote it again to prevent expansion
# when sending to remote system.
for i, c in enumerate(cmd):
if (" " in c or "\t" in c) and not (c.startswith("'") and c.endswith("'")):
cmd[i] = '"%s"' % c
cmd = " ".join(cmd)
        # Redirect stderr to stdout by using the get_pty option if needed
_, stdout, _ = self.ssh_connection.exec_command(cmd, get_pty=stderr_redirect)
out = stdout.read().decode(encoding="utf-8")
# Check the return code
rc = stdout.channel.recv_exit_status()
if rc:
raise CalledProcessError(int(rc), cmd, out)
return out
def put_file(self, local, remote_dest):
ftp = self.ssh_connection.open_sftp()
ftp.put(local, remote_dest)
ftp.close()
def get_file(self, remote, local_dest):
ftp = self.ssh_connection.open_sftp()
ftp.get(remote, local_dest)
ftp.close()
def copy_spdk(self, local_spdk_zip):
self.log_print("Copying SPDK sources to initiator %s" % self.name)
self.put_file(local_spdk_zip, "/tmp/spdk_drop.zip")
self.log_print("Copied sources zip from target")
self.exec_cmd(["unzip", "-qo", "/tmp/spdk_drop.zip", "-d", self.spdk_dir])
self.log_print("Sources unpacked")
def copy_result_files(self, dest_dir):
self.log_print("Copying results")
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
# Get list of result files from initiator and copy them back to target
file_list = self.exec_cmd(["ls", "%s/nvmf_perf" % self.spdk_dir]).strip().split("\n")
for file in file_list:
self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", file),
os.path.join(dest_dir, file))
self.log_print("Done copying results")
def discover_subsystems(self, address_list, subsys_no):
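        # Run "nvme discover" against each target IP and port (4420 + n) and record the matching (trsvcid, nqn, traddr) tuples.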
num_nvmes = range(0, subsys_no)
nvme_discover_output = ""
for ip, subsys_no in itertools.product(address_list, num_nvmes):
self.log_print("Trying to discover: %s:%s" % (ip, 4420 + subsys_no))
nvme_discover_cmd = ["sudo",
"%s" % self.nvmecli_bin,
"discover", "-t", "%s" % self.transport,
"-s", "%s" % (4420 + subsys_no),
"-a", "%s" % ip]
try:
stdout = self.exec_cmd(nvme_discover_cmd)
if stdout:
nvme_discover_output = nvme_discover_output + stdout
except CalledProcessError:
# Do nothing. In case of discovering remote subsystems of kernel target
# we expect "nvme discover" to fail a bunch of times because we basically
# scan ports.
pass
subsystems = re.findall(r'trsvcid:\s(\d+)\s+' # get svcid number
r'subnqn:\s+([a-zA-Z0-9\.\-\:]+)\s+' # get NQN id
r'traddr:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', # get IP address
nvme_discover_output) # from nvme discovery output
subsystems = filter(lambda x: x[-1] in address_list, subsystems)
subsystems = list(set(subsystems))
subsystems.sort(key=lambda x: x[1])
self.log_print("Found matching subsystems on target side:")
for s in subsystems:
self.log_print(s)
self.subsystem_info_list = subsystems
def gen_fio_filename_conf(self, *args, **kwargs):
# Logic implemented in SPDKInitiator and KernelInitiator classes
pass
def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10, rate_iops=0):
fio_conf_template = """
[global]
ioengine={ioengine}
{spdk_conf}
thread=1
group_reporting=1
direct=1
percentile_list=50:90:99:99.5:99.9:99.99:99.999
norandommap=1
rw={rw}
rwmixread={rwmixread}
bs={block_size}
time_based=1
ramp_time={ramp_time}
runtime={run_time}
rate_iops={rate_iops}
"""
if "spdk" in self.mode:
bdev_conf = self.gen_spdk_bdev_conf(self.subsystem_info_list)
self.exec_cmd(["echo", "'%s'" % bdev_conf, ">", "%s/bdev.conf" % self.spdk_dir])
ioengine = "%s/build/fio/spdk_bdev" % self.spdk_dir
spdk_conf = "spdk_json_conf=%s/bdev.conf" % self.spdk_dir
else:
ioengine = self.ioengine
spdk_conf = ""
out = self.exec_cmd(["sudo", "nvme", "list", "|", "grep", "-E", "'SPDK|Linux'",
"|", "awk", "'{print $1}'"])
subsystems = [x for x in out.split("\n") if "nvme" in x]
if self.cpus_allowed is not None:
self.log_print("Limiting FIO workload execution on specific cores %s" % self.cpus_allowed)
cpus_num = 0
cpus = self.cpus_allowed.split(",")
for cpu in cpus:
if "-" in cpu:
a, b = cpu.split("-")
a = int(a)
b = int(b)
cpus_num += len(range(a, b))
else:
cpus_num += 1
self.num_cores = cpus_num
threads = range(0, self.num_cores)
elif hasattr(self, 'num_cores'):
self.log_print("Limiting FIO workload execution to %s cores" % self.num_cores)
threads = range(0, int(self.num_cores))
else:
self.num_cores = len(subsystems)
threads = range(0, len(subsystems))
if "spdk" in self.mode:
filename_section = self.gen_fio_filename_conf(self.subsystem_info_list, threads, io_depth, num_jobs)
else:
filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs)
fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
rw=rw, rwmixread=rwmixread, block_size=block_size,
ramp_time=ramp_time, run_time=run_time, rate_iops=rate_iops)
# TODO: hipri disabled for now, as it causes fio errors:
# io_u error on file /dev/nvme2n1: Operation not supported
# See comment in KernelInitiator class, kernel_init_connect() function
if hasattr(self, "ioengine") and "io_uring" in self.ioengine:
fio_config = fio_config + """
fixedbufs=1
registerfiles=1
#hipri=1
"""
if num_jobs:
fio_config = fio_config + "numjobs=%s \n" % num_jobs
if self.cpus_allowed is not None:
fio_config = fio_config + "cpus_allowed=%s \n" % self.cpus_allowed
fio_config = fio_config + "cpus_allowed_policy=%s \n" % self.cpus_allowed_policy
fio_config = fio_config + filename_section
fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
if hasattr(self, "num_cores"):
fio_config_filename += "_%sCPU" % self.num_cores
fio_config_filename += ".fio"
self.exec_cmd(["mkdir", "-p", "%s/nvmf_perf" % self.spdk_dir])
self.exec_cmd(["echo", "'%s'" % fio_config, ">", "%s/nvmf_perf/%s" % (self.spdk_dir, fio_config_filename)])
self.log_print("Created FIO Config:")
self.log_print(fio_config)
return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)
def set_cpu_frequency(self):
if self.cpu_frequency is not None:
try:
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "userspace"], True)
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-f", "%s" % self.cpu_frequency], True)
self.log_print(self.exec_cmd(["sudo", "cpupower", "frequency-info"]))
except Exception:
self.log_print("ERROR: cpu_frequency will not work when intel_pstate is enabled!")
sys.exit()
else:
            self.log_print("WARNING: you have disabled intel_pstate and are using the default cpu governor.")
def run_fio(self, fio_config_file, run_num=None):
job_name, _ = os.path.splitext(fio_config_file)
self.log_print("Starting FIO run for job: %s" % job_name)
self.log_print("Using FIO: %s" % self.fio_bin)
if run_num:
for i in range(1, run_num + 1):
output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
try:
output = self.exec_cmd(["sudo", self.fio_bin, fio_config_file, "--output-format=json",
"--output=%s" % output_filename, "--eta=never"], True)
self.log_print(output)
except subprocess.CalledProcessError as e:
self.log_print("ERROR: Fio process failed!")
self.log_print(e.stdout)
else:
output_filename = job_name + "_" + self.name + ".json"
            output = self.exec_cmd(["sudo", self.fio_bin,
                                    fio_config_file, "--output-format=json",
                                    "--output=%s" % output_filename], True)
self.log_print(output)
self.log_print("FIO run finished. Results in: %s" % output_filename)
def sys_config(self):
self.log_print("====Kernel release:====")
self.log_print(self.exec_cmd(["uname", "-r"]))
self.log_print("====Kernel command line:====")
cmdline = self.exec_cmd(["cat", "/proc/cmdline"])
self.log_print('\n'.join(self.get_uncommented_lines(cmdline.splitlines())))
self.log_print("====sysctl conf:====")
sysctl = self.exec_cmd(["cat", "/etc/sysctl.conf"])
self.log_print('\n'.join(self.get_uncommented_lines(sysctl.splitlines())))
self.log_print("====Cpu power info:====")
self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
class KernelTarget(Target):
def __init__(self, name, general_config, target_config):
super(KernelTarget, self).__init__(name, general_config, target_config)
# Defaults
self.nvmet_bin = "nvmetcli"
if "nvmet_bin" in target_config:
self.nvmet_bin = target_config["nvmet_bin"]
def __del__(self):
nvmet_command(self.nvmet_bin, "clear")
def kernel_tgt_gen_subsystem_conf(self, nvme_list, address_list):
nvmet_cfg = {
"ports": [],
"hosts": [],
"subsystems": [],
}
# Split disks between NIC IP's
disks_per_ip = int(len(nvme_list) / len(address_list))
disk_chunks = [nvme_list[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(address_list))]
subsys_no = 1
port_no = 0
for ip, chunk in zip(address_list, disk_chunks):
for disk in chunk:
nqn = "nqn.2018-09.io.spdk:cnode%s" % subsys_no
nvmet_cfg["subsystems"].append({
"allowed_hosts": [],
"attr": {
"allow_any_host": "1",
"serial": "SPDK00%s" % subsys_no,
"version": "1.3"
},
"namespaces": [
{
"device": {
"path": disk,
"uuid": "%s" % uuid.uuid4()
},
"enable": 1,
"nsid": subsys_no
}
],
"nqn": nqn
})
nvmet_cfg["ports"].append({
"addr": {
"adrfam": "ipv4",
"traddr": ip,
"trsvcid": "%s" % (4420 + port_no),
"trtype": "%s" % self.transport
},
"portid": subsys_no,
"referrals": [],
"subsystems": [nqn]
})
subsys_no += 1
port_no += 1
self.subsystem_info_list.append([port_no, nqn, ip])
with open("kernel.conf", "w") as fh:
fh.write(json.dumps(nvmet_cfg, indent=2))
pass
def tgt_start(self):
self.log_print("Configuring kernel NVMeOF Target")
if self.null_block:
print("Configuring with null block device.")
null_blk_list = ["/dev/nullb{}".format(x) for x in range(self.null_block)]
self.kernel_tgt_gen_subsystem_conf(null_blk_list, self.nic_ips)
self.subsys_no = len(null_blk_list)
else:
print("Configuring with NVMe drives.")
nvme_list = get_nvme_devices()
self.kernel_tgt_gen_subsystem_conf(nvme_list, self.nic_ips)
self.subsys_no = len(nvme_list)
nvmet_command(self.nvmet_bin, "clear")
nvmet_command(self.nvmet_bin, "restore kernel.conf")
if self.enable_adq:
self.adq_configure_tc()
self.log_print("Done configuring kernel NVMeOF Target")
class SPDKTarget(Target):
def __init__(self, name, general_config, target_config):
super(SPDKTarget, self).__init__(name, general_config, target_config)
# Required fields
self.core_mask = target_config["core_mask"]
self.num_cores = self.get_num_cores(self.core_mask)
# Defaults
self.dif_insert_strip = False
self.null_block_dif_type = 0
self.num_shared_buffers = 4096
self.bpf_proc = None
self.bpf_scripts = []
if "num_shared_buffers" in target_config:
self.num_shared_buffers = target_config["num_shared_buffers"]
if "null_block_dif_type" in target_config:
self.null_block_dif_type = target_config["null_block_dif_type"]
if "dif_insert_strip" in target_config:
self.dif_insert_strip = target_config["dif_insert_strip"]
if "bpf_scripts" in target_config:
self.bpf_scripts = target_config["bpf_scripts"]
def get_num_cores(self, core_mask):
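        # core_mask may be a hex mask (e.g. "0xF0") or a bracketed core list with ranges (e.g. "[0-3,8]").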
if "0x" in core_mask:
return bin(int(core_mask, 16)).count("1")
else:
num_cores = 0
core_mask = core_mask.replace("[", "")
core_mask = core_mask.replace("]", "")
for i in core_mask.split(","):
if "-" in i:
x, y = i.split("-")
num_cores += len(range(int(x), int(y))) + 1
else:
num_cores += 1
return num_cores
def spdk_tgt_configure(self):
self.log_print("Configuring SPDK NVMeOF target via RPC")
if self.enable_adq:
self.adq_configure_tc()
# Create RDMA transport layer
rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport,
num_shared_buffers=self.num_shared_buffers,
dif_insert_or_strip=self.dif_insert_strip,
sock_priority=self.adq_priority)
self.log_print("SPDK NVMeOF transport layer:")
rpc.client.print_dict(rpc.nvmf.nvmf_get_transports(self.client))
if self.null_block:
self.spdk_tgt_add_nullblock(self.null_block)
self.spdk_tgt_add_subsystem_conf(self.nic_ips, self.null_block)
else:
self.spdk_tgt_add_nvme_conf()
self.spdk_tgt_add_subsystem_conf(self.nic_ips)
self.log_print("Done configuring SPDK NVMeOF Target")
def spdk_tgt_add_nullblock(self, null_block_count):
md_size = 0
block_size = 4096
if self.null_block_dif_type != 0:
md_size = 128
self.log_print("Adding null block bdevices to config via RPC")
for i in range(null_block_count):
self.log_print("Setting bdev protection to :%s" % self.null_block_dif_type)
rpc.bdev.bdev_null_create(self.client, 102400, block_size + md_size, "Nvme{}n1".format(i),
dif_type=self.null_block_dif_type, md_size=md_size)
self.log_print("SPDK Bdevs configuration:")
rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
self.log_print("Adding NVMe bdevs to config via RPC")
bdfs = get_nvme_devices_bdf()
bdfs = [b.replace(":", ".") for b in bdfs]
if req_num_disks:
if req_num_disks > len(bdfs):
self.log_print("ERROR: Requested number of disks is more than available %s" % len(bdfs))
sys.exit(1)
else:
bdfs = bdfs[0:req_num_disks]
for i, bdf in enumerate(bdfs):
rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)
self.log_print("SPDK Bdevs configuration:")
rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
self.log_print("Adding subsystems to config")
port = "4420"
if not req_num_disks:
req_num_disks = get_nvme_devices_count()
# Distribute bdevs between provided NICs
num_disks = range(0, req_num_disks)
if len(num_disks) == 1:
disks_per_ip = 1
else:
disks_per_ip = int(len(num_disks) / len(ips))
disk_chunks = [num_disks[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(ips))]
# Create subsystems, add bdevs to namespaces, add listeners
for ip, chunk in zip(ips, disk_chunks):
for c in chunk:
nqn = "nqn.2018-09.io.spdk:cnode%s" % c
serial = "SPDK00%s" % c
bdev_name = "Nvme%sn1" % c
rpc.nvmf.nvmf_create_subsystem(self.client, nqn, serial,
allow_any_host=True, max_namespaces=8)
rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)
rpc.nvmf.nvmf_subsystem_add_listener(self.client,
nqn=nqn,
trtype=self.transport,
traddr=ip,
trsvcid=port,
adrfam="ipv4")
self.subsystem_info_list.append([port, nqn, ip])
self.log_print("SPDK NVMeOF subsystem configuration:")
rpc.client.print_dict(rpc.nvmf.nvmf_get_subsystems(self.client))
def bpf_start(self):
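        # Attach the configured bpftrace scripts to the running nvmf_tgt PID; trace output goes to bpf_traces.txt in the results directory.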
self.log_print("Starting BPF Trace scripts: %s" % self.bpf_scripts)
bpf_script = os.path.join(self.spdk_dir, "scripts/bpftrace.sh")
bpf_traces = [os.path.join(self.spdk_dir, "scripts/bpf", trace) for trace in self.bpf_scripts]
results_path = os.path.join(self.results_dir, "bpf_traces.txt")
with open(self.pid, "r") as fh:
nvmf_pid = str(fh.readline())
cmd = [bpf_script, nvmf_pid, *bpf_traces]
self.log_print(cmd)
self.bpf_proc = subprocess.Popen(cmd, env={"BPF_OUTFILE": results_path})
def tgt_start(self):
if self.null_block:
self.subsys_no = 1
else:
self.subsys_no = get_nvme_devices_count()
self.log_print("Starting SPDK NVMeOF Target process")
nvmf_app_path = os.path.join(self.spdk_dir, "build/bin/nvmf_tgt")
proc = subprocess.Popen([nvmf_app_path, "--wait-for-rpc", "-m", self.core_mask])
self.pid = os.path.join(self.spdk_dir, "nvmf.pid")
with open(self.pid, "w") as fh:
fh.write(str(proc.pid))
self.nvmf_proc = proc
self.log_print("SPDK NVMeOF Target PID=%s" % self.pid)
self.log_print("Waiting for spdk to initialize...")
while True:
if os.path.exists("/var/tmp/spdk.sock"):
break
time.sleep(1)
self.client = rpc.client.JSONRPCClient("/var/tmp/spdk.sock")
if self.enable_zcopy:
rpc.sock.sock_impl_set_options(self.client, impl_name="posix",
enable_zerocopy_send_server=True)
self.log_print("Target socket options:")
rpc.client.print_dict(rpc.sock.sock_impl_get_options(self.client, impl_name="posix"))
if self.enable_adq:
rpc.sock.sock_impl_set_options(self.client, impl_name="posix", enable_placement_id=1)
rpc.bdev.bdev_nvme_set_options(self.client, timeout_us=0, action_on_timeout=None,
nvme_adminq_poll_period_us=100000, retry_count=4)
rpc.app.framework_set_scheduler(self.client, name=self.scheduler_name)
rpc.framework_start_init(self.client)
if self.bpf_scripts:
self.bpf_start()
self.spdk_tgt_configure()
def __del__(self):
if self.bpf_proc:
self.log_print("Stopping BPF Trace script")
self.bpf_proc.terminate()
self.bpf_proc.wait()
if hasattr(self, "nvmf_proc"):
try:
self.nvmf_proc.terminate()
self.nvmf_proc.wait()
except Exception as e:
self.log_print(e)
self.nvmf_proc.kill()
self.nvmf_proc.communicate()
class KernelInitiator(Initiator):
def __init__(self, name, general_config, initiator_config):
super(KernelInitiator, self).__init__(name, general_config, initiator_config)
# Defaults
self.extra_params = ""
self.ioengine = "libaio"
if "extra_params" in initiator_config:
self.extra_params = initiator_config["extra_params"]
if "kernel_engine" in initiator_config:
self.ioengine = initiator_config["kernel_engine"]
if "io_uring" in self.ioengine:
self.extra_params = "--nr-poll-queues=8"
def __del__(self):
self.ssh_connection.close()
def get_connected_nvme_list(self):
json_obj = json.loads(self.exec_cmd(["sudo", "nvme", "list", "-o", "json"]))
nvme_list = [os.path.basename(x["DevicePath"]) for x in json_obj["Devices"]
if "SPDK" in x["ModelNumber"] or "Linux" in x["ModelNumber"]]
return nvme_list
def kernel_init_connect(self):
self.log_print("Below connection attempts may result in error messages, this is expected!")
for subsystem in self.subsystem_info_list:
self.log_print("Trying to connect %s %s %s" % subsystem)
self.exec_cmd(["sudo", self.nvmecli_bin, "connect", "-t", self.transport,
"-s", subsystem[0], "-n", subsystem[1], "-a", subsystem[2], self.extra_params])
time.sleep(2)
if "io_uring" in self.ioengine:
self.log_print("Setting block layer settings for io_uring.")
# TODO: io_poll=1 and io_poll_delay=-1 params not set here, because
# apparently it's not possible for connected subsystems.
# Results in "error: Invalid argument"
block_sysfs_settings = {
"iostats": "0",
"rq_affinity": "0",
"nomerges": "2"
}
for disk in self.get_connected_nvme_list():
sysfs = os.path.join("/sys/block", disk, "queue")
for k, v in block_sysfs_settings.items():
sysfs_opt_path = os.path.join(sysfs, k)
try:
self.exec_cmd(["sudo", "bash", "-c", "echo %s > %s" % (v, sysfs_opt_path)], stderr_redirect=True)
except subprocess.CalledProcessError as e:
self.log_print("Warning: command %s failed due to error %s. %s was not set!" % (e.cmd, e.output, v))
finally:
_ = self.exec_cmd(["sudo", "cat", "%s" % (sysfs_opt_path)])
self.log_print("%s=%s" % (sysfs_opt_path, _))
def kernel_init_disconnect(self):
for subsystem in self.subsystem_info_list:
self.exec_cmd(["sudo", self.nvmecli_bin, "disconnect", "-n", subsystem[1]])
time.sleep(1)
def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1):
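        # Spread the connected NVMe devices evenly across fio [filenameN] sections (one per thread);
        # each section's iodepth is io_depth * num_devices / num_jobs (at least 1).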
nvme_list = [os.path.join("/dev", nvme) for nvme in self.get_connected_nvme_list()]
filename_section = ""
nvme_per_split = int(len(nvme_list) / len(threads))
remainder = len(nvme_list) % len(threads)
iterator = iter(nvme_list)
result = []
for i in range(len(threads)):
result.append([])
for _ in range(nvme_per_split):
result[i].append(next(iterator))
if remainder:
result[i].append(next(iterator))
remainder -= 1
for i, r in enumerate(result):
header = "[filename%s]" % i
disks = "\n".join(["filename=%s" % x for x in r])
job_section_qd = round((io_depth * len(r)) / num_jobs)
if job_section_qd == 0:
job_section_qd = 1
iodepth = "iodepth=%s" % job_section_qd
filename_section = "\n".join([filename_section, header, disks, iodepth])
return filename_section
class SPDKInitiator(Initiator):
def __init__(self, name, general_config, initiator_config):
super(SPDKInitiator, self).__init__(name, general_config, initiator_config)
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.install_spdk()
# Required fields
self.num_cores = initiator_config["num_cores"]
def install_spdk(self):
self.log_print("Using fio binary %s" % self.fio_bin)
self.exec_cmd(["git", "-C", self.spdk_dir, "submodule", "update", "--init"])
self.exec_cmd(["git", "-C", self.spdk_dir, "clean", "-ffdx"])
self.exec_cmd(["cd", self.spdk_dir, "&&", "./configure", "--with-rdma", "--with-fio=%s" % os.path.dirname(self.fio_bin)])
self.exec_cmd(["make", "-C", self.spdk_dir, "clean"])
self.exec_cmd(["make", "-C", self.spdk_dir, "-j$(($(nproc)*2))"])
self.log_print("SPDK built")
self.exec_cmd(["sudo", "%s/scripts/setup.sh" % self.spdk_dir])
def gen_spdk_bdev_conf(self, remote_subsystem_list):
bdev_cfg_section = {
"subsystems": [
{
"subsystem": "bdev",
"config": []
}
]
}
for i, subsys in enumerate(remote_subsystem_list):
sub_port, sub_nqn, sub_addr = map(lambda x: str(x), subsys)
nvme_ctrl = {
"method": "bdev_nvme_attach_controller",
"params": {
"name": "Nvme{}".format(i),
"trtype": self.transport,
"traddr": sub_addr,
"trsvcid": sub_port,
"subnqn": sub_nqn,
"adrfam": "IPv4"
}
}
if self.enable_adq:
nvme_ctrl["params"].update({"priority": "1"})
bdev_cfg_section["subsystems"][0]["config"].append(nvme_ctrl)
return json.dumps(bdev_cfg_section, indent=2)
def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1):
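        # Same distribution logic as the kernel initiator, but with SPDK bdev names (NvmeXn1) as filenames;
        # the thread count is capped at the number of subsystems.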
filename_section = ""
if len(threads) >= len(subsystems):
threads = range(0, len(subsystems))
filenames = ["Nvme%sn1" % x for x in range(0, len(subsystems))]
nvme_per_split = int(len(subsystems) / len(threads))
remainder = len(subsystems) % len(threads)
iterator = iter(filenames)
result = []
for i in range(len(threads)):
result.append([])
for _ in range(nvme_per_split):
result[i].append(next(iterator))
if remainder:
result[i].append(next(iterator))
remainder -= 1
for i, r in enumerate(result):
header = "[filename%s]" % i
disks = "\n".join(["filename=%s" % x for x in r])
job_section_qd = round((io_depth * len(r)) / num_jobs)
if job_section_qd == 0:
job_section_qd = 1
iodepth = "iodepth=%s" % job_section_qd
filename_section = "\n".join([filename_section, header, disks, iodepth])
return filename_section
if __name__ == "__main__":
script_full_dir = os.path.dirname(os.path.realpath(__file__))
default_config_file_path = os.path.relpath(os.path.join(script_full_dir, "config.json"))
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--config', type=str, default=default_config_file_path,
help='Configuration file.')
parser.add_argument('-r', '--results', type=str, default='/tmp/results',
help='Results directory.')
parser.add_argument('-s', '--csv-filename', type=str, default='nvmf_results.csv',
help='CSV results filename.')
args = parser.parse_args()
print("Using config file: %s" % args.config)
with open(args.config, "r") as config:
data = json.load(config)
initiators = []
fio_cases = []
general_config = data["general"]
target_config = data["target"]
initiator_configs = [data[x] for x in data.keys() if "initiator" in x]
for k, v in data.items():
if "target" in k:
v.update({"results_dir": args.results})
if data[k]["mode"] == "spdk":
target_obj = SPDKTarget(k, data["general"], v)
elif data[k]["mode"] == "kernel":
target_obj = KernelTarget(k, data["general"], v)
pass
elif "initiator" in k:
if data[k]["mode"] == "spdk":
init_obj = SPDKInitiator(k, data["general"], v)
elif data[k]["mode"] == "kernel":
init_obj = KernelInitiator(k, data["general"], v)
initiators.append(init_obj)
elif "fio" in k:
fio_workloads = itertools.product(data[k]["bs"],
data[k]["qd"],
data[k]["rw"])
fio_run_time = data[k]["run_time"]
fio_ramp_time = data[k]["ramp_time"]
fio_rw_mix_read = data[k]["rwmixread"]
fio_run_num = data[k]["run_num"] if "run_num" in data[k].keys() else None
fio_num_jobs = data[k]["num_jobs"] if "num_jobs" in data[k].keys() else None
fio_rate_iops = 0
if "rate_iops" in data[k]:
fio_rate_iops = data[k]["rate_iops"]
else:
continue
try:
os.mkdir(args.results)
except FileExistsError:
pass
target_obj.tgt_start()
for i in initiators:
i.discover_subsystems(i.target_nic_ips, target_obj.subsys_no)
if i.enable_adq:
i.adq_configure_tc()
    # Poor man's threading
# Run FIO tests
for block_size, io_depth, rw in fio_workloads:
threads = []
configs = []
for i in initiators:
if i.mode == "kernel":
i.kernel_init_connect()
cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
fio_num_jobs, fio_ramp_time, fio_run_time, fio_rate_iops)
configs.append(cfg)
for i, cfg in zip(initiators, configs):
t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
threads.append(t)
if target_obj.enable_sar:
sar_file_name = "_".join([str(block_size), str(rw), str(io_depth), "sar"])
sar_file_name = ".".join([sar_file_name, "txt"])
t = threading.Thread(target=target_obj.measure_sar, args=(args.results, sar_file_name))
threads.append(t)
if target_obj.enable_pcm:
pcm_fnames = ["%s_%s_%s_%s.csv" % (block_size, rw, io_depth, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]
pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm, args=(args.results, pcm_fnames[0],))
pcm_mem_t = threading.Thread(target=target_obj.measure_pcm_memory, args=(args.results, pcm_fnames[1],))
pcm_pow_t = threading.Thread(target=target_obj.measure_pcm_power, args=(args.results, pcm_fnames[2],))
threads.append(pcm_cpu_t)
threads.append(pcm_mem_t)
threads.append(pcm_pow_t)
if target_obj.enable_bandwidth:
bandwidth_file_name = "_".join(["bandwidth", str(block_size), str(rw), str(io_depth)])
bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
t = threading.Thread(target=target_obj.measure_network_bandwidth, args=(args.results, bandwidth_file_name,))
threads.append(t)
if target_obj.enable_dpdk_memory:
t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(args.results,))
threads.append(t)
if target_obj.enable_adq:
ethtool_thread = threading.Thread(target=target_obj.ethtool_after_fio_ramp, args=(fio_ramp_time,))
threads.append(ethtool_thread)
for t in threads:
t.start()
for t in threads:
t.join()
for i in initiators:
if i.mode == "kernel":
i.kernel_init_disconnect()
i.copy_result_files(args.results)
target_obj.restore_governor()
target_obj.restore_tuned()
target_obj.restore_services()
target_obj.restore_sysctl()
for i in initiators:
i.restore_governor()
i.restore_tuned()
i.restore_services()
i.restore_sysctl()
target_obj.parse_results(args.results, args.csv_filename)
|
[] |
[] |
[
"SPDK_WORKSPACE"
] |
[]
|
["SPDK_WORKSPACE"]
|
python
| 1 | 0 | |
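For reference, here is a minimal standalone sketch of the round-robin filename split and per-section iodepth scaling performed at the end of the script above; the function name and signature are illustrative, not taken from the original file.

def split_filenames_round_robin(filenames, num_sections, io_depth, num_jobs):
    # Distribute filenames across fio job sections round-robin and scale the
    # per-section queue depth by the number of disks each section receives,
    # mirroring the logic in the script above.
    per_section, remainder = divmod(len(filenames), num_sections)
    it = iter(filenames)
    sections = []
    for _ in range(num_sections):
        chunk = [next(it) for _ in range(per_section)]
        if remainder:
            chunk.append(next(it))
            remainder -= 1
        sections.append(chunk)
    # Queue depth is never allowed to drop to zero, as in the original loop.
    return [(chunk, max(1, round(io_depth * len(chunk) / num_jobs))) for chunk in sections]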
venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/distlib/compat.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2017 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import absolute_import
import os
import re
import sys
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
if sys.version_info[0] < 3: # pragma: no cover
from StringIO import StringIO
string_types = basestring,
text_type = unicode
from types import FileType as file_type
import __builtin__ as builtins
import ConfigParser as configparser
from ._backport import shutil
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
pathname2url, ContentTooShortError, splittype)
def quote(s):
if isinstance(s, unicode):
s = s.encode('utf-8')
return _quote(s)
import urllib2
from urllib2 import (Request, urlopen, URLError, HTTPError,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPHandler, HTTPRedirectHandler,
build_opener)
if ssl:
from urllib2 import HTTPSHandler
import httplib
import xmlrpclib
import Queue as queue
from HTMLParser import HTMLParser
import htmlentitydefs
raw_input = raw_input
from itertools import ifilter as filter
from itertools import ifilterfalse as filterfalse
_userprog = None
def splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
global _userprog
if _userprog is None:
import re
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match: return match.group(1, 2)
return None, host
else: # pragma: no cover
from io import StringIO
string_types = str,
text_type = str
from io import TextIOWrapper as file_type
import builtins
import configparser
import shutil
from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
unquote, urlsplit, urlunsplit, splittype)
from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
pathname2url,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPHandler, HTTPRedirectHandler,
build_opener)
if ssl:
from urllib.request import HTTPSHandler
from urllib.error import HTTPError, URLError, ContentTooShortError
import http.client as httplib
import urllib.request as urllib2
import xmlrpc.client as xmlrpclib
import queue
from html.parser import HTMLParser
import html.entities as htmlentitydefs
raw_input = input
from itertools import filterfalse
filter = filter
try:
from ssl import match_hostname, CertificateError
except ImportError: # pragma: no cover
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
parts = dn.split('.')
leftmost, remainder = parts[0], parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
try:
from types import SimpleNamespace as Container
except ImportError: # pragma: no cover
class Container(object):
"""
A generic container for when multiple values need to be returned
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
try:
from shutil import which
except ImportError: # pragma: no cover
# Implementation from Python 3.3
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
# ZipFile is a context manager in 2.7, but not in 2.6
from zipfile import ZipFile as BaseZipFile
if hasattr(BaseZipFile, '__enter__'): # pragma: no cover
ZipFile = BaseZipFile
else: # pragma: no cover
from zipfile import ZipExtFile as BaseZipExtFile
class ZipExtFile(BaseZipExtFile):
def __init__(self, base):
self.__dict__.update(base.__dict__)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
# return None, so if an exception occurred, it will propagate
class ZipFile(BaseZipFile):
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
# return None, so if an exception occurred, it will propagate
def open(self, *args, **kwargs):
base = BaseZipFile.open(self, *args, **kwargs)
return ZipExtFile(base)
try:
from platform import python_implementation
except ImportError: # pragma: no cover
def python_implementation():
"""Return a string identifying the Python implementation."""
if 'PyPy' in sys.version:
return 'PyPy'
if os.name == 'java':
return 'Jython'
if sys.version.startswith('IronPython'):
return 'IronPython'
return 'CPython'
try:
import sysconfig
except ImportError: # pragma: no cover
from ._backport import sysconfig
try:
callable = callable
except NameError: # pragma: no cover
from collections import Callable
def callable(obj):
return isinstance(obj, Callable)
try:
fsencode = os.fsencode
fsdecode = os.fsdecode
except AttributeError: # pragma: no cover
# Issue #99: on some systems (e.g. containerised),
# sys.getfilesystemencoding() returns None, and we need a real value,
# so fall back to utf-8. From the CPython 2.7 docs relating to Unix and
# sys.getfilesystemencoding(): the return value is "the user’s preference
# according to the result of nl_langinfo(CODESET), or None if the
# nl_langinfo(CODESET) failed."
_fsencoding = sys.getfilesystemencoding() or 'utf-8'
if _fsencoding == 'mbcs':
_fserrors = 'strict'
else:
_fserrors = 'surrogateescape'
def fsencode(filename):
if isinstance(filename, bytes):
return filename
elif isinstance(filename, text_type):
return filename.encode(_fsencoding, _fserrors)
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
def fsdecode(filename):
if isinstance(filename, text_type):
return filename
elif isinstance(filename, bytes):
return filename.decode(_fsencoding, _fserrors)
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
try:
from tokenize import detect_encoding
except ImportError: # pragma: no cover
from codecs import BOM_UTF8, lookup
import re
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
try:
filename = readline.__self__.name
except AttributeError:
filename = None
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
# Decode as UTF-8. Either the line is an encoding declaration,
# in which case it should be pure ASCII, or it must be UTF-8
# per default encoding.
line_string = line.decode('utf-8')
except UnicodeDecodeError:
msg = "invalid or missing encoding declaration"
if filename is not None:
msg = '{} for {!r}'.format(msg, filename)
raise SyntaxError(msg)
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
if filename is None:
msg = "unknown encoding: " + encoding
else:
msg = "unknown encoding for {!r}: {}".format(filename,
encoding)
raise SyntaxError(msg)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
if filename is None:
msg = 'encoding problem: utf-8'
else:
msg = 'encoding problem for {!r}: utf-8'.format(filename)
raise SyntaxError(msg)
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
# For converting & <-> & etc.
try:
from html import escape
except ImportError:
from cgi import escape
if sys.version_info[:2] < (3, 4):
unescape = HTMLParser().unescape
else:
from html import unescape
try:
from collections import ChainMap
except ImportError: # pragma: no cover
from collections import MutableMapping
try:
from reprlib import recursive_repr as _recursive_repr
except ImportError:
def _recursive_repr(fillvalue='...'):
'''
Decorator to make a repr function return fillvalue for a recursive
call
'''
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can
be accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self): # like Django's Context.push()
'New ChainMap with a new dict followed by all previous maps.'
return self.__class__({}, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
try:
from importlib.util import cache_from_source # Python >= 3.4
except ImportError: # pragma: no cover
try:
from imp import cache_from_source
except ImportError: # pragma: no cover
def cache_from_source(path, debug_override=None):
assert path.endswith('.py')
if debug_override is None:
debug_override = __debug__
if debug_override:
suffix = 'c'
else:
suffix = 'o'
return path + suffix
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
## {{{ http://code.activestate.com/recipes/576693/ (r9)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running=None):
'od.__repr__() <==> repr(od)'
if not _repr_running: _repr_running = {}
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
try:
from logging.config import BaseConfigurator, valid_ident
except ImportError: # pragma: no cover
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, string_types):
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
|
[] |
[] |
[
"PATH",
"PATHEXT"
] |
[]
|
["PATH", "PATHEXT"]
|
python
| 2 | 0 | |
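A brief usage note on the ChainMap backport in the file above: lookups fall through the chained mappings while writes only touch the first one. The example below uses the standard-library collections.ChainMap, which has the same semantics as the backport.

from collections import ChainMap  # same behaviour as the backport above

defaults = {"retries": 3, "timeout": 30}
overrides = {"timeout": 5}
cfg = ChainMap(overrides, defaults)

assert cfg["timeout"] == 5   # found in the first mapping
assert cfg["retries"] == 3   # falls through to the second mapping
cfg["verbose"] = True        # writes always go to the first mapping
assert "verbose" in overrides and "verbose" not in defaults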
src/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'afterlife.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/exporter/azureblob_exporter.go
|
package exporter
import (
"bytes"
"context"
"fmt"
"log"
"net/url"
"os"
"strings"
"github.com/Azure/aks-periscope/pkg/interfaces"
"github.com/Azure/aks-periscope/pkg/utils"
"github.com/Azure/azure-storage-blob-go/azblob"
)
const (
maxContainerNameLength = 63
)
// AzureBlobExporter defines an Azure Blob Exporter
type AzureBlobExporter struct{}
var _ interfaces.Exporter = &AzureBlobExporter{}
// Export implements the interface method
func (exporter *AzureBlobExporter) Export(files []string) error {
APIServerFQDN, err := utils.GetAPIServerFQDN()
if err != nil {
return err
}
containerName := strings.Replace(APIServerFQDN, ".", "-", -1)
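// Derive the container name from the API server FQDN: cut at "-hcp-" if present
// (otherwise cap at maxContainerNameLength) and trim any trailing dashes.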
idx := strings.Index(containerName, "-hcp-")
if idx == -1 {
idx = maxContainerNameLength
}
containerName = strings.TrimRight(containerName[:idx], "-")
ctx := context.Background()
pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
accountName := os.Getenv("AZURE_BLOB_ACCOUNT_NAME")
sasKey := os.Getenv("AZURE_BLOB_SAS_KEY")
url, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/%s%s", accountName, containerName, sasKey))
if err != nil {
return fmt.Errorf("Fail to build blob container url: %+v", err)
}
containerURL := azblob.NewContainerURL(*url, pipeline)
_, err = containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
storageError, ok := err.(azblob.StorageError)
if ok {
switch storageError.ServiceCode() {
case azblob.ServiceCodeContainerAlreadyExists:
default:
return fmt.Errorf("Failed to create blob with storage error: %+v", err)
}
} else {
return fmt.Errorf("Failed to create blob: %+v", err)
}
}
for _, file := range files {
appendBlobURL := containerURL.NewAppendBlobURL(strings.TrimPrefix(file, "/aks-periscope/"))
blobGetPropertiesResponse, err := appendBlobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
if err != nil {
storageError, ok := err.(azblob.StorageError)
if ok {
switch storageError.ServiceCode() {
case azblob.ServiceCodeBlobNotFound:
_, err = appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
if err != nil {
return fmt.Errorf("Fail to create blob for file %s: %+v", file, err)
}
default:
return fmt.Errorf("Failed to create blob with storage error: %+v", err)
}
} else {
return fmt.Errorf("Failed to create blob: %+v", err)
}
}
var start int64
if blobGetPropertiesResponse != nil {
start = blobGetPropertiesResponse.ContentLength()
}
f, err := os.Open(file)
if err != nil {
return fmt.Errorf("Fail to open file %s: %+v", file, err)
}
fileInfo, err := f.Stat()
if err != nil {
return fmt.Errorf("Fail to get file info for file %s: %+v", file, err)
}
end := fileInfo.Size()
fileSize := end - start
if fileSize > 0 {
for start < end {
lengthToWrite := end - start
if lengthToWrite > azblob.AppendBlobMaxAppendBlockBytes {
lengthToWrite = azblob.AppendBlobMaxAppendBlockBytes
}
b := make([]byte, lengthToWrite)
_, err = f.ReadAt(b, start)
if err != nil {
return fmt.Errorf("Fail to read file %s: %+v", file, err)
}
log.Printf("\tappend blob file: %s, start position: %d, end position: %d\n", file, start, start+lengthToWrite)
_, err = appendBlobURL.AppendBlock(ctx, bytes.NewReader(b), azblob.AppendBlobAccessConditions{}, nil)
if err != nil {
return fmt.Errorf("Fail to append file %s to blob: %+v", file, err)
}
start += lengthToWrite
}
}
}
return nil
}
|
[
"\"AZURE_BLOB_ACCOUNT_NAME\"",
"\"AZURE_BLOB_SAS_KEY\""
] |
[] |
[
"AZURE_BLOB_SAS_KEY",
"AZURE_BLOB_ACCOUNT_NAME"
] |
[]
|
["AZURE_BLOB_SAS_KEY", "AZURE_BLOB_ACCOUNT_NAME"]
|
go
| 2 | 0 | |
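As a rough illustration of the resume-and-chunk pattern in the exporter above: it starts appending at the blob's existing length and uploads at most one maximum-size block per call. This is a sketch only; the 4 MiB cap stands in for azblob.AppendBlobMaxAppendBlockBytes and the function name is made up.

MAX_APPEND_BLOCK = 4 * 1024 * 1024  # assumed stand-in for azblob.AppendBlobMaxAppendBlockBytes

def iter_append_blocks(path, start):
    # Yield (offset, chunk) pairs from `start` to end-of-file, each chunk no
    # larger than MAX_APPEND_BLOCK, so an append blob can be resumed from the
    # length it already has.
    with open(path, "rb") as f:
        f.seek(start)
        offset = start
        while True:
            chunk = f.read(MAX_APPEND_BLOCK)
            if not chunk:
                break
            yield offset, chunk
            offset += len(chunk)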
go/vt/vtgate/planbuilder/symtab_test.go
|
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package planbuilder
import (
"encoding/json"
"testing"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
func TestSymtabAddVSchemaTable(t *testing.T) {
tname := sqlparser.TableName{Name: sqlparser.NewTableIdent("t")}
rb := &route{}
tcases := []struct {
in []*vindexes.Table
authoritative bool
vindexes [][]string
err string
}{{
// Single table.
in: []*vindexes.Table{{
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}, {
Name: sqlparser.NewColIdent("C2"),
}},
}},
authoritative: false,
vindexes: [][]string{{}},
}, {
// Column vindex specified.
in: []*vindexes.Table{{
ColumnVindexes: []*vindexes.ColumnVindex{{
Columns: []sqlparser.ColIdent{sqlparser.NewColIdent("C1")},
}},
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}, {
Name: sqlparser.NewColIdent("C2"),
}},
}},
authoritative: false,
vindexes: [][]string{{"c1"}},
}, {
// Multi-column vindex.
in: []*vindexes.Table{{
ColumnVindexes: []*vindexes.ColumnVindex{{
Columns: []sqlparser.ColIdent{
sqlparser.NewColIdent("C1"),
sqlparser.NewColIdent("C2"),
},
}},
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}, {
Name: sqlparser.NewColIdent("C2"),
}},
}},
authoritative: false,
vindexes: [][]string{{"c1"}},
}, {
// AutoIncrement.
in: []*vindexes.Table{{
AutoIncrement: &vindexes.AutoIncrement{
Column: sqlparser.NewColIdent("C1"),
},
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}, {
Name: sqlparser.NewColIdent("C2"),
}},
}},
authoritative: false,
vindexes: [][]string{{}},
}, {
// Column vindex specifies a column not in list.
in: []*vindexes.Table{{
ColumnVindexes: []*vindexes.ColumnVindex{{
Columns: []sqlparser.ColIdent{sqlparser.NewColIdent("C1")},
}},
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C2"),
}},
}},
authoritative: false,
vindexes: [][]string{{"c1"}},
}, {
// Column vindex specifies columns with none in list.
in: []*vindexes.Table{{
ColumnVindexes: []*vindexes.ColumnVindex{{
Columns: []sqlparser.ColIdent{
sqlparser.NewColIdent("C1"),
sqlparser.NewColIdent("C2"),
},
}},
}},
authoritative: false,
vindexes: [][]string{{"c1"}},
}, {
// AutoIncrement specifies a column not in list.
in: []*vindexes.Table{{
AutoIncrement: &vindexes.AutoIncrement{
Column: sqlparser.NewColIdent("C1"),
},
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C2"),
}},
}},
authoritative: false,
vindexes: [][]string{{}},
}, {
// Two tables.
in: []*vindexes.Table{{
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C2"),
}},
}, {
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}},
}},
authoritative: false,
vindexes: [][]string{{}, {}},
}, {
// Two tables, with column vindexes.
in: []*vindexes.Table{{
ColumnVindexes: []*vindexes.ColumnVindex{{
Columns: []sqlparser.ColIdent{
sqlparser.NewColIdent("C1"),
},
}},
}, {
ColumnVindexes: []*vindexes.ColumnVindex{{
Columns: []sqlparser.ColIdent{
sqlparser.NewColIdent("C2"),
},
}},
}},
authoritative: false,
vindexes: [][]string{{"c1"}, {"c2"}},
}, {
// One table with two column vindexes.
in: []*vindexes.Table{{
ColumnVindexes: []*vindexes.ColumnVindex{{
Columns: []sqlparser.ColIdent{
sqlparser.NewColIdent("C1"),
},
}, {
Columns: []sqlparser.ColIdent{
sqlparser.NewColIdent("C2"),
},
}},
}},
authoritative: false,
vindexes: [][]string{{"c1", "c2"}, {}},
}, {
// First table is authoritative.
in: []*vindexes.Table{{
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}, {
Name: sqlparser.NewColIdent("C2"),
}},
ColumnListAuthoritative: true,
}, {
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}},
}},
authoritative: true,
vindexes: [][]string{{}, {}},
}, {
// Both tables are authoritative.
in: []*vindexes.Table{{
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}, {
Name: sqlparser.NewColIdent("C2"),
}},
ColumnListAuthoritative: true,
}, {
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}},
ColumnListAuthoritative: true,
}},
authoritative: true,
vindexes: [][]string{{}, {}},
}, {
// Second table is authoritative.
in: []*vindexes.Table{{
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}, {
Name: sqlparser.NewColIdent("C2"),
}},
}, {
Name: sqlparser.NewTableIdent("t1"),
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}},
ColumnListAuthoritative: true,
}},
authoritative: true,
vindexes: [][]string{{}, {}},
err: "intermixing of authoritative and non-authoritative tables not allowed: t1",
}, {
// Cannot add to authoritative table (column list).
in: []*vindexes.Table{{
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}},
ColumnListAuthoritative: true,
}, {
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C2"),
}},
}},
err: "column C2 not found in t",
}, {
// Cannot add to authoritative table (column vindex).
in: []*vindexes.Table{{
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}},
ColumnListAuthoritative: true,
}, {
ColumnVindexes: []*vindexes.ColumnVindex{{
Columns: []sqlparser.ColIdent{
sqlparser.NewColIdent("C2"),
},
}},
}},
err: "column C2 not found in t",
}, {
// Cannot add to authoritative table (autoinc).
in: []*vindexes.Table{{
Columns: []vindexes.Column{{
Name: sqlparser.NewColIdent("C1"),
}},
ColumnListAuthoritative: true,
}, {
AutoIncrement: &vindexes.AutoIncrement{
Column: sqlparser.NewColIdent("C2"),
},
}},
err: "column C2 not found in t",
}}
out := []string{"c1", "c2"}
for _, tcase := range tcases {
st := newSymtab()
vindexMaps, err := st.AddVSchemaTable(tname, tcase.in, rb)
tcasein, _ := json.Marshal(tcase.in)
if err != nil {
if err.Error() != tcase.err {
t.Errorf("st.AddVSchemaTable(%s) err: %v, want %s", tcasein, err, tcase.err)
}
continue
} else if tcase.err != "" {
t.Errorf("st.AddVSchemaTable(%s) succeeded, want error: %s", tcasein, tcase.err)
continue
}
tab := st.tables[tname]
for _, col := range out {
if tab.columns[col] == nil {
t.Errorf("st.AddVSchemaTable(%s): column %s not found", tcasein, col)
}
}
for i, cols := range tcase.vindexes {
for _, col := range cols {
c := tab.columns[col]
if c == nil {
t.Errorf("st.AddVSchemaTable(%s): column %s not found", tcasein, col)
}
if _, ok := vindexMaps[i][c]; !ok {
t.Errorf("st.AddVSchemaTable(%s).vindexMap[%d]: column %s not found", tcasein, i, col)
}
}
}
if tab.isAuthoritative != tcase.authoritative {
t.Errorf("st.AddVSchemaTable(%s).authoritative: %v want %v", tcasein, tab.isAuthoritative, tcase.authoritative)
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
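The Go test above follows the table-driven pattern: a slice of cases iterated inside one test body, with each case carrying inputs and expectations. A minimal Python equivalent using unittest's subTest, with made-up case data, looks like this.

import unittest

class TableDrivenExample(unittest.TestCase):
    def test_cases(self):
        cases = [
            # (input, expected) pairs play the role of the tcases slice above.
            ("C1", "c1"),
            ("C2", "c2"),
        ]
        for raw, want in cases:
            with self.subTest(raw=raw):
                self.assertEqual(raw.lower(), want)

if __name__ == "__main__":
    unittest.main()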
newrelic-agent/src/test/java/com/newrelic/agent/config/SystemPropertyProviderTest.java
|
/*
*
* * Copyright 2020 New Relic Corporation. All rights reserved.
* * SPDX-License-Identifier: Apache-2.0
*
*/
package com.newrelic.agent.config;
import com.newrelic.agent.SaveSystemPropertyProviderRule;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
public class SystemPropertyProviderTest {
@Test
public void testSystemPropertyProviderGeneralSystemProps() {
Properties props = new Properties();
props.put("newrelic.config.process_host.display_name", "hello");
props.put("newrelic.config.app_name", "people");
props.put("newrelic.config.log_file_name", "logfile.log");
SystemPropertyProvider provider = new SystemPropertyProvider(
new SaveSystemPropertyProviderRule.TestSystemProps(props),
new SaveSystemPropertyProviderRule.TestEnvironmentFacade()
);
assertNotNull("Properties can not be null", provider.getNewRelicPropertiesWithoutPrefix().get("process_host.display_name"));
assertEquals("hello", provider.getNewRelicPropertiesWithoutPrefix().get("process_host.display_name"));
assertEquals("people", provider.getNewRelicPropertiesWithoutPrefix().get("app_name"));
assertEquals("logfile.log", provider.getNewRelicPropertiesWithoutPrefix().get("log_file_name"));
}
@Test
public void testSystemPropertyProviderGeneralEnvVars() {
Map<String, String> envs = new HashMap<>(System.getenv());
envs.put("NEW_RELIC_LOG", "logfile.log");
SystemPropertyProvider provider = new SystemPropertyProvider(
new SaveSystemPropertyProviderRule.TestSystemProps(),
new SaveSystemPropertyProviderRule.TestEnvironmentFacade(envs)
);
assertNotNull("Properties can not be null", provider.getNewRelicEnvVarsWithoutPrefix());
assertEquals("logfile.log", provider.getNewRelicEnvVarsWithoutPrefix().get("log_file_name"));
}
@Test
public void testSystemPropertyProviderGeneralEnvProps() {
// Covers the case where config properties are passed as environment variables.
Map<String, String> envs = new HashMap<>(System.getenv());
envs.put("newrelic.config.process_host.display_name", "hello");
envs.put("newrelic.config.app_name", "people");
envs.put("newrelic.config.log_file_name", "logfile.log");
SystemPropertyProvider provider = new SystemPropertyProvider(
new SaveSystemPropertyProviderRule.TestSystemProps(),
new SaveSystemPropertyProviderRule.TestEnvironmentFacade(envs)
);
assertNotNull("Properties can not be null", provider.getNewRelicEnvVarsWithoutPrefix().get("process_host.display_name"));
assertEquals("hello", provider.getNewRelicEnvVarsWithoutPrefix().get("process_host.display_name"));
assertEquals("people", provider.getNewRelicEnvVarsWithoutPrefix().get("app_name"));
assertEquals("logfile.log", provider.getNewRelicEnvVarsWithoutPrefix().get("log_file_name"));
}
@Test
public void testEnvironmentVariable() {
Map<String, String> envs = new HashMap<>();
envs.put("NEW_RELIC_ANALYTICS_EVENTS_MAX_SAMPLES_STORED", "12345");
envs.put("NEW_RELIC_DATASTORE_DATABASE_NAME_REPORTING_ENABLED", "false");
envs.put("NEW_RELIC_PROCESS_HOST_DISPLAY_NAME", "hello");
envs.put("NEW_RELIC_APP_NAME", "people");
envs.put("KUBERNETES_SERVICE_HOST", "10.96.0.1");
envs.put("newrelic.config.distributed_tracing.enabled", "true");
SystemPropertyProvider provider = new SystemPropertyProvider(
new SaveSystemPropertyProviderRule.TestSystemProps(),
new SaveSystemPropertyProviderRule.TestEnvironmentFacade(envs)
);
assertEquals("12345", provider.getEnvironmentVariable("newrelic.config.analytics_events.max_samples_stored"));
assertEquals("false", provider.getEnvironmentVariable("newrelic.config.datastore.database_name_reporting.enabled"));
assertEquals("hello", provider.getEnvironmentVariable("NEW_RELIC_PROCESS_HOST_DISPLAY_NAME"));
assertEquals("people", provider.getEnvironmentVariable("NEW_RELIC_APP_NAME"));
assertEquals("10.96.0.1", provider.getEnvironmentVariable("KUBERNETES_SERVICE_HOST"));
assertEquals("true", provider.getEnvironmentVariable("newrelic.config.distributed_tracing.enabled"));
}
@Test
public void testGetNewRelicSystemProperties() {
Properties props = new Properties();
props.put("newrelic.config.process_host.display_name", "hello");
props.put("newrelic.config.app_name", "people");
props.put("newrelic.config.log_file_name", "logfile.log");
SystemPropertyProvider provider = new SystemPropertyProvider(
new SaveSystemPropertyProviderRule.TestSystemProps(props),
new SaveSystemPropertyProviderRule.TestEnvironmentFacade()
);
assertNotNull("Properties can not be null", provider.getNewRelicSystemProperties().get("newrelic.config.process_host.display_name"));
assertEquals("hello", provider.getNewRelicSystemProperties().get("newrelic.config.process_host.display_name"));
assertEquals("people", provider.getNewRelicSystemProperties().get("newrelic.config.app_name"));
assertEquals("logfile.log", provider.getNewRelicSystemProperties().get("newrelic.config.log_file_name"));
}
@Test
public void testGetSystemProperty() {
Properties props = new Properties();
props.put("newrelic.config.process_host.display_name", "hello");
props.put("newrelic.config.app_name", "people");
props.put("newrelic.config.log_file_name", "logfile.log");
SystemPropertyProvider provider = new SystemPropertyProvider(
new SaveSystemPropertyProviderRule.TestSystemProps(props),
new SaveSystemPropertyProviderRule.TestEnvironmentFacade()
);
assertEquals("hello", provider.getSystemProperty("newrelic.config.process_host.display_name"));
assertEquals("people", provider.getSystemProperty("newrelic.config.app_name"));
assertEquals("logfile.log", provider.getSystemProperty("newrelic.config.log_file_name"));
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
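The test above exercises the convention of deriving environment-variable names from newrelic.config.* keys (dots become underscores, the prefix becomes NEW_RELIC_). A hedged sketch of that mapping follows; it is illustrative only and not the agent's actual lookup code.

def env_key_for(config_key):
    # Illustrative mapping only: strip the "newrelic.config." prefix, replace
    # dots with underscores, upper-case, then prepend "NEW_RELIC_".
    prefix = "newrelic.config."
    assert config_key.startswith(prefix)
    return "NEW_RELIC_" + config_key[len(prefix):].replace(".", "_").upper()

assert env_key_for("newrelic.config.analytics_events.max_samples_stored") == "NEW_RELIC_ANALYTICS_EVENTS_MAX_SAMPLES_STORED"
assert env_key_for("newrelic.config.datastore.database_name_reporting.enabled") == "NEW_RELIC_DATASTORE_DATABASE_NAME_REPORTING_ENABLED"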
cmd/serve.go
|
// Copyright 2020 Security Scorecard Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"html/template"
"net/http"
"os"
"strings"
"github.com/spf13/cobra"
"github.com/ossf/scorecard/v4/checks"
"github.com/ossf/scorecard/v4/clients"
"github.com/ossf/scorecard/v4/clients/githubrepo"
"github.com/ossf/scorecard/v4/log"
"github.com/ossf/scorecard/v4/pkg"
)
//nolint:gochecknoinits
func init() {
rootCmd.AddCommand(serveCmd)
}
var serveCmd = &cobra.Command{
Use: "serve",
Short: "Serve the scorecard program over http",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
logger := log.NewLogger(log.Level(flagLogLevel))
t, err := template.New("webpage").Parse(tpl)
if err != nil {
// TODO(log): Should this actually panic?
logger.Error(err, "parsing webpage template")
panic(err)
}
http.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) {
repoParam := r.URL.Query().Get("repo")
const length = 3
s := strings.SplitN(repoParam, "/", length)
if len(s) != length {
rw.WriteHeader(http.StatusBadRequest)
}
repo, err := githubrepo.MakeGithubRepo(repoParam)
if err != nil {
rw.WriteHeader(http.StatusBadRequest)
}
ctx := r.Context()
repoClient := githubrepo.CreateGithubRepoClient(ctx, logger)
ossFuzzRepoClient, err := githubrepo.CreateOssFuzzRepoClient(ctx, logger)
vulnsClient := clients.DefaultVulnerabilitiesClient()
if err != nil {
logger.Error(err, "initializing clients")
rw.WriteHeader(http.StatusInternalServerError)
}
defer ossFuzzRepoClient.Close()
ciiClient := clients.DefaultCIIBestPracticesClient()
repoResult, err := pkg.RunScorecards(ctx, repo, "HEAD" /*commitSHA*/, false /*raw*/, checks.AllChecks, repoClient,
ossFuzzRepoClient, ciiClient, vulnsClient)
if err != nil {
logger.Error(err, "running enabled scorecard checks on repo")
rw.WriteHeader(http.StatusInternalServerError)
}
if r.Header.Get("Content-Type") == "application/json" {
if err := repoResult.AsJSON(flagShowDetails, log.Level(flagLogLevel), rw); err != nil {
// TODO(log): Improve error message
logger.Error(err, "")
rw.WriteHeader(http.StatusInternalServerError)
}
return
}
if err := t.Execute(rw, repoResult); err != nil {
// TODO(log): Improve error message
logger.Error(err, "")
}
})
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
fmt.Printf("Listening on localhost:%s\n", port)
err = http.ListenAndServe(fmt.Sprintf("0.0.0.0:%s", port), nil)
if err != nil {
// TODO(log): Should this actually panic?
logger.Error(err, "listening and serving")
panic(err)
}
},
}
const tpl = `
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Scorecard Results for: {{.Repo}}</title>
</head>
<body>
{{range .Checks}}
<div>
<p>{{ .Name }}: {{ .Pass }}</p>
</div>
{{end}}
</body>
</html>`
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
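Two small behaviours of the serve command above, restated as a sketch: the "repo" query parameter must split into exactly three segments, and the listen port falls back to 8080 when PORT is unset. The helper name below is made up for illustration.

import os

def parse_repo_param(repo_param):
    # The handler above rejects any "repo" value that does not split into
    # exactly three segments, e.g. "github.com/owner/name".
    parts = repo_param.split("/", 2)
    if len(parts) != 3:
        raise ValueError("expected repo as host/owner/name")
    return parts

port = os.getenv("PORT", "8080")  # same fallback the server uses when PORT is unset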
build/tools/repopick.py
|
#!/usr/bin/env python
#
# Copyright (C) 2013-15 The CyanogenMod Project
# (C) 2017 The LineageOS Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Run repopick.py -h for a description of this utility.
#
from __future__ import print_function
import sys
import json
import os
import subprocess
import re
import argparse
import textwrap
from functools import cmp_to_key
from xml.etree import ElementTree
try:
import requests
except ImportError:
try:
# For python3
import urllib.error
import urllib.request
except ImportError:
# For python2
import imp
import urllib2
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.request = urllib2
# Verifies whether pathA is a subdirectory (or the same) as pathB
def is_subdir(a, b):
a = os.path.realpath(a) + '/'
b = os.path.realpath(b) + '/'
return b == a[:len(b)]
def fetch_query_via_ssh(remote_url, query):
"""Given a remote_url and a query, return the list of changes that fit it
This function is slightly messy - the ssh api does not return data in the same structure as the HTTP REST API
We have to get the data, then transform it to match what we're expecting from the HTTP REST API"""
if remote_url.count(':') == 2:
(uri, userhost, port) = remote_url.split(':')
userhost = userhost[2:]
elif remote_url.count(':') == 1:
(uri, userhost) = remote_url.split(':')
userhost = userhost[2:]
port = 29418
else:
raise Exception('Malformed URI: Expecting ssh://[user@]host[:port]')
out = subprocess.check_output(['ssh', '-x', '-p{0}'.format(port), userhost, 'gerrit', 'query', '--format=JSON --patch-sets --current-patch-set', query])
if not hasattr(out, 'encode'):
out = out.decode()
reviews = []
for line in out.split('\n'):
try:
data = json.loads(line)
# make our data look like the http rest api data
review = {
'branch': data['branch'],
'change_id': data['id'],
'current_revision': data['currentPatchSet']['revision'],
'number': int(data['number']),
'revisions': {patch_set['revision']: {
'_number': int(patch_set['number']),
'fetch': {
'ssh': {
'ref': patch_set['ref'],
'url': 'ssh://{0}:{1}/{2}'.format(userhost, port, data['project'])
}
},
'commit': {
'parents': [{ 'commit': parent } for parent in patch_set['parents']]
},
} for patch_set in data['patchSets']},
'subject': data['subject'],
'project': data['project'],
'status': data['status']
}
reviews.append(review)
except:
pass
args.quiet or print('Found {0} reviews'.format(len(reviews)))
return reviews
def fetch_query_via_http(remote_url, query):
if "requests" in sys.modules:
auth = None
if os.path.isfile(os.getenv("HOME") + "/.gerritrc"):
f = open(os.getenv("HOME") + "/.gerritrc", "r")
for line in f:
parts = line.rstrip().split("|")
if parts[0] in remote_url:
auth = requests.auth.HTTPBasicAuth(username=parts[1], password=parts[2])
statusCode = '-1'
if auth:
url = '{0}/a/changes/?q={1}&o=CURRENT_REVISION&o=ALL_REVISIONS&o=ALL_COMMITS'.format(remote_url, query)
data = requests.get(url, auth=auth)
statusCode = str(data.status_code)
if statusCode != '200':
# They didn't get good authorization or data, so let's try the old way
url = '{0}/changes/?q={1}&o=CURRENT_REVISION&o=ALL_REVISIONS&o=ALL_COMMITS'.format(remote_url, query)
data = requests.get(url)
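# Gerrit prefixes its JSON responses with the ")]}'" anti-XSSI marker, hence the [5:] slices below.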
reviews = json.loads(data.text[5:])
else:
"""Given a query, fetch the change numbers via http"""
url = '{0}/changes/?q={1}&o=CURRENT_REVISION&o=ALL_REVISIONS&o=ALL_COMMITS'.format(remote_url, query)
data = urllib.request.urlopen(url).read().decode('utf-8')
reviews = json.loads(data[5:])
for review in reviews:
review['number'] = review.pop('_number')
return reviews
def fetch_query(remote_url, query):
"""Wrapper for fetch_query_via_proto functions"""
if remote_url[0:3] == 'ssh':
return fetch_query_via_ssh(remote_url, query)
elif remote_url[0:4] == 'http':
return fetch_query_via_http(remote_url, query.replace(' ', '+'))
else:
raise Exception('Gerrit URL should be in the form http[s]://hostname/ or ssh://[user@]host[:port]')
if __name__ == '__main__':
# Default to LineageOS Gerrit
default_gerrit = 'https://review.lineageos.org'
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\
repopick.py is a utility to simplify the process of cherry picking
patches from LineageOS's Gerrit instance (or any gerrit instance of your choosing)
Given a list of change numbers, repopick will cd into the project path
and cherry pick the latest patch available.
With the --start-branch argument, the user can specify that a branch
should be created before cherry picking. This is useful for
cherry-picking many patches into a common branch which can be easily
abandoned later (good for testing others' changes.)
The --abandon-first argument, when used in conjunction with the
--start-branch option, will cause repopick to abandon the specified
branch in all repos first before performing any cherry picks.'''))
parser.add_argument('change_number', nargs='*', help='change number to cherry pick. Use {change number}/{patchset number} to get a specific revision.')
parser.add_argument('-i', '--ignore-missing', action='store_true', help='do not error out if a patch applies to a missing directory')
parser.add_argument('-s', '--start-branch', nargs=1, help='start the specified branch before cherry picking')
parser.add_argument('-r', '--reset', action='store_true', help='reset to initial state (abort cherry-pick) if there is a conflict')
parser.add_argument('-a', '--abandon-first', action='store_true', help='before cherry picking, abandon the branch specified in --start-branch')
parser.add_argument('-b', '--auto-branch', action='store_true', help='shortcut to "--start-branch auto --abandon-first --ignore-missing"')
parser.add_argument('-q', '--quiet', action='store_true', help='print as little as possible')
parser.add_argument('-v', '--verbose', action='store_true', help='print extra information to aid in debug')
parser.add_argument('-f', '--force', action='store_true', help='force cherry pick even if change is closed')
parser.add_argument('-p', '--pull', action='store_true', help='execute pull instead of cherry-pick')
parser.add_argument('-P', '--path', help='use the specified path for the change')
parser.add_argument('-t', '--topic', help='pick all commits from a specified topic')
parser.add_argument('-Q', '--query', help='pick all commits using the specified query')
parser.add_argument('-g', '--gerrit', default=default_gerrit, help='Gerrit Instance to use. Form proto://[user@]host[:port]')
parser.add_argument('-e', '--exclude', nargs=1, help='exclude a list of commit numbers separated by a ,')
parser.add_argument('-c', '--check-picked', type=int, default=10, help='pass the amount of commits to check for already picked changes')
args = parser.parse_args()
if not args.start_branch and args.abandon_first:
parser.error('if --abandon-first is set, you must also give the branch name with --start-branch')
if args.auto_branch:
args.abandon_first = True
args.ignore_missing = True
if not args.start_branch:
args.start_branch = ['auto']
if args.quiet and args.verbose:
parser.error('--quiet and --verbose cannot be specified together')
if (1 << bool(args.change_number) << bool(args.topic) << bool(args.query)) != 2:
parser.error('One (and only one) of change_number, topic, and query are allowed')
# Change current directory to the top of the tree
if 'ANDROID_BUILD_TOP' in os.environ:
top = os.environ['ANDROID_BUILD_TOP']
if not is_subdir(os.getcwd(), top):
sys.stderr.write('ERROR: You must run this tool from within $ANDROID_BUILD_TOP!\n')
sys.exit(1)
os.chdir(os.environ['ANDROID_BUILD_TOP'])
# Sanity check that we are being run from the top level of the tree
if not os.path.isdir('.repo'):
sys.stderr.write('ERROR: No .repo directory found. Please run this from the top of your tree.\n')
sys.exit(1)
# If --abandon-first is given, abandon the branch before starting
if args.abandon_first:
# Determine if the branch already exists; skip the abandon if it does not
plist = subprocess.check_output(['repo', 'info'])
if not hasattr(plist, 'encode'):
plist = plist.decode()
needs_abandon = False
for pline in plist.splitlines():
matchObj = re.match(r'Local Branches.*\[(.*)\]', pline)
if matchObj:
local_branches = re.split(r'\s*,\s*', matchObj.group(1))
if any(args.start_branch[0] in s for s in local_branches):
needs_abandon = True
if needs_abandon:
# Perform the abandon only if the branch already exists
if not args.quiet:
print('Abandoning branch: %s' % args.start_branch[0])
subprocess.check_output(['repo', 'abandon', args.start_branch[0]])
if not args.quiet:
print('')
# Get the master manifest from repo
# - convert project name and revision to a path
project_name_to_data = {}
manifest = subprocess.check_output(['repo', 'manifest'])
xml_root = ElementTree.fromstring(manifest)
projects = xml_root.findall('project')
remotes = xml_root.findall('remote')
default_revision = xml_root.findall('default')[0].get('revision')
#dump project data into a dict keyed by project name:
#{project_name: {revision: path}}
for project in projects:
name = project.get('name')
path = project.get('path')
revision = project.get('revision')
if revision is None:
for remote in remotes:
if remote.get('name') == project.get('remote'):
revision = remote.get('revision')
if revision is None:
revision = default_revision
if name not in project_name_to_data:
project_name_to_data[name] = {}
revision = revision.split('refs/heads/')[-1]
project_name_to_data[name][revision] = path
# get data on requested changes
reviews = []
change_numbers = []
def cmp_reviews(review_a, review_b):
current_a = review_a['current_revision']
parents_a = [r['commit'] for r in review_a['revisions'][current_a]['commit']['parents']]
current_b = review_b['current_revision']
parents_b = [r['commit'] for r in review_b['revisions'][current_b]['commit']['parents']]
if current_a in parents_b:
return -1
elif current_b in parents_a:
return 1
else:
return cmp(review_a['number'], review_b['number'])
if args.topic:
reviews = fetch_query(args.gerrit, 'topic:{0}'.format(args.topic))
change_numbers = [str(r['number']) for r in sorted(reviews, key=cmp_to_key(cmp_reviews))]
if args.query:
reviews = fetch_query(args.gerrit, args.query)
change_numbers = [str(r['number']) for r in sorted(reviews, key=cmp_to_key(cmp_reviews))]
if args.change_number:
change_url_re = re.compile('https?://.+?/([0-9]+(?:/[0-9]+)?)/?')
for c in args.change_number:
change_number = change_url_re.findall(c)
if change_number:
change_numbers.extend(change_number)
elif '-' in c:
templist = c.split('-')
for i in range(int(templist[0]), int(templist[1]) + 1):
change_numbers.append(str(i))
else:
change_numbers.append(c)
reviews = fetch_query(args.gerrit, ' OR '.join('change:{0}'.format(x.split('/')[0]) for x in change_numbers))
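# The block above accepts several change specifiers (examples are hypothetical):
#   123456          a single change number
#   123456/4        change 123456, patch set 4
#   123450-123460   an inclusive range of change numbers
#   https://review.lineageos.org/123456   a change URL (optionally ending in /patchset)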
# make list of things to actually merge
mergables = []
# If --exclude is given, create the list of commits to ignore
exclude = []
if args.exclude:
exclude = args.exclude[0].split(',')
for change in change_numbers:
patchset = None
if '/' in change:
(change, patchset) = change.split('/')
if change in exclude:
continue
change = int(change)
if patchset:
patchset = int(patchset)
review = next((x for x in reviews if x['number'] == change), None)
if review is None:
print('Change %d not found, skipping' % change)
continue
mergables.append({
'subject': review['subject'],
'project': review['project'],
'branch': review['branch'],
'change_id': review['change_id'],
'change_number': review['number'],
'status': review['status'],
'fetch': None,
'patchset': review['revisions'][review['current_revision']]['_number'],
})
mergables[-1]['fetch'] = review['revisions'][review['current_revision']]['fetch']
mergables[-1]['id'] = change
if patchset:
try:
mergables[-1]['fetch'] = [review['revisions'][x]['fetch'] for x in review['revisions'] if review['revisions'][x]['_number'] == patchset][0]
mergables[-1]['id'] = '{0}/{1}'.format(change, patchset)
mergables[-1]['patchset'] = patchset
except (IndexError, ValueError):
args.quiet or print('ERROR: The patch set {0}/{1} could not be found, using CURRENT_REVISION instead.'.format(change, patchset))
for item in mergables:
args.quiet or print('Applying change number {0}...'.format(item['id']))
# Check if change is open and exit if it's not, unless -f is specified
if (item['status'] != 'OPEN' and item['status'] != 'NEW' and item['status'] != 'DRAFT') and not args.query:
if args.force:
print('!! Force-picking a closed change !!\n')
else:
print('Change status is ' + item['status'] + '. Skipping the cherry pick.\nUse -f to force this pick.')
continue
# Convert the project name to a project path
# - check that the project path exists
project_path = None
if item['project'] in project_name_to_data and item['branch'] in project_name_to_data[item['project']]:
project_path = project_name_to_data[item['project']][item['branch']]
elif args.path:
project_path = args.path
elif args.ignore_missing:
print('WARNING: Skipping {0} since there is no project directory for: {1}\n'.format(item['id'], item['project']))
continue
else:
sys.stderr.write('ERROR: For {0}, could not determine the project path for project {1}\n'.format(item['id'], item['project']))
sys.exit(1)
# If --start-branch is given, create the branch (more than once per path is okay; repo ignores gracefully)
if args.start_branch:
subprocess.check_output(['repo', 'start', args.start_branch[0], project_path])
# Determine the maximum commits to check already picked changes
check_picked_count = args.check_picked
branch_commits_count = int(subprocess.check_output(['git', 'rev-list', '--count', 'HEAD'], cwd=project_path))
if branch_commits_count <= check_picked_count:
check_picked_count = branch_commits_count - 1
# Check if change is already picked to HEAD...HEAD~check_picked_count
found_change = False
for i in range(0, check_picked_count):
if subprocess.call(['git', 'cat-file', '-e', 'HEAD~{0}'.format(i)], cwd=project_path, stderr=open(os.devnull, 'wb')):
continue
output = subprocess.check_output(['git', 'show', '-q', 'HEAD~{0}'.format(i)], cwd=project_path)
# check_output returns bytes on Python 3; decode it (matching the pattern used
# earlier in this script) so the 'Change-Id:' string comparison works on 2 and 3.
if not hasattr(output, 'encode'):
    output = output.decode()
output = output.split()
if 'Change-Id:' in output:
head_change_id = ''
for j,t in enumerate(reversed(output)):
if t == 'Change-Id:':
head_change_id = output[len(output) - j]
break
if head_change_id.strip() == item['change_id']:
print('Skipping {0} - already picked in {1} as HEAD~{2}'.format(item['id'], project_path, i))
found_change = True
break
if found_change:
continue
# Print out some useful info
if not args.quiet:
print('--> Subject: "{0}"'.format(item['subject'].encode('utf-8')))
print('--> Project path: {0}'.format(project_path))
print('--> Change number: {0} (Patch Set {1})'.format(item['id'], item['patchset']))
if 'anonymous http' in item['fetch']:
method = 'anonymous http'
else:
method = 'ssh'
# Try fetching from GitHub first if using default gerrit
if args.gerrit == default_gerrit:
if args.verbose:
print('Trying to fetch the change from GitHub')
if args.pull:
cmd = ['git pull --no-edit github', item['fetch'][method]['ref']]
else:
cmd = ['git fetch github', item['fetch'][method]['ref']]
if args.quiet:
cmd.append('--quiet')
else:
print(cmd)
result = subprocess.call([' '.join(cmd)], cwd=project_path, shell=True)
FETCH_HEAD = '{0}/.git/FETCH_HEAD'.format(project_path)
# If the fetch failed but FETCH_HEAD is not empty, bail out; an empty
# FETCH_HEAD means GitHub did not have the ref and we fall back to Gerrit below.
if result != 0 and os.stat(FETCH_HEAD).st_size != 0:
print('ERROR: git command failed')
sys.exit(result)
# Check if it worked
if args.gerrit != default_gerrit or os.stat(FETCH_HEAD).st_size == 0:
# If not using the default gerrit or github failed, fetch from gerrit.
if args.verbose:
if args.gerrit == default_gerrit:
print('Fetching from GitHub didn\'t work, trying to fetch the change from Gerrit')
else:
print('Fetching from {0}'.format(args.gerrit))
if args.pull:
cmd = ['git pull --no-edit', item['fetch'][method]['url'], item['fetch'][method]['ref']]
else:
cmd = ['git fetch', item['fetch'][method]['url'], item['fetch'][method]['ref']]
if args.quiet:
cmd.append('--quiet')
else:
print(cmd)
result = subprocess.call([' '.join(cmd)], cwd=project_path, shell=True)
if result != 0:
print('ERROR: git command failed')
sys.exit(result)
# Perform the cherry-pick
if not args.pull:
cmd = ['git cherry-pick --ff FETCH_HEAD']
if args.quiet:
cmd_out = open(os.devnull, 'wb')
else:
cmd_out = None
result = subprocess.call(cmd, cwd=project_path, shell=True, stdout=cmd_out, stderr=cmd_out)
if result != 0:
if args.reset:
print('ERROR: git command failed, aborting cherry-pick')
cmd = ['git cherry-pick --abort']
subprocess.call(cmd, cwd=project_path, shell=True, stdout=cmd_out, stderr=cmd_out)
else:
print('ERROR: git command failed')
sys.exit(result)
if not args.quiet:
print('')
|
[] |
[] |
[
"ANDROID_BUILD_TOP",
"HOME"
] |
[]
|
["ANDROID_BUILD_TOP", "HOME"]
|
python
| 2 | 0 | |
agw/gunicorn_cfg.py
|
#!/usr/bin/env python3
import os
bind = '0.0.0.0:8171'
# Environment variables are strings, so cast the worker count explicitly.
workers = int(os.environ['GUNICORN_WORKERS']) if 'GUNICORN_WORKERS' in os.environ else 5
preload_app = True
# Server Hooks
def post_fork(server, worker):
pass
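# Example invocation (the app module name is an assumption; adjust as needed):
#   GUNICORN_WORKERS=9 gunicorn -c gunicorn_cfg.py app:app
# Without GUNICORN_WORKERS set, 5 workers are bound to 0.0.0.0:8171.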
|
[] |
[] |
[
"GUNICORN_WORKERS"
] |
[]
|
["GUNICORN_WORKERS"]
|
python
| 1 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Elasticsearch documentation build configuration file, created by
# sphinx-quickstart on Mon May 6 15:38:41 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Elasticsearch DSL'
copyright = u'%d, Elasticsearch B.V' % datetime.datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import elasticsearch_dsl
# The short X.Y version.
version = elasticsearch_dsl.__versionstr__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Elasticsearchdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Elasticsearch-dsl.tex', u'Elasticsearch DSL Documentation',
u'Honza Král', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'elasticsearch-dsl', u'Elasticsearch DSL Documentation',
[u'Honza Král'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Elasticsearch', u'Elasticsearch Documentation',
u'Honza Král', 'Elasticsearch', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
pkg/outputs/influxdb.go
|
package outputs
import (
"fmt"
"log"
"os"
"time"
"github.com/influxdata/influxdb/client/v2"
)
// InfluxDBServer contains the information necessary to connect to an InfluxDB
// server, such as host, port and database.
type InfluxDBServer struct {
Host string
Port string
Database string
}
// GetInfluxdbConfig generates a server config from environment variables.
func GetInfluxdbConfig() InfluxDBServer {
server := InfluxDBServer{
Host: os.Getenv("INFLUXDB_HOST"),
Port: os.Getenv("INFLUXDB_PORT"),
Database: os.Getenv("INFLUXDB_DATABASE"),
}
return server
}
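// Example environment consumed by GetInfluxdbConfig (values are hypothetical):
//
//	INFLUXDB_HOST=localhost
//	INFLUXDB_PORT=8086
//	INFLUXDB_DATABASE=metrics
//
// Unset variables simply yield empty strings; nothing is validated here.
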
// WriteOutputInflux creates a batch of points from the given map and writes it to InfluxDB.
func WriteOutputInflux(values map[string]int, fieldName string) {
s := GetInfluxdbConfig()
c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: fmt.Sprintf("http://%s:%s", s.Host, s.Port),
})
if err != nil {
log.Fatalf("failed to create new HTTP client: %v", err)
}
defer c.Close()
bp, err := client.NewBatchPoints(client.BatchPointsConfig{
Database: s.Database,
Precision: "s",
})
if err != nil {
log.Fatalf("failed to create new batch points: %v", err)
}
for k, v := range values {
tags := make(map[string]string)
fields := map[string]interface{}{
fieldName: v,
}
pt, err := client.NewPoint(k, tags, fields, time.Now())
if err != nil {
log.Fatal(err)
}
bp.AddPoint(pt)
}
if err := c.Write(bp); err != nil {
log.Fatal(err)
}
log.Println("Successfully wrote to InfluxDB")
}
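// Sketch of a hypothetical caller:
//
//	counts := map[string]int{"requests_ok": 42, "requests_failed": 3}
//	WriteOutputInflux(counts, "count")
//
// Each map key becomes a measurement name and fieldName is the single field
// written for that point.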
|
[
"\"INFLUXDB_HOST\"",
"\"INFLUXDB_PORT\"",
"\"INFLUXDB_DATABASE\""
] |
[] |
[
"INFLUXDB_DATABASE",
"INFLUXDB_HOST",
"INFLUXDB_PORT"
] |
[]
|
["INFLUXDB_DATABASE", "INFLUXDB_HOST", "INFLUXDB_PORT"]
|
go
| 3 | 0 | |
habr-api/config/settings/base.py
|
import os
from unipath import Path
from config.settings.celery import *
BASE_DIR = Path(__file__).ancestor(3)
SECRET_KEY = os.environ['SECRET_KEY']
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'rest_framework',
'core.apps.CoreConfig'
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR.child('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# static
STATICFILES_DIRS = [BASE_DIR.child("static")]
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR.child("collect_static")
# media
MEDIA_ROOT = BASE_DIR.child("media")
MEDIA_URL = '/media/'
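# Note: SECRET_KEY is read from the environment above, so it must be exported
# before starting the process, e.g. (value is a placeholder):
#   SECRET_KEY=change-me ./manage.py runserver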
|
[] |
[] |
[
"SECRET_KEY"
] |
[]
|
["SECRET_KEY"]
|
python
| 1 | 0 | |
test/e2e/framework/util.go
|
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/discovery"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/runtime"
sshutil "k8s.io/kubernetes/pkg/ssh"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
utilyaml "k8s.io/kubernetes/pkg/util/yaml"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/watch"
"github.com/blang/semver"
"golang.org/x/crypto/ssh"
"golang.org/x/net/websocket"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
)
const (
// How long to wait for the pod to be listable
PodListTimeout = time.Minute
// Initial pod start can be delayed O(minutes) by slow docker pulls
// TODO: Make this 30 seconds once #4566 is resolved.
PodStartTimeout = 5 * time.Minute
// How long to wait for the pod to no longer be running
podNoLongerRunningTimeout = 30 * time.Second
// If there are any orphaned namespaces to clean up, this test is running
// on a long lived cluster. A long wait here is preferable to spurious test
// failures caused by leaked resources from a previous test run.
NamespaceCleanupTimeout = 15 * time.Minute
// Some pods can take much longer to get ready due to volume attach/detach latency.
slowPodStartTimeout = 15 * time.Minute
// How long to wait for a service endpoint to be resolvable.
ServiceStartTimeout = 1 * time.Minute
// String used to mark pod deletion
nonExist = "NonExist"
// How often to Poll pods, nodes and claims.
Poll = 2 * time.Second
// service accounts are provisioned after namespace creation
// a service account is required to support pod creation in a namespace as part of admission control
ServiceAccountProvisionTimeout = 2 * time.Minute
// How long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
SingleCallTimeout = 5 * time.Minute
// How long nodes have to be "ready" when a test begins. They should already
// be "ready" before the test starts, so this is small.
NodeReadyInitialTimeout = 20 * time.Second
// How long pods have to be "ready" when a test begins.
PodReadyBeforeTimeout = 2 * time.Minute
// How long pods have to become scheduled onto nodes
podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)
podRespondingTimeout = 2 * time.Minute
ServiceRespondingTimeout = 2 * time.Minute
EndpointRegisterTimeout = time.Minute
// How long claims have to become dynamically provisioned
ClaimProvisionTimeout = 5 * time.Minute
)
// SubResource proxy should have been functional in v1.0.0, but SubResource
// proxy via tunneling is known to be broken in v1.0. See
// https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463
//
// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively
// in v1.3).
var SubResourcePodProxyVersion = version.MustParse("v1.1.0")
var subResourceServiceAndNodeProxyVersion = version.MustParse("v1.2.0")
func GetServicesProxyRequest(c *client.Client, request *restclient.Request) (*restclient.Request, error) {
subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
if err != nil {
return nil, err
}
if subResourceProxyAvailable {
return request.Resource("services").SubResource("proxy"), nil
}
return request.Prefix("proxy").Resource("services"), nil
}
// unique identifier of the e2e run
var RunId = util.NewUUID()
type CreateTestingNSFn func(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error)
type ContainerFailures struct {
status *api.ContainerStateTerminated
Restarts int
}
func GetMasterHost() string {
masterUrl, err := url.Parse(TestContext.Host)
ExpectNoError(err)
return masterUrl.Host
}
// Convenient wrapper around cache.Store that returns list of api.Pod instead of interface{}.
type PodStore struct {
cache.Store
stopCh chan struct{}
}
func NewPodStore(c *client.Client, namespace string, label labels.Selector, field fields.Selector) *PodStore {
lw := &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = label
options.FieldSelector = field
return c.Pods(namespace).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.LabelSelector = label
options.FieldSelector = field
return c.Pods(namespace).Watch(options)
},
}
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
stopCh := make(chan struct{})
cache.NewReflector(lw, &api.Pod{}, store, 0).RunUntil(stopCh)
return &PodStore{store, stopCh}
}
func (s *PodStore) List() []*api.Pod {
objects := s.Store.List()
pods := make([]*api.Pod, 0)
for _, o := range objects {
pods = append(pods, o.(*api.Pod))
}
return pods
}
func (s *PodStore) Stop() {
close(s.stopCh)
}
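// Sketch of typical PodStore usage (selectors are illustrative):
//
//	store := NewPodStore(c, api.NamespaceDefault, labels.Everything(), fields.Everything())
//	defer store.Stop()
//	pods := store.List()
//
// The reflector keeps the cache up to date until Stop closes the stop channel.
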
type RCConfig struct {
Client *client.Client
Image string
Command []string
Name string
Namespace string
PollInterval time.Duration
Timeout time.Duration
PodStatusFile *os.File
Replicas int
CpuRequest int64 // millicores
CpuLimit int64 // millicores
MemRequest int64 // bytes
MemLimit int64 // bytes
ReadinessProbe *api.Probe
// Env vars, set the same for every pod.
Env map[string]string
// Extra labels added to every pod.
Labels map[string]string
// Ports to declare in the container (map of name to containerPort).
Ports map[string]int
// Ports to declare in the container as host and container ports.
HostPorts map[string]int
Volumes []api.Volume
VolumeMounts []api.VolumeMount
// Pointer to a list of pods; if non-nil, will be set to a list of pods
// created by this RC by RunRC.
CreatedPods *[]*api.Pod
// Maximum allowable container failures. If exceeded, RunRC returns an error.
// Defaults to replicas*0.1 if unspecified.
MaxContainerFailures *int
// If set to false starting RC will print progress, otherwise only errors will be printed.
Silent bool
}
type DeploymentConfig struct {
RCConfig
}
type ReplicaSetConfig struct {
RCConfig
}
func nowStamp() string {
return time.Now().Format(time.StampMilli)
}
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
func Logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
func Failf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
Fail(nowStamp()+": "+msg, 1)
}
func Skipf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
Skip(nowStamp() + ": " + msg)
}
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
if TestContext.CloudConfig.NumNodes < minNodeCount {
Skipf("Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes)
}
}
func SkipUnlessAtLeast(value int, minValue int, message string) {
if value < minValue {
Skipf(message)
}
}
func SkipIfProviderIs(unsupportedProviders ...string) {
if ProviderIs(unsupportedProviders...) {
Skipf("Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider)
}
}
func SkipUnlessProviderIs(supportedProviders ...string) {
if !ProviderIs(supportedProviders...) {
Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider)
}
}
func ProviderIs(providers ...string) bool {
for _, provider := range providers {
if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) {
return true
}
}
return false
}
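// Example (provider names are illustrative): ProviderIs("gce", "gke") reports
// whether the configured provider matches either name, ignoring case.
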
func SkipUnlessServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) {
gte, err := ServerVersionGTE(v, c)
if err != nil {
Failf("Failed to get server version: %v", err)
}
if !gte {
Skipf("Not supported for server versions before %q", v)
}
}
// ProvidersWithSSH are those providers where each node is accessible with SSH
var ProvidersWithSSH = []string{"gce", "gke", "aws"}
// providersWithMasterSSH are those providers where master node is accessible with SSH
var providersWithMasterSSH = []string{"gce", "gke", "kubemark", "aws"}
type podCondition func(pod *api.Pod) (bool, error)
// podReady returns whether pod has a condition of Ready with a status of true.
// TODO: should be replaced with api.IsPodReady
func podReady(pod *api.Pod) bool {
for _, cond := range pod.Status.Conditions {
if cond.Type == api.PodReady && cond.Status == api.ConditionTrue {
return true
}
}
return false
}
// logPodStates logs basic info of provided pods for debugging.
func logPodStates(pods []api.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
for i := range pods {
pod := &pods[i]
if len(pod.ObjectMeta.Name) > maxPodW {
maxPodW = len(pod.ObjectMeta.Name)
}
if len(pod.Spec.NodeName) > maxNodeW {
maxNodeW = len(pod.Spec.NodeName)
}
if len(pod.Status.Phase) > maxPhaseW {
maxPhaseW = len(pod.Status.Phase)
}
}
// Increase widths by one to separate by a single space.
maxPodW++
maxNodeW++
maxPhaseW++
maxGraceW++
// Log pod info. * does space padding, - makes them left-aligned.
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
for _, pod := range pods {
grace := ""
if pod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
}
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
}
Logf("") // Final empty line helps for readability.
}
// PodRunningReady checks whether pod p's phase is running and it has a ready
// condition of status true.
func PodRunningReady(p *api.Pod) (bool, error) {
// Check the phase is running.
if p.Status.Phase != api.PodRunning {
return false, fmt.Errorf("want pod '%s' on '%s' to be '%v' but was '%v'",
p.ObjectMeta.Name, p.Spec.NodeName, api.PodRunning, p.Status.Phase)
}
// Check the ready condition is true.
if !podReady(p) {
return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
p.ObjectMeta.Name, p.Spec.NodeName, api.PodReady, api.ConditionTrue, p.Status.Conditions)
}
return true, nil
}
// PodNotReady checks whether pod p's has a ready condition of status false.
func PodNotReady(p *api.Pod) (bool, error) {
// Check the ready condition is false.
if podReady(p) {
return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
p.ObjectMeta.Name, p.Spec.NodeName, api.PodReady, api.ConditionFalse, p.Status.Conditions)
}
return true, nil
}
// check if a Pod is controlled by a Replication Controller in the List
func hasReplicationControllersForPod(rcs *api.ReplicationControllerList, pod api.Pod) bool {
for _, rc := range rcs.Items {
selector := labels.SelectorFromSet(rc.Spec.Selector)
if selector.Matches(labels.Set(pod.ObjectMeta.Labels)) {
return true
}
}
return false
}
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// replication controller. Also, it ensures that at least minPods are running
// and ready. It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting.
func WaitForPodsRunningReady(ns string, minPods int32, timeout time.Duration) error {
c, err := LoadClient()
if err != nil {
return err
}
start := time.Now()
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
if wait.PollImmediate(Poll, timeout, func() (bool, error) {
// We get the new list of pods and replication controllers in every
// iteration because more pods come online during startup and we want to
// ensure they are also checked.
rcList, err := c.ReplicationControllers(ns).List(api.ListOptions{})
if err != nil {
Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
return false, nil
}
replicas := int32(0)
for _, rc := range rcList.Items {
replicas += rc.Spec.Replicas
}
podList, err := c.Pods(ns).List(api.ListOptions{})
if err != nil {
Logf("Error getting pods in namespace '%s': %v", ns, err)
return false, nil
}
nOk, replicaOk, badPods := int32(0), int32(0), []api.Pod{}
for _, pod := range podList.Items {
if res, err := PodRunningReady(&pod); res && err == nil {
nOk++
if hasReplicationControllersForPod(rcList, pod) {
replicaOk++
}
} else {
if pod.Status.Phase != api.PodFailed {
Logf("The status of Pod %s is %s, waiting for it to be either Running or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
badPods = append(badPods, pod)
} else if !hasReplicationControllersForPod(rcList, pod) {
Logf("Pod %s is Failed, but it's not controlled by a ReplicationController", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
}
//ignore failed pods that are controlled by a replication controller
}
}
Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
return true, nil
}
logPodStates(badPods)
return false, nil
}) != nil {
return fmt.Errorf("Not all pods in namespace '%s' running and ready within %v", ns, timeout)
}
return nil
}
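// Sketch of a typical call (namespace and counts are illustrative):
//
//	err := WaitForPodsRunningReady(api.NamespaceSystem, 8, PodStartTimeout)
//
// Pods and replication controllers are re-listed on every poll, so pods that
// appear during startup are still counted.
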
func podFromManifest(filename string) (*api.Pod, error) {
var pod api.Pod
Logf("Parsing pod from %v", filename)
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
json, err := utilyaml.ToJSON(data)
if err != nil {
return nil, err
}
if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &pod); err != nil {
return nil, err
}
return &pod, nil
}
// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
func RunKubernetesServiceTestContainer(repoRoot string, ns string) {
c, err := LoadClient()
if err != nil {
Logf("Failed to load client")
return
}
path := filepath.Join(repoRoot, "test", "images", "clusterapi-tester", "pod.yaml")
p, err := podFromManifest(path)
if err != nil {
Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
return
}
p.Namespace = ns
if _, err := c.Pods(ns).Create(p); err != nil {
Logf("Failed to create %v: %v", p.Name, err)
return
}
defer func() {
if err := c.Pods(ns).Delete(p.Name, nil); err != nil {
Logf("Failed to delete pod %v: %v", p.Name, err)
}
}()
timeout := 5 * time.Minute
if err := waitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, PodRunningReady); err != nil {
Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
return
}
logs, err := GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
if err != nil {
Logf("Failed to retrieve logs from %v: %v", p.Name, err)
} else {
Logf("Output of clusterapi-tester:\n%v", logs)
}
}
func LogFailedContainers(ns string) {
c, err := LoadClient()
if err != nil {
Logf("Failed to load client")
return
}
podList, err := c.Pods(ns).List(api.ListOptions{})
if err != nil {
Logf("Error getting pods in namespace '%s': %v", ns, err)
return
}
Logf("Running kubectl logs on non-ready containers in %v", ns)
for _, pod := range podList.Items {
if res, err := PodRunningReady(&pod); !res || err != nil {
for _, container := range pod.Spec.Containers {
logs, err := GetPodLogs(c, ns, pod.Name, container.Name)
if err != nil {
logs, err = getPreviousPodLogs(c, ns, pod.Name, container.Name)
if err != nil {
Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
}
}
By(fmt.Sprintf("Logs of %v/%v:%v on node %v", ns, pod.Name, container.Name, pod.Spec.NodeName))
Logf(logs)
}
}
}
}
// DeleteNamespaces deletes all namespaces that match the given delete and skip filters.
// Filter is by simple strings.Contains; first skip filter, then delete filter.
// Returns the list of deleted namespaces or an error.
func DeleteNamespaces(c *client.Client, deleteFilter, skipFilter []string) ([]string, error) {
By("Deleting namespaces")
nsList, err := c.Namespaces().List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred())
var deleted []string
var wg sync.WaitGroup
OUTER:
for _, item := range nsList.Items {
if skipFilter != nil {
for _, pattern := range skipFilter {
if strings.Contains(item.Name, pattern) {
continue OUTER
}
}
}
if deleteFilter != nil {
var shouldDelete bool
for _, pattern := range deleteFilter {
if strings.Contains(item.Name, pattern) {
shouldDelete = true
break
}
}
if !shouldDelete {
continue OUTER
}
}
wg.Add(1)
deleted = append(deleted, item.Name)
go func(nsName string) {
defer wg.Done()
defer GinkgoRecover()
Expect(c.Namespaces().Delete(nsName)).To(Succeed())
Logf("namespace : %v api call to delete is complete ", nsName)
}(item.Name)
}
wg.Wait()
return deleted, nil
}
func WaitForNamespacesDeleted(c *client.Client, namespaces []string, timeout time.Duration) error {
By("Waiting for namespaces to vanish")
nsMap := map[string]bool{}
for _, ns := range namespaces {
nsMap[ns] = true
}
//Now POLL until all namespaces have been eradicated.
return wait.Poll(2*time.Second, timeout,
func() (bool, error) {
nsList, err := c.Namespaces().List(api.ListOptions{})
if err != nil {
return false, err
}
for _, item := range nsList.Items {
if _, ok := nsMap[item.Name]; ok {
return false, nil
}
}
return true, nil
})
}
func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName string, timeout time.Duration) error {
w, err := c.ServiceAccounts(ns).Watch(api.SingleObject(api.ObjectMeta{Name: serviceAccountName}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, client.ServiceAccountHasSecrets)
return err
}
func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pod, err := c.Pods(ns).Get(podName)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("Pod %q in namespace %q disappeared. Error: %v", podName, ns, err)
return err
}
// Aligning this text makes it much more readable
Logf("Get pod %[1]s in namespace '%[2]s' failed, ignoring for %[3]v. Error: %[4]v",
podName, ns, Poll, err)
continue
}
done, err := condition(pod)
if done {
return err
}
Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+
"(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)",
podName, ns, desc, pod.Status.Phase, podReady(pod), time.Since(start))
}
return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
}
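// Sketch of a condition closure as passed above (illustrative):
//
//	err := waitForPodCondition(c, ns, podName, "running", PodStartTimeout,
//		func(pod *api.Pod) (bool, error) {
//			return pod.Status.Phase == api.PodRunning, nil
//		})
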
// WaitForMatchPodsCondition finds match pods based on the input ListOptions.
// waits and checks if all match pods are in the given podCondition
func WaitForMatchPodsCondition(c *client.Client, opts api.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pods, err := c.Pods(api.NamespaceAll).List(opts)
if err != nil {
return err
}
conditionNotMatch := []string{}
for _, pod := range pods.Items {
done, err := condition(&pod)
if done && err != nil {
return fmt.Errorf("Unexpected error: %v", err)
}
if !done {
conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
}
}
if len(conditionNotMatch) <= 0 {
return err
}
Logf("%d pods are not %s", len(conditionNotMatch), desc)
}
return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
}
// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned
// the default service account is what is associated with pods when they do not specify a service account
// as a result, pods are not able to be provisioned in a namespace until the service account is provisioned
func WaitForDefaultServiceAccountInNamespace(c *client.Client, namespace string) error {
return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase api.PersistentVolumePhase, c *client.Client, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.PersistentVolumes().Get(pvName)
if err != nil {
Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
continue
} else {
if pv.Status.Phase == phase {
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
return nil
} else {
Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
}
}
}
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c *client.Client, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.PersistentVolumes().Get(pvName)
if err == nil {
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
continue
} else {
if apierrs.IsNotFound(err) {
Logf("PersistentVolume %s was removed", pvName)
return nil
} else {
Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
}
}
}
return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c *client.Client, ns string, pvcName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pvc, err := c.PersistentVolumeClaims(ns).Get(pvcName)
if err != nil {
Logf("Get persistent volume claim %s in failed, ignoring for %v: %v", pvcName, Poll, err)
continue
} else {
if pvc.Status.Phase == phase {
Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
return nil
} else {
Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
}
}
}
return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout)
}
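// Example wait for a bound claim (names and timeout choice are illustrative):
//
//	err := WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, "pvc-1", Poll, ClaimProvisionTimeout)
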
// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
// Please see NewFramework instead of using this directly.
func CreateTestingNS(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error) {
if labels == nil {
labels = map[string]string{}
}
labels["e2e-run"] = string(RunId)
namespaceObj := &api.Namespace{
ObjectMeta: api.ObjectMeta{
GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
Namespace: "",
Labels: labels,
},
Status: api.NamespaceStatus{},
}
// Be robust about making the namespace creation call.
var got *api.Namespace
if err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
var err error
got, err = c.Namespaces().Create(namespaceObj)
if err != nil {
return false, nil
}
return true, nil
}); err != nil {
return nil, err
}
if TestContext.VerifyServiceAccount {
if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil {
return nil, err
}
}
return got, nil
}
// CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state
// and waits until they are finally deleted. It ignores namespace skip.
func CheckTestingNSDeletedExcept(c *client.Client, skip string) error {
// TODO: Since we don't have support for bulk resource deletion in the API,
// while deleting a namespace we are deleting all objects from that namespace
// one by one (one deletion == one API call). This basically exposes us to
// throttling - currently controller-manager has a limit of max 20 QPS.
// Once #10217 is implemented and used in namespace-controller, deleting all
// object from a given namespace should be much faster and we will be able
// to lower this timeout.
// However, now Density test is producing ~26000 events and Load capacity test
// is producing ~35000 events, thus assuming there are no other requests it will
// take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60
// minutes to avoid any timeouts here.
timeout := 60 * time.Minute
Logf("Waiting for terminating namespaces to be deleted...")
for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
namespaces, err := c.Namespaces().List(api.ListOptions{})
if err != nil {
Logf("Listing namespaces failed: %v", err)
continue
}
terminating := 0
for _, ns := range namespaces.Items {
if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip {
if ns.Status.Phase == api.NamespaceActive {
return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name)
}
terminating++
}
}
if terminating == 0 {
return nil
}
}
return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
}
// deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks
// whether there are any pods remaining in a non-terminating state.
func deleteNS(c *client.Client, namespace string, timeout time.Duration) error {
if err := c.Namespaces().Delete(namespace); err != nil {
return err
}
err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
if _, err := c.Namespaces().Get(namespace); err != nil {
if apierrs.IsNotFound(err) {
return true, nil
}
Logf("Error while waiting for namespace to be terminated: %v", err)
return false, nil
}
return false, nil
})
// check for pods that were not deleted
remaining := []string{}
missingTimestamp := false
if pods, perr := c.Pods(namespace).List(api.ListOptions{}); perr == nil {
for _, pod := range pods.Items {
Logf("Pod %s %s on node %s remains, has deletion timestamp %s", namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
remaining = append(remaining, pod.Name)
if pod.DeletionTimestamp == nil {
missingTimestamp = true
}
}
}
// a timeout occurred
if err != nil {
if missingTimestamp {
return fmt.Errorf("namespace %s was not deleted within limit: %v, some pods were not marked with a deletion timestamp, pods remaining: %v", namespace, err, remaining)
}
return fmt.Errorf("namespace %s was not deleted within limit: %v, pods remaining: %v", namespace, err, remaining)
}
// pods were not deleted but the namespace was deleted
if len(remaining) > 0 {
return fmt.Errorf("pods remained within namespace %s after deletion: %v", namespace, remaining)
}
return nil
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
func WaitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error {
return waitTimeoutForPodRunningInNamespace(c, podName, namespace, PodStartTimeout)
}
// Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
func waitForPodRunningInNamespaceSlow(c *client.Client, podName string, namespace string) error {
return waitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout)
}
func waitTimeoutForPodRunningInNamespace(c *client.Client, podName string, namespace string, timeout time.Duration) error {
w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, client.PodRunning)
return err
}
// Waits default amount of time (podNoLongerRunningTimeout) for the specified pod to stop running.
// Returns an error if timeout occurs first.
func WaitForPodNoLongerRunningInNamespace(c *client.Client, podName string, namespace string) error {
return waitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, podNoLongerRunningTimeout)
}
func waitTimeoutForPodNoLongerRunningInNamespace(c *client.Client, podName string, namespace string, timeout time.Duration) error {
w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, client.PodCompleted)
return err
}
func waitTimeoutForPodReadyInNamespace(c *client.Client, podName string, namespace string, timeout time.Duration) error {
w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, client.PodRunningAndReady)
return err
}
// WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state.
func WaitForPodNotPending(c *client.Client, ns, podName string) error {
w, err := c.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: podName}))
if err != nil {
return err
}
_, err = watch.Until(PodStartTimeout, w, client.PodNotPending)
return err
}
// waitForPodTerminatedInNamespace returns an error if it took too long for the pod
// to terminate or if the pod terminated with an unexpected reason.
func waitForPodTerminatedInNamespace(c *client.Client, podName, reason, namespace string) error {
return waitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *api.Pod) (bool, error) {
if pod.Status.Phase == api.PodFailed {
if pod.Status.Reason == reason {
return true, nil
} else {
return true, fmt.Errorf("Expected pod %v in namespace %v to be terminated with reason %v, got reason: %v", podName, namespace, reason, pod.Status.Reason)
}
}
return false, nil
})
}
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespaceTimeout(c *client.Client, podName string, contName string, namespace string, timeout time.Duration) error {
return waitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *api.Pod) (bool, error) {
// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632
ci, ok := api.GetContainerStatus(pod.Status.ContainerStatuses, contName)
if !ok {
Logf("No Status.Info for container '%s' in pod '%s' yet", contName, podName)
} else {
if ci.State.Terminated != nil {
if ci.State.Terminated.ExitCode == 0 {
By("Saw pod success")
return true, nil
}
return true, fmt.Errorf("pod '%s' terminated with failure: %+v", podName, ci.State.Terminated)
}
Logf("Nil State.Terminated for container '%s' in pod '%s' in namespace '%s' so far", contName, podName, namespace)
}
return false, nil
})
}
// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or ran past PodStartTimeout.
func WaitForPodSuccessInNamespace(c *client.Client, podName string, contName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, contName, namespace, PodStartTimeout)
}
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or ran past slowPodStartTimeout.
func WaitForPodSuccessInNamespaceSlow(c *client.Client, podName string, contName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, contName, namespace, slowPodStartTimeout)
}
// waitForRCPodOnNode returns the pod from the given replication controller (described by rcName) which is scheduled on the given node.
// In case of failure or too long waiting time, an error is returned.
func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
var p *api.Pod = nil
err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
Logf("Waiting for pod %s to appear on node %s", rcName, node)
options := api.ListOptions{LabelSelector: label}
pods, err := c.Pods(ns).List(options)
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Spec.NodeName == node {
Logf("Pod %s found on node %s", pod.Name, node)
p = &pod
return true, nil
}
}
return false, nil
})
return p, err
}
func WaitForPodToDisappear(c *client.Client, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
Logf("Waiting for pod %s to disappear", podName)
options := api.ListOptions{LabelSelector: label}
pods, err := c.Pods(ns).List(options)
if err != nil {
return false, err
}
found := false
for _, pod := range pods.Items {
if pod.Name == podName {
Logf("Pod %s still exists", podName)
found = true
}
}
if !found {
Logf("Pod %s no longer exists", podName)
return true, nil
}
return false, nil
})
}
// WaitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists.
// In case of failure or too long waiting time, an error is returned.
func WaitForRCPodToDisappear(c *client.Client, ns, rcName, podName string) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
// NodeController evicts pod after 5 minutes, so we need timeout greater than that.
// Additionally, there can be non-zero grace period, so we are setting 10 minutes
// to be on the safe side.
return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
}
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
func WaitForService(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.Services(namespace).Get(name)
switch {
case err == nil:
if !exist {
return false, nil
}
Logf("Service %s in namespace %s found.", name, namespace)
return true, nil
case apierrs.IsNotFound(err):
if exist {
return false, nil
}
Logf("Service %s in namespace %s disappeared.", name, namespace)
return true, nil
default:
Logf("Get service %s in namespace %s failed: %v", name, namespace, err)
return false, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
return nil
}
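// Example usage (illustrative sketch; the service name "my-svc" and the
// 5 second / 2 minute polling values are hypothetical placeholders):
//
//	// Wait up to two minutes for the service to appear ...
//	ExpectNoError(WaitForService(c, ns, "my-svc", true, 5*time.Second, 2*time.Minute))
//	// ... and, after it has been deleted, for it to disappear again.
//	ExpectNoError(WaitForService(c, ns, "my-svc", false, 5*time.Second, 2*time.Minute))
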
// WaitForServiceEndpointsNum waits until the number of endpoints that implement the service reaches expectNum.
func WaitForServiceEndpointsNum(c *client.Client, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
Logf("Waiting for amount of service:%s endpoints to %d", serviceName, expectNum)
list, err := c.Endpoints(namespace).List(api.ListOptions{})
if err != nil {
return false, err
}
for _, e := range list.Items {
if e.Name == serviceName && countEndpointsNum(&e) == expectNum {
return true, nil
}
}
return false, nil
})
}
func countEndpointsNum(e *api.Endpoints) int {
num := 0
for _, sub := range e.Subsets {
num += len(sub.Addresses)
}
return num
}
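// Example usage (illustrative sketch; the service name "my-svc", the expected
// endpoint count of 3, and the polling values are hypothetical placeholders):
//
//	// Block until "my-svc" is backed by exactly 3 ready endpoint addresses.
//	ExpectNoError(WaitForServiceEndpointsNum(c, ns, "my-svc", 3, 1*time.Second, 2*time.Minute))
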
// WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
func WaitForReplicationController(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.ReplicationControllers(namespace).Get(name)
if err != nil {
Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
return !exist, nil
} else {
Logf("ReplicationController %s in namespace %s found.", name, namespace)
return exist, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
return nil
}
func WaitForEndpoint(c *client.Client, ns, name string) error {
for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
endpoint, err := c.Endpoints(ns).Get(name)
Expect(err).NotTo(HaveOccurred())
if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
Logf("Endpoint %s/%s is not ready yet", ns, name)
continue
} else {
return nil
}
}
return fmt.Errorf("Failed to get entpoints for %s/%s", ns, name)
}
// podProxyResponseChecker carries the context for checking pods' responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type podProxyResponseChecker struct {
c *client.Client
ns string
label labels.Selector
controllerName string
respondName bool // Whether the pod should respond with its own name.
pods *api.PodList
}
func PodProxyResponseChecker(c *client.Client, ns string, label labels.Selector, controllerName string, respondName bool, pods *api.PodList) podProxyResponseChecker {
return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}
// CheckAllResponses issues GETs to all pods in the context and verifies that they
// reply with their own pod name.
func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
successes := 0
options := api.ListOptions{LabelSelector: r.label}
currentPods, err := r.c.Pods(r.ns).List(options)
Expect(err).NotTo(HaveOccurred())
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
if !isElementOf(pod.UID, currentPods) {
return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
}
subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c)
if err != nil {
return false, err
}
var body []byte
if subResourceProxyAvailable {
body, err = r.c.Get().
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do().
Raw()
} else {
body, err = r.c.Get().
Prefix("proxy").
Namespace(r.ns).
Resource("pods").
Name(string(pod.Name)).
Do().
Raw()
}
if err != nil {
Logf("Controller %s: Failed to GET from replica %d [%s]: %v:", r.controllerName, i+1, pod.Name, err)
continue
}
// The response checker expects the pod's name unless !respondName, in
// which case it just checks for a non-empty response.
got := string(body)
what := ""
if r.respondName {
what = "expected"
want := pod.Name
if got != want {
Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
return false, nil
}
return true, nil
}
// ServerVersionGTE returns true if v is greater than or equal to the server
// version.
//
// TODO(18726): This should be incorporated into client.VersionInterface.
func ServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) (bool, error) {
serverVersion, err := c.ServerVersion()
if err != nil {
return false, fmt.Errorf("Unable to get server version: %v", err)
}
sv, err := version.Parse(serverVersion.GitVersion)
if err != nil {
return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err)
}
return sv.GTE(v), nil
}
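// Example usage (illustrative sketch): gate a code path on the server being at
// least as new as SubResourcePodProxyVersion, mirroring the check performed in
// CheckAllResponses above. The variable c is a hypothetical placeholder.
//
//	gte, err := ServerVersionGTE(SubResourcePodProxyVersion, c)
//	if err != nil {
//		Failf("could not determine server version: %v", err)
//	}
//	if gte {
//		// safe to use the pods/proxy subresource
//	}
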
func PodsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error {
By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}
func PodsCreated(c *client.Client, ns, name string, replicas int32) (*api.PodList, error) {
timeout := 2 * time.Minute
// List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
options := api.ListOptions{LabelSelector: label}
pods, err := c.Pods(ns).List(options)
if err != nil {
return nil, err
}
created := []api.Pod{}
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
created = append(created, pod)
}
Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
if int32(len(created)) == replicas {
pods.Items = created
return pods, nil
}
}
return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
func podsRunning(c *client.Client, pods *api.PodList) []error {
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
By("ensuring each pod is running")
e := []error{}
for _, pod := range pods.Items {
// TODO: make waiting parallel.
err := WaitForPodRunningInNamespace(c, pod.Name, pod.Namespace)
if err != nil {
e = append(e, err)
}
}
return e
}
func VerifyPods(c *client.Client, ns, name string, wantName bool, replicas int32) error {
pods, err := PodsCreated(c, ns, name, replicas)
if err != nil {
return err
}
e := podsRunning(c, pods)
if len(e) > 0 {
return fmt.Errorf("failed to wait for pods running: %v", e)
}
err = PodsResponding(c, ns, name, wantName, pods)
if err != nil {
return fmt.Errorf("failed to wait for pods responding: %v", err)
}
return nil
}
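// Example usage (illustrative sketch; the controller name "my-rc" and the
// replica count of 2 are hypothetical placeholders):
//
//	// Check that 2 pods labeled name=my-rc were created, are running, and
//	// each responds with its own name through the API proxy.
//	ExpectNoError(VerifyPods(c, ns, "my-rc", true, 2))
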
func ServiceResponding(c *client.Client, ns, name string) error {
By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) {
proxyRequest, errProxy := GetServicesProxyRequest(c, c.Get())
if errProxy != nil {
Logf("Failed to get services proxy request: %v:", errProxy)
return false, nil
}
body, err := proxyRequest.Namespace(ns).
Name(name).
Do().
Raw()
if err != nil {
Logf("Failed to GET from service %s: %v:", name, err)
return false, nil
}
got := string(body)
if len(got) == 0 {
Logf("Service %s: expected non-empty response", name)
return false, err // stop polling
}
Logf("Service %s: found nonempty answer: %s", name, got)
return true, nil
})
}
func LoadConfig() (*restclient.Config, error) {
switch {
case TestContext.KubeConfig != "":
Logf(">>> TestContext.KubeConfig: %s\n", TestContext.KubeConfig)
c, err := clientcmd.LoadFromFile(TestContext.KubeConfig)
if err != nil {
return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error())
}
if TestContext.KubeContext != "" {
Logf(">>> TestContext.KubeContext: %s\n", TestContext.KubeContext)
c.CurrentContext = TestContext.KubeContext
}
return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig()
default:
return nil, fmt.Errorf("KubeConfig must be specified to load client config")
}
}
func loadClientFromConfig(config *restclient.Config) (*client.Client, error) {
c, err := client.New(config)
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
if c.Client.Timeout == 0 {
c.Client.Timeout = SingleCallTimeout
}
return c, nil
}
func LoadClient() (*client.Client, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return loadClientFromConfig(config)
}
// randomSuffix provides a random string to append to pods, services, and rcs.
// TODO: Allow service names to have the same form as names
// for pods and replication controllers so we don't
// need to use such a function and can instead
// use the UUID utility function.
func randomSuffix() string {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return strconv.Itoa(r.Int() % 10000)
}
func ExpectNoError(err error, explain ...interface{}) {
if err != nil {
Logf("Unexpected error occurred: %v", err)
}
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
func Cleanup(filePath string, ns string, selectors ...string) {
By("using delete to clean up resources")
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg)
for _, selector := range selectors {
resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
if resources != "" {
Failf("Resources left running after stop:\n%s", resources)
}
pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
if pods != "" {
Failf("Pods left unterminated after stop:\n%s", pods)
}
}
}
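// Example usage (illustrative sketch; the manifest path and the label selector
// below are hypothetical placeholders):
//
//	Cleanup("examples/guestbook/frontend-controller.yaml", ns, "name=frontend")
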
// validatorFn is the function which individual tests will implement.
// We may want it to return more than just an error at some point.
type validatorFn func(c *client.Client, podID string) error
// ValidateController is a generic mechanism for testing RC's that are running.
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
// "containername": this is grepped for.
// "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
func ValidateController(c *client.Client, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
// NB: kubectl adds the "exists" function to the standard template functions.
// This lets us check to see if the "running" entry exists for each of the containers
// we care about. Exists will never return an error and it's safe to check a chain of
// things, any one of which may not exist. In the below template, all of info,
// containername, and running might be nil, so the normal index function isn't very
// helpful.
// This template is unit-tested in kubectl, so if you change it, update the unit test.
// You can read about the syntax here: http://golang.org/pkg/text/template/.
getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)
getImageTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop:
for start := time.Now(); time.Since(start) < PodStartTimeout; time.Sleep(5 * time.Second) {
getPodsOutput := RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "--api-version=v1", "-l", testname, fmt.Sprintf("--namespace=%v", ns))
pods := strings.Fields(getPodsOutput)
if numPods := len(pods); numPods != replicas {
By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
continue
}
var runningPods []string
for _, podID := range pods {
running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, "--api-version=v1", fmt.Sprintf("--namespace=%v", ns))
if running != "true" {
Logf("%s is created but not running", podID)
continue waitLoop
}
currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, "--api-version=v1", fmt.Sprintf("--namespace=%v", ns))
if currentImage != containerImage {
Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
continue waitLoop
}
// Call the generic validator function here.
// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
if err := validator(c, podID); err != nil {
Logf("%s is running right image but validator function failed: %v", podID, err)
continue waitLoop
}
Logf("%s is verified up and running", podID)
runningPods = append(runningPods, podID)
}
// If we reach here, then all our checks passed.
if len(runningPods) == replicas {
return
}
}
// Reaching here means that one or more checks failed multiple times. Assuming it's not a race condition, something is broken.
Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
}
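// Example usage (illustrative sketch; the image, names, selector, and the body
// of the validator function are hypothetical placeholders):
//
//	validator := func(c *client.Client, podID string) error {
//		// e.g. GET a URL served by the pod and check the payload
//		return nil
//	}
//	ValidateController(c, "gcr.io/google_containers/nginx:1.7.9", 2, "nginx", "name=nginx", validator, ns)
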
// KubectlCmd runs the kubectl executable through the wrapper script.
func KubectlCmd(args ...string) *exec.Cmd {
defaultArgs := []string{}
// Reference a --server option so tests can run anywhere.
if TestContext.Host != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host)
}
if TestContext.KubeConfig != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
// Reference the KubeContext
if TestContext.KubeContext != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext)
}
} else {
if TestContext.CertDir != "" {
defaultArgs = append(defaultArgs,
fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")),
fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")),
fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key")))
}
}
kubectlArgs := append(defaultArgs, args...)
// We allow users to specify a path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh"
// and so on.
cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...)
//caller will invoke this and wait on it.
return cmd
}
// kubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
type kubectlBuilder struct {
cmd *exec.Cmd
timeout <-chan time.Time
}
func NewKubectlCommand(args ...string) *kubectlBuilder {
b := new(kubectlBuilder)
b.cmd = KubectlCmd(args...)
return b
}
func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder {
b.timeout = t
return b
}
func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder {
b.cmd.Stdin = strings.NewReader(data)
return &b
}
func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder {
b.cmd.Stdin = reader
return &b
}
func (b kubectlBuilder) ExecOrDie() string {
str, err := b.Exec()
Logf("stdout: %q", str)
Expect(err).NotTo(HaveOccurred())
return str
}
func (b kubectlBuilder) Exec() (string, error) {
var stdout, stderr bytes.Buffer
cmd := b.cmd
cmd.Stdout, cmd.Stderr = &stdout, &stderr
Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
if err := cmd.Start(); err != nil {
return "", fmt.Errorf("Error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err)
}
errCh := make(chan error, 1)
go func() {
errCh <- cmd.Wait()
}()
select {
case err := <-errCh:
if err != nil {
return "", fmt.Errorf("Error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err)
}
case <-b.timeout:
b.cmd.Process.Kill()
return "", fmt.Errorf("Timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
}
Logf("stderr: %q", stderr.String())
// TODO: trimspace should be unnecessary after switching to use kubectl binary directly
return strings.TrimSpace(stdout.String()), nil
}
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
func RunKubectlOrDie(args ...string) string {
return NewKubectlCommand(args...).ExecOrDie()
}
// RunKubectl is a convenience wrapper over kubectlBuilder
func RunKubectl(args ...string) (string, error) {
return NewKubectlCommand(args...).Exec()
}
// runKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func runKubectlOrDieInput(data string, args ...string) string {
return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie()
}
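// Example usage (illustrative sketch; the arguments, the podYAML variable, and
// the 30 second timeout are hypothetical placeholders):
//
//	// One-shot invocation that must succeed:
//	out := RunKubectlOrDie("get", "pods", fmt.Sprintf("--namespace=%v", ns))
//	Logf("kubectl get pods: %s", out)
//	// Builder form with stdin data and an explicit timeout:
//	created, err := NewKubectlCommand("create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)).
//		WithStdinData(podYAML).
//		WithTimeout(time.After(30 * time.Second)).
//		Exec()
//	ExpectNoError(err)
//	Logf("created: %s", created)
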
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
stdout, err = cmd.StdoutPipe()
if err != nil {
return
}
stderr, err = cmd.StderrPipe()
if err != nil {
return
}
Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
err = cmd.Start()
return
}
// Rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer.
func TryKill(cmd *exec.Cmd) {
if err := cmd.Process.Kill(); err != nil {
Logf("ERROR failed to kill command %v! The process may leak", cmd)
}
}
// TestContainerOutput runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a substring matcher.
func TestContainerOutput(scenarioName string, c *client.Client, pod *api.Pod, containerIndex int, expectedOutput []string, ns string) {
testContainerOutputMatcher(scenarioName, c, pod, containerIndex, expectedOutput, ns, ContainSubstring)
}
// testContainerOutputRegexp runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher.
func testContainerOutputRegexp(scenarioName string, c *client.Client, pod *api.Pod, containerIndex int, expectedOutput []string, ns string) {
testContainerOutputMatcher(scenarioName, c, pod, containerIndex, expectedOutput, ns, MatchRegexp)
}
// testContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func testContainerOutputMatcher(scenarioName string,
c *client.Client,
pod *api.Pod,
containerIndex int,
expectedOutput []string, ns string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
defer c.Pods(ns).Delete(pod.Name, api.NewDeleteOptions(0))
if _, err := c.Pods(ns).Create(pod); err != nil {
Failf("Failed to create pod: %v", err)
}
// Wait for client pod to complete.
var containerName string
for id, container := range pod.Spec.Containers {
ExpectNoError(WaitForPodSuccessInNamespace(c, pod.Name, container.Name, ns))
if id == containerIndex {
containerName = container.Name
}
}
if containerName == "" {
Failf("Invalid container index: %d", containerIndex)
}
// Grab its logs. Get host first.
podStatus, err := c.Pods(ns).Get(pod.Name)
if err != nil {
Failf("Failed to get pod status: %v", err)
}
By(fmt.Sprintf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err))
var logs string
start := time.Now()
// Sometimes the actual containers take a second to get started, try to get logs for 60s
for time.Now().Sub(start) < (60 * time.Second) {
err = nil
logs, err = GetPodLogs(c, ns, pod.Name, containerName)
if err != nil {
By(fmt.Sprintf("Warning: Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err))
time.Sleep(5 * time.Second)
continue
}
By(fmt.Sprintf("Successfully fetched pod logs:%v\n", logs))
break
}
for _, m := range expectedOutput {
Expect(logs).To(matcher(m), "%q in container output", m)
}
}
// podInfo contains pod information useful for debugging e2e tests.
type podInfo struct {
oldHostname string
oldPhase string
hostname string
phase string
}
// PodDiff is a map of pod name to podInfos
type PodDiff map[string]*podInfo
// Print formats and prints the given PodDiff.
func (p PodDiff) Print(ignorePhases sets.String) {
for name, info := range p {
if ignorePhases.Has(info.phase) {
continue
}
if info.phase == nonExist {
Logf("Pod %v was deleted, had phase %v and host %v", name, info.oldPhase, info.oldHostname)
continue
}
phaseChange, hostChange := false, false
msg := fmt.Sprintf("Pod %v ", name)
if info.oldPhase != info.phase {
phaseChange = true
if info.oldPhase == nonExist {
msg += fmt.Sprintf("in phase %v ", info.phase)
} else {
msg += fmt.Sprintf("went from phase: %v -> %v ", info.oldPhase, info.phase)
}
}
if info.oldHostname != info.hostname {
hostChange = true
if info.oldHostname == nonExist || info.oldHostname == "" {
msg += fmt.Sprintf("assigned host %v ", info.hostname)
} else {
msg += fmt.Sprintf("went from host: %v -> %v ", info.oldHostname, info.hostname)
}
}
if phaseChange || hostChange {
Logf(msg)
}
}
}
// Diff computes a PodDiff given 2 lists of pods.
func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff {
podInfoMap := PodDiff{}
// New pods will show up in the curPods list but not in oldPods. They have oldhostname/phase == nonexist.
for _, pod := range curPods {
podInfoMap[pod.Name] = &podInfo{hostname: pod.Spec.NodeName, phase: string(pod.Status.Phase), oldHostname: nonExist, oldPhase: nonExist}
}
// Deleted pods will show up in the oldPods list but not in curPods. They have a hostname/phase == nonexist.
for _, pod := range oldPods {
if info, ok := podInfoMap[pod.Name]; ok {
info.oldHostname, info.oldPhase = pod.Spec.NodeName, string(pod.Status.Phase)
} else {
podInfoMap[pod.Name] = &podInfo{hostname: nonExist, phase: nonExist, oldHostname: pod.Spec.NodeName, oldPhase: string(pod.Status.Phase)}
}
}
return podInfoMap
}
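// Example usage (illustrative sketch; oldPods and currentPods are hypothetical
// slices of pods captured on two consecutive observations):
//
//	// Print which pods changed phase or host between the two observations,
//	// ignoring pods that ended up Succeeded.
//	Diff(oldPods, currentPods).Print(sets.NewString(string(api.PodSucceeded)))
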
// RunDeployment launches (and verifies the correctness of) a Deployment
// and waits for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunDeployment(config DeploymentConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
func (config *DeploymentConfig) create() error {
By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
deployment := &extensions.Deployment{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: extensions.DeploymentSpec{
Replicas: int32(config.Replicas),
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
},
},
},
},
},
}
config.applyTo(&deployment.Spec.Template)
_, err := config.Client.Deployments(config.Namespace).Create(deployment)
if err != nil {
return fmt.Errorf("Error creating deployment: %v", err)
}
Logf("Created deployment with name: %v, namespace: %v, replica count: %v", deployment.Name, config.Namespace, deployment.Spec.Replicas)
return nil
}
// RunReplicaSet launches (and verifies the correctness of) a ReplicaSet
// and waits until all the pods it launches reach the "Running" state.
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunReplicaSet(config ReplicaSetConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
func (config *ReplicaSetConfig) create() error {
By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
rs := &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: extensions.ReplicaSetSpec{
Replicas: int32(config.Replicas),
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
},
},
},
},
},
}
config.applyTo(&rs.Spec.Template)
_, err := config.Client.ReplicaSets(config.Namespace).Create(rs)
if err != nil {
return fmt.Errorf("Error creating replica set: %v", err)
}
Logf("Created replica set with name: %v, namespace: %v, replica count: %v", rs.Name, config.Namespace, rs.Spec.Replicas)
return nil
}
// RunRC launches (and verifies the correctness of) a Replication Controller
// and waits for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunRC(config RCConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
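// Example usage (illustrative sketch; the image, controller name, and replica
// count are hypothetical placeholders):
//
//	cfg := RCConfig{
//		Client:    c,
//		Image:     "gcr.io/google_containers/pause:2.0",
//		Name:      "my-rc",
//		Namespace: ns,
//		Replicas:  10,
//	}
//	defer DeleteRC(c, ns, "my-rc")
//	ExpectNoError(RunRC(cfg))
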
func (config *RCConfig) create() error {
By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: api.ReplicationControllerSpec{
Replicas: int32(config.Replicas),
Selector: map[string]string{
"name": config.Name,
},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
ReadinessProbe: config.ReadinessProbe,
},
},
DNSPolicy: api.DNSDefault,
},
},
},
}
config.applyTo(rc.Spec.Template)
_, err := config.Client.ReplicationControllers(config.Namespace).Create(rc)
if err != nil {
return fmt.Errorf("Error creating replication controller: %v", err)
}
Logf("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, rc.Spec.Replicas)
return nil
}
func (config *RCConfig) applyTo(template *api.PodTemplateSpec) {
if config.Env != nil {
for k, v := range config.Env {
c := &template.Spec.Containers[0]
c.Env = append(c.Env, api.EnvVar{Name: k, Value: v})
}
}
if config.Labels != nil {
for k, v := range config.Labels {
template.ObjectMeta.Labels[k] = v
}
}
if config.Ports != nil {
for k, v := range config.Ports {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v)})
}
}
if config.HostPorts != nil {
for k, v := range config.HostPorts {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
}
}
if config.CpuLimit > 0 || config.MemLimit > 0 {
template.Spec.Containers[0].Resources.Limits = api.ResourceList{}
}
if config.CpuLimit > 0 {
template.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
}
if config.MemLimit > 0 {
template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
}
if config.CpuRequest > 0 || config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
}
if config.CpuRequest > 0 {
template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
}
if config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
}
if len(config.Volumes) > 0 {
template.Spec.Volumes = config.Volumes
}
if len(config.VolumeMounts) > 0 {
template.Spec.Containers[0].VolumeMounts = config.VolumeMounts
}
}
type RCStartupStatus struct {
Expected int
Terminating int
Running int
RunningButNotReady int
Waiting int
Pending int
Unknown int
Inactive int
FailedContainers int
Created []*api.Pod
ContainerRestartNodes sets.String
}
func (s *RCStartupStatus) Print(name string) {
Logf("%v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
name, len(s.Created), s.Expected, s.Running, s.Pending, s.Waiting, s.Inactive, s.Terminating, s.Unknown, s.RunningButNotReady)
}
func ComputeRCStartupStatus(pods []*api.Pod, expected int) RCStartupStatus {
startupStatus := RCStartupStatus{
Expected: expected,
Created: make([]*api.Pod, 0, expected),
ContainerRestartNodes: sets.NewString(),
}
for _, p := range pods {
if p.DeletionTimestamp != nil {
startupStatus.Terminating++
continue
}
startupStatus.Created = append(startupStatus.Created, p)
if p.Status.Phase == api.PodRunning {
ready := false
for _, c := range p.Status.Conditions {
if c.Type == api.PodReady && c.Status == api.ConditionTrue {
ready = true
break
}
}
if ready {
// Only count a pod as running when it is also ready.
startupStatus.Running++
} else {
startupStatus.RunningButNotReady++
}
for _, v := range FailedContainers(p) {
startupStatus.FailedContainers = startupStatus.FailedContainers + v.Restarts
startupStatus.ContainerRestartNodes.Insert(p.Spec.NodeName)
}
} else if p.Status.Phase == api.PodPending {
if p.Spec.NodeName == "" {
startupStatus.Waiting++
} else {
startupStatus.Pending++
}
} else if p.Status.Phase == api.PodSucceeded || p.Status.Phase == api.PodFailed {
startupStatus.Inactive++
} else if p.Status.Phase == api.PodUnknown {
startupStatus.Unknown++
}
}
return startupStatus
}
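// Example usage (illustrative sketch; podStore and expectedReplicas are
// hypothetical placeholders for a PodStore and the desired replica count):
//
//	status := ComputeRCStartupStatus(podStore.List(), expectedReplicas)
//	status.Print("my-rc")
//	if status.FailedContainers > 0 {
//		DumpNodeDebugInfo(c, status.ContainerRestartNodes.List())
//	}
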
func (config *RCConfig) start() error {
// Don't force tests to fail if they don't care about containers restarting.
var maxContainerFailures int
if config.MaxContainerFailures == nil {
maxContainerFailures = int(math.Max(1.0, float64(config.Replicas)*.01))
} else {
maxContainerFailures = *config.MaxContainerFailures
}
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
PodStore := NewPodStore(config.Client, config.Namespace, label, fields.Everything())
defer PodStore.Stop()
interval := config.PollInterval
if interval <= 0 {
interval = 10 * time.Second
}
timeout := config.Timeout
if timeout <= 0 {
timeout = 5 * time.Minute
}
oldPods := make([]*api.Pod, 0)
oldRunning := 0
lastChange := time.Now()
for oldRunning != config.Replicas {
time.Sleep(interval)
pods := PodStore.List()
startupStatus := ComputeRCStartupStatus(pods, config.Replicas)
pods = startupStatus.Created
if config.CreatedPods != nil {
*config.CreatedPods = pods
}
if !config.Silent {
startupStatus.Print(config.Name)
}
promPushRunningPending(startupStatus.Running, startupStatus.Pending)
if config.PodStatusFile != nil {
fmt.Fprintf(config.PodStatusFile, "%d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", startupStatus.Running, startupStatus.Pending, startupStatus.Waiting, startupStatus.Inactive, startupStatus.Unknown, startupStatus.RunningButNotReady)
}
if startupStatus.FailedContainers > maxContainerFailures {
DumpNodeDebugInfo(config.Client, startupStatus.ContainerRestartNodes.List())
// Get the logs from the failed containers to help diagnose what caused them to fail
LogFailedContainers(config.Namespace)
return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures)
}
if len(pods) < len(oldPods) || len(pods) > config.Replicas {
// This failure mode includes:
// kubelet is dead, so node controller deleted pods and rc creates more
// - diagnose by noting the pod diff below.
// pod is unhealthy, so replication controller creates another to take its place
// - diagnose by comparing the previous "2 Pod states" lines for inactive pods
errorStr := fmt.Sprintf("Number of reported pods for %s changed: %d vs %d", config.Name, len(pods), len(oldPods))
Logf("%v, pods that changed since the last iteration:", errorStr)
Diff(oldPods, pods).Print(sets.NewString())
return fmt.Errorf("%s", errorStr)
}
if len(pods) > len(oldPods) || startupStatus.Running > oldRunning {
lastChange = time.Now()
}
oldPods = pods
oldRunning = startupStatus.Running
if time.Since(lastChange) > timeout {
dumpPodDebugInfo(config.Client, pods)
break
}
}
if oldRunning != config.Replicas {
// List only pods from a given replication controller.
options := api.ListOptions{LabelSelector: label}
if pods, err := config.Client.Pods(api.NamespaceAll).List(options); err == nil {
for _, pod := range pods.Items {
Logf("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp)
}
} else {
Logf("Can't list pod debug info: %v", err)
}
return fmt.Errorf("Only %d pods started out of %d", oldRunning, config.Replicas)
}
return nil
}
// StartPods is a simplified version of RunRC that does not create an RC, but creates plain Pods instead.
// It optionally waits for the pods to start running (if waitForRunning == true).
func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) {
startPodsID := string(util.NewUUID()) // So that we can label and find them
for i := 0; i < replicas; i++ {
podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
pod.ObjectMeta.Name = podName
pod.ObjectMeta.Labels["name"] = podName
pod.ObjectMeta.Labels["startPodsID"] = startPodsID
pod.Spec.Containers[0].Name = podName
_, err := c.Pods(namespace).Create(&pod)
ExpectNoError(err)
}
Logf("Waiting for running...")
if waitForRunning {
label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
err := WaitForPodsWithLabelRunning(c, namespace, label)
ExpectNoError(err, "Error waiting for %d pods to be running - probably a timeout", replicas)
}
}
func dumpPodDebugInfo(c *client.Client, pods []*api.Pod) {
badNodes := sets.NewString()
for _, p := range pods {
if p.Status.Phase != api.PodRunning {
if p.Spec.NodeName != "" {
Logf("Pod %v assigned to host %v (IP: %v) in %v", p.Name, p.Spec.NodeName, p.Status.HostIP, p.Status.Phase)
badNodes.Insert(p.Spec.NodeName)
} else {
Logf("Pod %v still unassigned", p.Name)
}
}
}
DumpNodeDebugInfo(c, badNodes.List())
}
func DumpAllNamespaceInfo(c *client.Client, namespace string) {
By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
events, err := c.Events(namespace).List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred())
// Sort events by their first timestamp
sortedEvents := events.Items
if len(sortedEvents) > 1 {
sort.Sort(byFirstTimestamp(sortedEvents))
}
for _, e := range sortedEvents {
Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
// Note that we don't wait for any Cleanup to propagate, which means
// that if you delete a bunch of pods right before ending your test,
// you may or may not see the killing/deletion/Cleanup events.
dumpAllPodInfo(c)
dumpAllNodeInfo(c)
}
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []api.Event
func (o byFirstTimestamp) Len() int { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byFirstTimestamp) Less(i, j int) bool {
if o[i].FirstTimestamp.Equal(o[j].FirstTimestamp) {
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
}
return o[i].FirstTimestamp.Before(o[j].FirstTimestamp)
}
func dumpAllPodInfo(c *client.Client) {
pods, err := c.Pods("").List(api.ListOptions{})
if err != nil {
Logf("unable to fetch pod debug info: %v", err)
}
logPodStates(pods.Items)
}
func dumpAllNodeInfo(c *client.Client) {
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Nodes().List(api.ListOptions{})
if err != nil {
Logf("unable to fetch node list: %v", err)
return
}
names := make([]string, len(nodes.Items))
for ix := range nodes.Items {
names[ix] = nodes.Items[ix].Name
}
DumpNodeDebugInfo(c, names)
}
func DumpNodeDebugInfo(c *client.Client, nodeNames []string) {
for _, n := range nodeNames {
Logf("\nLogging node info for node %v", n)
node, err := c.Nodes().Get(n)
if err != nil {
Logf("Error getting node info %v", err)
}
Logf("Node Info: %v", node)
Logf("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
Logf("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
Logf("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := GetKubeletPods(c, n)
if err != nil {
Logf("Unable to retrieve kubelet pods for node %v", n)
continue
}
for _, p := range podList.Items {
Logf("%v started at %v (%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.ContainerStatuses))
for _, c := range p.Status.ContainerStatuses {
Logf("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
HighLatencyKubeletOperations(c, 10*time.Second, n)
// TODO: Log node resource info
}
}
// getNodeEvents returns the kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, so beware of calling it during a test.
func getNodeEvents(c *client.Client, nodeName string) []api.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
"involvedObject.namespace": api.NamespaceAll,
"source": "kubelet",
}.AsSelector()
options := api.ListOptions{FieldSelector: selector}
events, err := c.Events(api.NamespaceSystem).List(options)
if err != nil {
Logf("Unexpected error retrieving node events %v", err)
return []api.Event{}
}
return events.Items
}
// ListSchedulableNodesOrDie is a convenient wrapper around listing schedulable nodes that supports retries.
func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
var nodes *api.NodeList
var err error
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
nodes, err = c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector()})
return err == nil, nil
}) != nil {
ExpectNoError(err, "Timed out while listing nodes for e2e cluster.")
}
return nodes
}
func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
if err != nil {
return err
}
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
if err = scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
}
if !wait {
return nil
}
return WaitForRCPodsRunning(c, ns, name)
}
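// Example usage (illustrative sketch; the controller name "my-rc" and the
// target size of 5 are hypothetical placeholders):
//
//	// Scale the controller to 5 replicas and wait for the pods to be Running.
//	ExpectNoError(ScaleRC(c, ns, "my-rc", 5, true))
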
// WaitForRCPodsRunning waits up to 10 minutes for pods to become Running. It assumes that the pods of the
// rc are labeled with {"name":rcName}.
func WaitForRCPodsRunning(c *client.Client, ns, rcName string) error {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
err := WaitForPodsWithLabelRunning(c, ns, selector)
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", rcName, err)
}
return nil
}
// WaitForPodsWithLabelRunning waits up to 10 minutes for all matching pods to become Running and for at least one
// matching pod to exist.
func WaitForPodsWithLabelRunning(c *client.Client, ns string, label labels.Selector) error {
running := false
PodStore := NewPodStore(c, ns, label, fields.Everything())
defer PodStore.Stop()
waitLoop:
for start := time.Now(); time.Since(start) < 10*time.Minute; time.Sleep(5 * time.Second) {
pods := PodStore.List()
if len(pods) == 0 {
continue waitLoop
}
for _, p := range pods {
if p.Status.Phase != api.PodRunning {
continue waitLoop
}
}
running = true
break
}
if !running {
return fmt.Errorf("Timeout while waiting for pods with labels %q to be running", label.String())
}
return nil
}
// Returns true if all the specified pods are scheduled, else returns false.
func podsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (bool, error) {
PodStore := NewPodStore(c, ns, label, fields.Everything())
defer PodStore.Stop()
pods := PodStore.List()
if len(pods) == 0 {
return false, nil
}
for _, pod := range pods {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
}
// WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and for at least one
// matching pod to exist. It returns the list of matching pods.
func WaitForPodsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) {
err = wait.PollImmediate(Poll, podScheduledBeforeTimeout,
func() (bool, error) {
pods, err = WaitForPodsWithLabel(c, ns, label)
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
})
return pods, err
}
// WaitForPodsWithLabel waits up to PodListTimeout for getting pods with a certain label.
func WaitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) {
for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) {
options := api.ListOptions{LabelSelector: label}
pods, err = c.Pods(ns).List(options)
Expect(err).NotTo(HaveOccurred())
if len(pods.Items) > 0 {
break
}
}
if pods == nil || len(pods.Items) == 0 {
err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
}
return
}
// Delete a Replication Controller and all pods it spawned
func DeleteRC(c *client.Client, ns, name string) error {
By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns))
rc, err := c.ReplicationControllers(ns).Get(name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("RC %s was already deleted: %v", name, err)
return nil
}
return err
}
reaper, err := kubectl.ReaperForReplicationController(c, 10*time.Minute)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("RC %s was already deleted: %v", name, err)
return nil
}
return err
}
startTime := time.Now()
err = reaper.Stop(ns, name, 0, api.NewDeleteOptions(0))
if apierrs.IsNotFound(err) {
Logf("RC %s was already deleted: %v", name, err)
return nil
}
deleteRCTime := time.Now().Sub(startTime)
Logf("Deleting RC %s took: %v", name, deleteRCTime)
if err != nil {
return fmt.Errorf("error while stopping RC: %s: %v", name, err)
}
err = waitForRCPodsGone(c, rc)
if err != nil {
return fmt.Errorf("error while deleting RC %s: %v", name, err)
}
terminatePodTime := time.Now().Sub(startTime) - deleteRCTime
Logf("Terminating RC %s pods took: %v", name, terminatePodTime)
return nil
}
// waitForRCPodsGone waits until there are no pods reported under an RC's selector (because the pods
// have completed termination).
func waitForRCPodsGone(c *client.Client, rc *api.ReplicationController) error {
labels := labels.SelectorFromSet(rc.Spec.Selector)
PodStore := NewPodStore(c, rc.Namespace, labels, fields.Everything())
defer PodStore.Stop()
return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
if pods := PodStore.List(); len(pods) == 0 {
return true, nil
}
return false, nil
})
}
// Delete a ReplicaSet and all pods it spawned
func DeleteReplicaSet(c *client.Client, ns, name string) error {
By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
rc, err := c.Extensions().ReplicaSets(ns).Get(name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), c)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
startTime := time.Now()
err = reaper.Stop(ns, name, 0, api.NewDeleteOptions(0))
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
deleteRSTime := time.Now().Sub(startTime)
Logf("Deleting RS %s took: %v", name, deleteRSTime)
if err == nil {
err = waitForReplicaSetPodsGone(c, rc)
}
terminatePodTime := time.Now().Sub(startTime) - deleteRSTime
Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
return err
}
// waitForReplicaSetPodsGone waits until there are no pods reported under a
// ReplicaSet selector (because the pods have completed termination).
func waitForReplicaSetPodsGone(c *client.Client, rs *extensions.ReplicaSet) error {
return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
ExpectNoError(err)
options := api.ListOptions{LabelSelector: selector}
if pods, err := c.Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
return true, nil
}
return false, nil
})
}
// WaitForDeploymentStatus waits for the deployment to reach the desired state.
// Returns an error if minAvailable or maxCreated is violated at any time.
func WaitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, desiredUpdatedReplicas, minAvailable, maxCreated, minReadySeconds int32) error {
var oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
var newRS *extensions.ReplicaSet
var deployment *extensions.Deployment
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
oldRSs, allOldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c)
if err != nil {
return false, err
}
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c)
if err != nil {
return false, err
}
if newRS == nil {
// The new RS hasn't been created yet.
return false, nil
}
allRSs = append(oldRSs, newRS)
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
totalAvailable, err := deploymentutil.GetAvailablePodsForReplicaSets(c, allRSs, minReadySeconds)
if err != nil {
return false, err
}
if totalCreated > maxCreated {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfReplicaSets(c, allRSs, minReadySeconds)
return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
}
if totalAvailable < minAvailable {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfReplicaSets(c, allRSs, minReadySeconds)
return false, fmt.Errorf("total pods available: %d, less than the min required: %d", totalAvailable, minAvailable)
}
// When the deployment status and its underlying resources reach the desired state, we're done
if deployment.Status.Replicas == desiredUpdatedReplicas &&
deployment.Status.UpdatedReplicas == desiredUpdatedReplicas &&
deploymentutil.GetReplicaCountForReplicaSets(oldRSs) == 0 &&
deploymentutil.GetReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}) == desiredUpdatedReplicas {
return true, nil
}
return false, nil
})
if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfReplicaSets(c, allRSs, minReadySeconds)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %s status to match expectation: %v", deploymentName, err)
}
return nil
}
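// Example usage (illustrative sketch; the clientset variable cs, the deployment
// name, and the numeric expectations below are hypothetical placeholders):
//
//	// 3 updated replicas desired, at least 2 available and at most 4 created
//	// at any point during the rollout, with no minReadySeconds requirement.
//	ExpectNoError(WaitForDeploymentStatus(cs, ns, "my-deployment", 3, 2, 4, 0))
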
// WaitForDeploymentUpdatedReplicasLTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas.
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int, desiredGeneration int64) error {
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= int32(minUpdatedReplicas) {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s to have at least %d updpatedReplicas: %v", deploymentName, minUpdatedReplicas, err)
}
return nil
}
// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back.
// Note that the rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
// Rollback not set or is kicked off
if deployment.Spec.RollbackTo == nil {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
}
return nil
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
var deployment *extensions.Deployment
var newRS *extensions.ReplicaSet
err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c)
if err != nil {
return false, err
}
// Check revision of this deployment, and of the new replica set of this deployment
if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision ||
newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision ||
deployment.Spec.Template.Spec.Containers[0].Image != image || newRS.Spec.Template.Spec.Containers[0].Image != image {
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, nil, newRS)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %s (got %s / %s) and new RS %s (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err)
}
return nil
}
// CheckNewRSAnnotations checks whether the new RS's annotations are as expected.
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return err
}
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
if err != nil {
return err
}
for k, v := range expectedAnnotations {
// Skip checking revision annotations
if k != deploymentutil.RevisionAnnotation && v != newRS.Annotations[k] {
return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
}
}
return nil
}
func WaitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := api.ListOptions{LabelSelector: label}
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
pods, err := c.Pods(ns).List(options)
if err != nil {
return false, nil
}
for _, pod := range pods.Items {
if !deploymentutil.IsPodAvailable(&pod, int32(minReadySeconds)) {
return false, nil
}
}
return true, nil
})
}
// Waits for the deployment to clean up old rcs.
func WaitForDeploymentOldRSsNum(c *clientset.Clientset, ns, deploymentName string, desiredRSNum int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
_, oldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c)
if err != nil {
return false, err
}
return len(oldRSs) == desiredRSNum, nil
})
}
func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
Logf("Deployment: %+v. Selector = %+v", deployment, deployment.Spec.Selector)
for i := range allOldRSs {
Logf("All old ReplicaSets (%d/%d) of deployment %s: %+v. Selector = %+v", i+1, len(allOldRSs), deployment.Name, allOldRSs[i], allOldRSs[i].Spec.Selector)
}
Logf("New ReplicaSet of deployment %s: %+v. Selector = %+v", deployment.Name, newRS, newRS.Spec.Selector)
}
func WaitForObservedDeployment(c *clientset.Clientset, ns, deploymentName string, desiredGeneration int64) error {
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, Poll, 1*time.Minute)
}
func logPodsOfReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int32) {
allPods, err := deploymentutil.GetPodsForReplicaSets(c, rss)
if err == nil {
for _, pod := range allPods {
availability := "not available"
if deploymentutil.IsPodAvailable(&pod, minReadySeconds) {
availability = "available"
}
Logf("Pod %s is %s: %+v", pod.Name, availability, pod)
}
}
}
// Waits for the number of events on the given object to reach a desired count.
func WaitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
events, err := c.Events(ns).Search(objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount == desiredEventsCount {
return true, nil
}
if eventsCount < desiredEventsCount {
return false, nil
}
// Number of events has exceeded the desired count.
return false, fmt.Errorf("number of events has exceeded the desired count, eventsCount: %d, desiredCount: %d", eventsCount, desiredEventsCount)
})
}
// Waits for the number of events on the given object to be at least a desired count.
func WaitForPartialEvents(c *client.Client, ns string, objOrRef runtime.Object, atLeastEventsCount int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
events, err := c.Events(ns).Search(objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount >= atLeastEventsCount {
return true, nil
}
return false, nil
})
}
type updateDeploymentFunc func(d *extensions.Deployment)
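// UpdateDeploymentWithRetries fetches the named deployment, applies applyUpdate
// to it and pushes the result to the apiserver, retrying the get/update cycle
// for up to one minute if the update fails.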
func UpdateDeploymentWithRetries(c *clientset.Clientset, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
deployments := c.Extensions().Deployments(namespace)
err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if deployment, err = deployments.Get(name); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(deployment)
if deployment, err = deployments.Update(deployment); err == nil {
Logf("Updating deployment %s", name)
return true, nil
}
return false, nil
})
return deployment, err
}
// FailedContainers inspects all containers in a pod and returns failure
// information for containers that have failed or been restarted.
// A map is returned where the key is the containerID and the value is a
// struct containing the restart and failure information
func FailedContainers(pod *api.Pod) map[string]ContainerFailures {
var state ContainerFailures
states := make(map[string]ContainerFailures)
statuses := pod.Status.ContainerStatuses
if len(statuses) == 0 {
return nil
} else {
for _, status := range statuses {
if status.State.Terminated != nil {
states[status.ContainerID] = ContainerFailures{status: status.State.Terminated}
} else if status.LastTerminationState.Terminated != nil {
states[status.ContainerID] = ContainerFailures{status: status.LastTerminationState.Terminated}
}
if status.RestartCount > 0 {
var ok bool
if state, ok = states[status.ContainerID]; !ok {
state = ContainerFailures{}
}
state.Restarts = int(status.RestartCount)
states[status.ContainerID] = state
}
}
}
return states
}
// Prints the histogram of the events and returns the number of bad events.
func BadEvents(events []*api.Event) int {
type histogramKey struct {
reason string
source string
}
histogram := make(map[histogramKey]int)
for _, e := range events {
histogram[histogramKey{reason: e.Reason, source: e.Source.Component}]++
}
for key, number := range histogram {
Logf("- reason: %s, source: %s -> %d", key.reason, key.source, number)
}
badPatterns := []string{"kill", "fail"}
badEvents := 0
for key, number := range histogram {
for _, s := range badPatterns {
if strings.Contains(key.reason, s) {
Logf("WARNING %d events from %s with reason: %s", number, key.source, key.reason)
badEvents += number
break
}
}
}
return badEvents
}
// NodeAddresses returns the first address of the given type of each node.
func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []string {
hosts := []string{}
for _, n := range nodelist.Items {
for _, addr := range n.Status.Addresses {
// Use the first external IP address we find on the node, and
// use at most one per node.
// TODO(roberthbailey): Use the "preferred" address for the node, once
// such a thing is defined (#2462).
if addr.Type == addrType {
hosts = append(hosts, addr.Address)
break
}
}
}
return hosts
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node.
// It returns an error if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
func NodeSSHHosts(c *client.Client) ([]string, error) {
nodelist := ListSchedulableNodesOrDie(c)
// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
hosts := NodeAddresses(nodelist, api.NodeExternalIP)
// Error if any node didn't have an external IP.
if len(hosts) != len(nodelist.Items) {
return hosts, fmt.Errorf(
"only found %d external IPs on nodes, but found %d nodes. Nodelist: %v",
len(hosts), len(nodelist.Items), nodelist)
}
sshHosts := make([]string, 0, len(hosts))
for _, h := range hosts {
sshHosts = append(sshHosts, net.JoinHostPort(h, "22"))
}
return sshHosts, nil
}
type SSHResult struct {
User string
Host string
Cmd string
Stdout string
Stderr string
Code int
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
// is no error performing the SSH, the stdout, stderr, and exit code are
// returned.
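// For example, RestartKubeProxy below uses it as:
//	result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider)
//	if err != nil || result.Code != 0 {
//		LogSSHResult(result)
//	}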
func SSH(cmd, host, provider string) (SSHResult, error) {
result := SSHResult{Host: host, Cmd: cmd}
// Get a signer for the provider.
signer, err := GetSigner(provider)
if err != nil {
return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
}
// RunSSHCommand will default to Getenv("USER") if user == "", but we're
// defaulting here as well for logging clarity.
result.User = os.Getenv("KUBE_SSH_USER")
if result.User == "" {
result.User = os.Getenv("USER")
}
stdout, stderr, code, err := sshutil.RunSSHCommand(cmd, result.User, host, signer)
result.Stdout = stdout
result.Stderr = stderr
result.Code = code
return result, err
}
func LogSSHResult(result SSHResult) {
remote := fmt.Sprintf("%s@%s", result.User, result.Host)
Logf("ssh %s: command: %s", remote, result.Cmd)
Logf("ssh %s: stdout: %q", remote, result.Stdout)
Logf("ssh %s: stderr: %q", remote, result.Stderr)
Logf("ssh %s: exit code: %d", remote, result.Code)
}
func IssueSSHCommand(cmd, provider string, node *api.Node) error {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == api.NodeExternalIP {
host = a.Address + ":22"
break
}
}
if host == "" {
return fmt.Errorf("couldn't find external IP address for node %s", node.Name)
}
Logf("Calling %s on %s(%s)", cmd, node.Name, host)
result, err := SSH(cmd, host, provider)
LogSSHResult(result)
if result.Code != 0 || err != nil {
return fmt.Errorf("failed running %q: %v (exit code %d)", cmd, err, result.Code)
}
return nil
}
// NewHostExecPodSpec returns the pod spec of the hostexec pod
func NewHostExecPodSpec(ns, name string) *api.Pod {
pod := &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
},
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "hostexec",
Image: "gcr.io/google_containers/hostexec:1.2",
ImagePullPolicy: api.PullIfNotPresent,
},
},
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
},
}
return pod
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
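// For example, RunHostCmd(ns, "hostexec", "cat /etc/hostname") is equivalent to
// `kubectl exec --namespace=<ns> hostexec -- /bin/sh -c "cat /etc/hostname"`.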
func RunHostCmd(ns, name, cmd string) (string, error) {
return RunKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
stdout, err := RunHostCmd(ns, name, cmd)
Logf("stdout: %v", stdout)
ExpectNoError(err)
return stdout
}
// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running
func LaunchHostExecPod(client *client.Client, ns, name string) *api.Pod {
hostExecPod := NewHostExecPodSpec(ns, name)
pod, err := client.Pods(ns).Create(hostExecPod)
ExpectNoError(err)
err = WaitForPodRunningInNamespace(client, pod.Name, pod.Namespace)
ExpectNoError(err)
return pod
}
// GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
// used to SSH to their nodes.
func GetSigner(provider string) (ssh.Signer, error) {
// Get the directory in which SSH keys are located.
keydir := filepath.Join(os.Getenv("HOME"), ".ssh")
// Select the key itself to use. When implementing more providers here,
// please also add them to any SSH tests that are disabled because of signer
// support.
keyfile := ""
switch provider {
case "gce", "gke", "kubemark":
keyfile = "google_compute_engine"
case "aws":
// If there is an env. variable override, use that.
aws_keyfile := os.Getenv("AWS_SSH_KEY")
if len(aws_keyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(aws_keyfile)
}
// Otherwise revert to home dir
keyfile = "kube_aws_rsa"
default:
return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider)
}
key := filepath.Join(keydir, keyfile)
return sshutil.MakePrivateKeySignerFromFile(key)
}
// CheckPodsRunningReady returns whether all pods whose names are listed in podNames
// in namespace ns are running and ready, using c and waiting at most timeout.
func CheckPodsRunningReady(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
np, desc := len(podNames), "running and ready"
Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
result := make(chan bool, len(podNames))
for ix := range podNames {
// Launch off pod readiness checkers.
go func(name string) {
err := waitForPodCondition(c, ns, name, desc, timeout, PodRunningReady)
result <- err == nil
}(podNames[ix])
}
// Wait for them all to finish.
success := true
// TODO(a-robinson): Change to `for range` syntax and remove logging once we
// support only Go >= 1.4.
for _, podName := range podNames {
if !<-result {
Logf("Pod %[1]s failed to be %[2]s.", podName, desc)
success = false
}
}
Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
return success
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c *client.Client, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, api.NodeReady, true, timeout)
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g. false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c *client.Client, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, api.NodeReady, false, timeout)
}
func IsNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool {
// Check the node readiness condition (logging all).
for _, cond := range node.Status.Conditions {
// Ensure that the condition type and the status matches as desired.
if cond.Type == conditionType {
if (cond.Status == api.ConditionTrue) == wantTrue {
return true
} else {
Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == api.ConditionTrue, wantTrue, cond.Reason, cond.Message)
return false
}
}
}
Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
return false
}
// WaitForNodeToBe returns whether node "name's" condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitForNodeToBe(c *client.Client, name string, conditionType api.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
node, err := c.Nodes().Get(name)
if err != nil {
Logf("Couldn't get node %s", name)
continue
}
if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) {
return true
}
}
Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
return false
}
// checks whether all registered nodes are ready
func AllNodesReady(c *client.Client, timeout time.Duration) error {
Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []api.Node
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Nodes().List(api.ListOptions{})
if err != nil {
return false, err
}
for _, node := range nodes.Items {
if !IsNodeConditionSetAsExpected(&node, api.NodeReady, true) {
notReady = append(notReady, node)
}
}
return len(notReady) == 0, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > 0 {
return fmt.Errorf("Not ready nodes: %v", notReady)
}
return nil
}
// Filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition
// TODO: consider merging with pkg/client/cache.NodeLister
func FilterNodes(nodeList *api.NodeList, fn func(node api.Node) bool) {
var l []api.Node
for _, node := range nodeList.Items {
if fn(node) {
l = append(l, node)
}
}
nodeList.Items = l
}
// ParseKVLines parses output that looks like lines containing "<key>: <val>"
// and returns <val> if <key> is found. Otherwise, it returns the empty string.
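// For example, ParseKVLines("foo: 1\nbar: 2", "bar") returns "2".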
func ParseKVLines(output, key string) string {
delim := ":"
key = key + delim
for _, line := range strings.Split(output, "\n") {
pieces := strings.SplitAfterN(line, delim, 2)
if len(pieces) != 2 {
continue
}
k, v := pieces[0], pieces[1]
if k == key {
return strings.TrimSpace(v)
}
}
return ""
}
func RestartKubeProxy(host string) error {
// TODO: Make it work for all providers.
if !ProviderIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
// kubelet will restart the kube-proxy since it's running in a static pod
result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart kube-proxy: %v", err)
}
// wait for kube-proxy to come back up
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
result, err := SSH("sudo /bin/sh -c 'pgrep kube-proxy | wc -l'", host, TestContext.Provider)
if err != nil {
return false, err
}
if result.Code != 0 {
LogSSHResult(result)
return false, fmt.Errorf("failed to run command, exited %d", result.Code)
}
if result.Stdout == "0\n" {
return false, nil
}
Logf("kube-proxy is back up.")
return true, nil
})
if err != nil {
return fmt.Errorf("kube-proxy didn't recover: %v", err)
}
return nil
}
func RestartApiserver() error {
// TODO: Make it work for all providers.
if !ProviderIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
var command string
if ProviderIs("gce", "gke") {
command = "sudo docker ps | grep /kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill"
} else {
command = "sudo /etc/init.d/kube-apiserver restart"
}
result, err := SSH(command, GetMasterHost()+":22", TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart apiserver: %v", err)
}
return nil
}
func WaitForApiserverUp(c *client.Client) error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
body, err := c.Get().AbsPath("/healthz").Do().Raw()
if err == nil && string(body) == "ok" {
return nil
}
}
return fmt.Errorf("waiting for apiserver timed out")
}
// WaitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it.
// By cluster size we mean number of Nodes excluding Master Node.
func WaitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector()})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
FilterNodes(nodes, func(node api.Node) bool {
return IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired size %d", size)
return nil
}
Logf("Waiting for cluster size %d, current size %d, not ready nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size)
}
// GetHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
func GetHostExternalAddress(client *client.Client, p *api.Pod) (externalAddress string, err error) {
node, err := client.Nodes().Get(p.Spec.NodeName)
if err != nil {
return "", err
}
for _, address := range node.Status.Addresses {
if address.Type == api.NodeExternalIP {
if address.Address != "" {
externalAddress = address.Address
break
}
}
}
if externalAddress == "" {
err = fmt.Errorf("No external address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
return
}
type extractRT struct {
http.Header
}
func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
rt.Header = req.Header
return &http.Response{}, nil
}
// headersForConfig extracts any http client logic necessary for the provided
// config.
func headersForConfig(c *restclient.Config) (http.Header, error) {
extract := &extractRT{}
rt, err := restclient.HTTPWrappersForConfig(c, extract)
if err != nil {
return nil, err
}
if _, err := rt.RoundTrip(&http.Request{}); err != nil {
return nil, err
}
return extract.Header, nil
}
// OpenWebSocketForURL constructs a websocket connection to the provided URL, using the client
// config, with the specified protocols.
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
tlsConfig, err := restclient.TLSConfigFor(config)
if err != nil {
return nil, fmt.Errorf("failed to create tls config: %v", err)
}
if tlsConfig != nil {
url.Scheme = "wss"
if !strings.Contains(url.Host, ":") {
url.Host += ":443"
}
} else {
url.Scheme = "ws"
if !strings.Contains(url.Host, ":") {
url.Host += ":80"
}
}
headers, err := headersForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to load http headers: %v", err)
}
cfg, err := websocket.NewConfig(url.String(), "http://localhost")
if err != nil {
return nil, fmt.Errorf("failed to create websocket config: %v", err)
}
cfg.Header = headers
cfg.TlsConfig = tlsConfig
cfg.Protocol = protocols
return websocket.DialConfig(cfg)
}
// getIngressAddress returns the ips/hostnames associated with the Ingress.
func getIngressAddress(client *client.Client, ns, name string) ([]string, error) {
ing, err := client.Extensions().Ingress(ns).Get(name)
if err != nil {
return nil, err
}
addresses := []string{}
for _, a := range ing.Status.LoadBalancer.Ingress {
if a.IP != "" {
addresses = append(addresses, a.IP)
}
if a.Hostname != "" {
addresses = append(addresses, a.Hostname)
}
}
return addresses, nil
}
// WaitForIngressAddress waits for the Ingress to acquire an address.
func WaitForIngressAddress(c *client.Client, ns, ingName string, timeout time.Duration) (string, error) {
var address string
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
ipOrNameList, err := getIngressAddress(c, ns, ingName)
if err != nil || len(ipOrNameList) == 0 {
Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
return false, nil
}
address = ipOrNameList[0]
return true, nil
})
return address, err
}
// Looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("log", podName, container, fmt.Sprintf("--namespace=%v", ns))
})
}
// Looks for the given string in a file in a specific pod container
func LookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file)
})
}
// Looks for the given string in the output of a command executed in a specific pod container
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
// use the first container
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
args = append(args, command...)
return RunKubectlOrDie(args...)
})
}
// Looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. Returns last log and possibly
// error if the string was not found.
func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
result = fn()
if strings.Contains(result, expectedString) {
return
}
}
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
return
}
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client *client.Client, ns, name string, svcPort int) (int, error) {
svc, err := client.Services(ns).Get(name)
if err != nil {
return 0, err
}
for _, p := range svc.Spec.Ports {
if p.Port == int32(svcPort) {
if p.NodePort != 0 {
return int(p.NodePort), nil
}
}
}
return 0, fmt.Errorf(
"No node port found for service %v, port %v", name, svcPort)
}
// GetNodePortURL returns the url to a nodeport Service.
func GetNodePortURL(client *client.Client, ns, name string, svcPort int) (string, error) {
nodePort, err := getSvcNodePort(client, ns, name, svcPort)
if err != nil {
return "", err
}
// This list of nodes must not include the master, which is marked
// unschedulable, since the master doesn't run kube-proxy. Without
// kube-proxy NodePorts won't work.
var nodes *api.NodeList
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
nodes, err = client.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector()})
return err == nil, nil
}) != nil {
return "", err
}
if len(nodes.Items) == 0 {
return "", fmt.Errorf("Unable to list nodes in cluster.")
}
for _, node := range nodes.Items {
for _, address := range node.Status.Addresses {
if address.Type == api.NodeExternalIP {
if address.Address != "" {
return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
}
}
}
}
return "", fmt.Errorf("Failed to find external address for service %v", name)
}
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
// none are running, otherwise it does what a synchronous scale operation would do.
func ScaleRCByLabels(client *client.Client, ns string, l map[string]string, replicas uint) error {
listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l))}
rcs, err := client.ReplicationControllers(ns).List(listOpts)
if err != nil {
return err
}
if len(rcs.Items) == 0 {
return fmt.Errorf("RC with labels %v not found in ns %v", l, ns)
}
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
for _, labelRC := range rcs.Items {
name := labelRC.Name
if err := ScaleRC(client, ns, name, replicas, false); err != nil {
return err
}
rc, err := client.ReplicationControllers(ns).Get(name)
if err != nil {
return err
}
if replicas == 0 {
if err := waitForRCPodsGone(client, rc); err != nil {
return err
}
} else {
if err := WaitForPodsWithLabelRunning(
client, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
return err
}
}
}
return nil
}
func GetPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false)
}
func getPreviousPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true)
}
// utility function for gomega Eventually
func getPodLogsInternal(c *client.Client, namespace, podName, containerName string, previous bool) (string, error) {
logs, err := c.Get().
Resource("pods").
Namespace(namespace).
Name(podName).SubResource("log").
Param("container", containerName).
Param("previous", strconv.FormatBool(previous)).
Do().
Raw()
if err != nil {
return "", err
}
if err == nil && strings.Contains(string(logs), "Internal Error") {
return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
}
return string(logs), err
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
return ensureGCELoadBalancerResourcesDeleted(ip, portRange)
}
return nil
}
func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
}
project := TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err)
}
return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
service := gceCloud.GetComputeService()
list, err := service.ForwardingRules.List(project, region).Do()
if err != nil {
return false, err
}
for ix := range list.Items {
item := list.Items[ix]
if item.PortRange == portRange && item.IPAddress == ip {
Logf("found a load balancer: %v", item)
return false, nil
}
}
return true, nil
})
}
// The following helper functions can block/unblock network from source
// host to destination host by manipulating iptable rules.
// This function assumes it can ssh to the source host.
//
// Caution:
// Pass IP addresses rather than hostnames. Using hostnames causes iptables to
// do a DNS lookup to resolve the name to an IP address, which slows down the
// test and makes it fail if DNS is absent or broken.
//
// Suggested usage pattern:
// func foo() {
// ...
// defer UnblockNetwork(from, to)
// BlockNetwork(from, to)
// ...
// }
//
func BlockNetwork(from string, to string) {
Logf("block network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
if result, err := SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil {
LogSSHResult(result)
Failf("Unexpected error: %v", err)
}
}
func UnblockNetwork(from string, to string) {
Logf("Unblock network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule)
// Undrop command may fail if the rule has never been created.
// In such case we just lose 30 seconds, but the cluster is healthy.
// But if the rule had been created and removing it failed, the node is broken and
// not coming back. Subsequent tests will run on fewer nodes (some of the tests
// may fail). Manual intervention is required in such case (recreating the
// cluster solves the problem too).
err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) {
result, err := SSH(undropCmd, from, TestContext.Provider)
if result.Code == 0 && err == nil {
return true, nil
}
LogSSHResult(result)
if err != nil {
Logf("Unexpected error: %v", err)
}
return false, nil
})
if err != nil {
Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+
"required on host %s: remove rule %s, if exists", from, iptablesRule)
}
}
func isElementOf(podUID types.UID, pods *api.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
}
}
return false
}
func CheckRSHashLabel(rs *extensions.ReplicaSet) error {
if len(rs.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 ||
len(rs.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 ||
len(rs.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 {
return fmt.Errorf("unexpected RS missing required pod-hash-template: %+v, selector = %+v, template = %+v", rs, rs.Spec.Selector, rs.Spec.Template)
}
return nil
}
func CheckPodHashLabel(pods *api.PodList) error {
invalidPod := ""
for _, pod := range pods.Items {
if len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 {
if len(invalidPod) == 0 {
invalidPod = "unexpected pods missing required pod-hash-template:"
}
invalidPod = fmt.Sprintf("%s %+v;", invalidPod, pod)
}
}
if len(invalidPod) > 0 {
return fmt.Errorf("%s", invalidPod)
}
return nil
}
// GetReadyNodes retrieves a list of schedulable nodes whose condition
// is Ready. An error will be returned if no such nodes are found.
func GetReadyNodes(f *Framework) (nodes *api.NodeList, err error) {
nodes = ListSchedulableNodesOrDie(f.Client)
// Previous tests may have caused failures on some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test).
FilterNodes(nodes, func(node api.Node) bool {
return !node.Spec.Unschedulable && IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
})
if len(nodes.Items) == 0 {
return nil, errors.New("No Ready nodes found.")
}
return nodes, nil
}
// timeout for proxy requests.
const proxyTimeout = 2 * time.Minute
// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func NodeProxyRequest(c *client.Client, node, endpoint string) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
if err != nil {
return restclient.Result{}, err
}
var result restclient.Result
finished := make(chan struct{})
go func() {
if subResourceProxyAvailable {
result = c.Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix(endpoint).
Do()
} else {
result = c.Get().
Prefix("proxy").
Resource("nodes").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix(endpoint).
Do()
}
finished <- struct{}{}
}()
select {
case <-finished:
return result, nil
case <-time.After(proxyTimeout):
return restclient.Result{}, nil
}
}
// GetKubeletPods retrieves the list of pods on the kubelet
func GetKubeletPods(c *client.Client, node string) (*api.PodList, error) {
return getKubeletPods(c, node, "pods")
}
// GetKubeletRunningPods retrieves the list of running pods on the kubelet. The pods
// include necessary information (e.g., UID, name, namespace for
// pods/containers), but do not contain the full spec.
func GetKubeletRunningPods(c *client.Client, node string) (*api.PodList, error) {
return getKubeletPods(c, node, "runningpods")
}
func getKubeletPods(c *client.Client, node, resource string) (*api.PodList, error) {
result := &api.PodList{}
client, err := NodeProxyRequest(c, node, resource)
if err != nil {
return &api.PodList{}, err
}
if err = client.Into(result); err != nil {
return &api.PodList{}, err
}
return result, nil
}
// LaunchWebserverPod launches a pod serving http on port 8080 to act
// as the target for networking connectivity checks. The ip address
// of the created pod will be returned if the pod is launched
// successfully.
func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
containerName := fmt.Sprintf("%s-container", podName)
port := 8080
pod := &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
},
ObjectMeta: api.ObjectMeta{
Name: podName,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
Env: []api.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
Ports: []api.ContainerPort{{ContainerPort: int32(port)}},
},
},
NodeName: nodeName,
RestartPolicy: api.RestartPolicyNever,
},
}
podClient := f.Client.Pods(f.Namespace.Name)
_, err := podClient.Create(pod)
ExpectNoError(err)
ExpectNoError(f.WaitForPodRunning(podName))
createdPod, err := podClient.Get(podName)
ExpectNoError(err)
ip = fmt.Sprintf("%s:%d", createdPod.Status.PodIP, port)
Logf("Target pod IP:port is %s", ip)
return
}
// CheckConnectivityToHost launches a pod running wget on the
// specified node to test connectivity to the specified host. An
// error will be returned if the host is not reachable from the pod.
func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeout int) error {
contName := fmt.Sprintf("%s-container", podName)
pod := &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
},
ObjectMeta: api.ObjectMeta{
Name: podName,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: contName,
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"wget", fmt.Sprintf("--timeout=%d", timeout), "-s", host},
},
},
NodeName: nodeName,
RestartPolicy: api.RestartPolicyNever,
},
}
podClient := f.Client.Pods(f.Namespace.Name)
_, err := podClient.Create(pod)
if err != nil {
return err
}
defer podClient.Delete(podName, nil)
return WaitForPodSuccessInNamespace(f.Client, podName, contName, f.Namespace.Name)
}
// CoreDump SSHs to the master and all nodes and dumps their logs into dir.
// It shells out to cluster/log-dump.sh to accomplish this.
func CoreDump(dir string) {
cmd := exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump.sh"), dir)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
Logf("Error running cluster/log-dump.sh: %v", err)
}
}
|
[
"\"KUBE_SSH_USER\"",
"\"USER\"",
"\"HOME\"",
"\"AWS_SSH_KEY\""
] |
[] |
[
"USER",
"HOME",
"AWS_SSH_KEY",
"KUBE_SSH_USER"
] |
[]
|
["USER", "HOME", "AWS_SSH_KEY", "KUBE_SSH_USER"]
|
go
| 4 | 0 | |
tencentcloud/cloud.go
|
package tencentcloud
import (
"encoding/json"
"errors"
"io"
"io/ioutil"
"os"
"github.com/dbdd4us/qcloudapi-sdk-go/ccs"
"github.com/dbdd4us/qcloudapi-sdk-go/clb"
"github.com/dbdd4us/qcloudapi-sdk-go/common"
"github.com/dbdd4us/qcloudapi-sdk-go/cvm"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
)
const (
providerName = "tencentcloud"
)
var (
CloudInstanceNotFound = errors.New("tencentcloud instance not found")
)
func init() {
cloudprovider.RegisterCloudProvider(providerName, NewCloud)
}
func NewCloud(config io.Reader) (cloudprovider.Interface, error) {
var c Config
if config != nil {
cfg, err := ioutil.ReadAll(config)
if err != nil {
return nil, err
}
if err := json.Unmarshal(cfg, &c); err != nil {
return nil, err
}
}
if c.Region == "" {
c.Region = os.Getenv("TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_REGION")
}
if c.VpcId == "" {
c.VpcId = os.Getenv("TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_VPC_ID")
}
if c.SecretId == "" {
c.SecretId = os.Getenv("TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_SECRET_ID")
}
if c.SecretKey == "" {
c.SecretKey = os.Getenv("TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_SECRET_KEY")
}
if c.ClusterRouteTable == "" {
c.ClusterRouteTable = os.Getenv("TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_CLUSTER_ROUTE_TABLE")
}
return &Cloud{config: c}, nil
}
type Cloud struct {
config Config
kubeClient kubernetes.Interface
cvm *cvm.Client
cvmV3 *cvm.Client
ccs *ccs.Client
clb *clb.Client
}
type Config struct {
Region string `json:"region"`
VpcId string `json:"vpc_id"`
SecretId string `json:"secret_id"`
SecretKey string `json:"secret_key"`
ClusterRouteTable string `json:"cluster_route_table"`
}
// Initialize provides the cloud with a kubernetes client builder and may spawn goroutines
// to perform housekeeping activities within the cloud provider.
func (cloud *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {
cloud.kubeClient = clientBuilder.ClientOrDie("tencentcloud-cloud-provider")
cvmClient, err := cvm.NewClient(
common.Credential{SecretId: cloud.config.SecretId, SecretKey: cloud.config.SecretKey},
common.Opts{Region: cloud.config.Region},
)
if err != nil {
panic(err)
}
cloud.cvm = cvmClient
cvmV3Client, err := cvm.NewClient(
common.Credential{SecretId: cloud.config.SecretId, SecretKey: cloud.config.SecretKey},
common.Opts{Region: cloud.config.Region, Host: cvm.CvmV3Host, Path: cvm.CvmV3Path},
)
if err != nil {
panic(err)
}
cloud.cvmV3 = cvmV3Client
ccsClient, err := ccs.NewClient(
common.Credential{SecretId: cloud.config.SecretId, SecretKey: cloud.config.SecretKey},
common.Opts{Region: cloud.config.Region},
)
if err != nil {
panic(err)
}
cloud.ccs = ccsClient
clbClient, err := clb.NewClient(
common.Credential{SecretId: cloud.config.SecretId, SecretKey: cloud.config.SecretKey},
common.Opts{Region: cloud.config.Region},
)
if err != nil {
panic(err)
}
cloud.clb = clbClient
return
}
// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
func (cloud *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
return cloud, true
}
// Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.
func (cloud *Cloud) Instances() (cloudprovider.Instances, bool) {
return cloud, true
}
// Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.
func (cloud *Cloud) Zones() (cloudprovider.Zones, bool) {
return nil, false
}
// Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.
func (cloud *Cloud) Clusters() (cloudprovider.Clusters, bool) {
return nil, false
}
// Routes returns a routes interface along with whether the interface is supported.
func (cloud *Cloud) Routes() (cloudprovider.Routes, bool) {
return cloud, true
}
// ProviderName returns the cloud provider ID.
func (cloud *Cloud) ProviderName() string {
return providerName
}
// HasClusterID returns true if a ClusterID is required and set
func (cloud *Cloud) HasClusterID() bool {
return false
}
|
[
"\"TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_REGION\"",
"\"TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_VPC_ID\"",
"\"TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_SECRET_ID\"",
"\"TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_SECRET_KEY\"",
"\"TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_CLUSTER_ROUTE_TABLE\""
] |
[] |
[
"TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_CLUSTER_ROUTE_TABLE",
"TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_SECRET_KEY",
"TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_SECRET_ID",
"TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_VPC_ID",
"TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_REGION"
] |
[]
|
["TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_CLUSTER_ROUTE_TABLE", "TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_SECRET_KEY", "TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_SECRET_ID", "TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_VPC_ID", "TENCENTCLOUD_CLOUD_CONTROLLER_MANAGER_REGION"]
|
go
| 5 | 0 | |
gunicorn.py
|
import os
bind = '0.0.0.0:5000'
accesslog = 'app.log'
access_log_format = \
'%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
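# raw_env forwards the GRAPH_CLIENT_ID / GRAPH_CLIENT_SECRET values from the
# host environment into the environment of the gunicorn worker processes.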
raw_env = [
'GRAPH_CLIENT_ID=' + os.getenv('GRAPH_CLIENT_ID'),
'GRAPH_CLIENT_SECRET=' + os.getenv('GRAPH_CLIENT_SECRET')
]
|
[] |
[] |
[
"GRAPH_CLIENT_ID",
"GRAPH_CLIENT_SECRET"
] |
[]
|
["GRAPH_CLIENT_ID", "GRAPH_CLIENT_SECRET"]
|
python
| 2 | 0 | |
admin/examples/organization/users/users.go
|
package main
import (
"context"
"github.com/ctreminiom/go-atlassian/admin"
"log"
"net/url"
"os"
)
func main() {
//ATLASSIAN_ADMIN_TOKEN
var apiKey = os.Getenv("ATLASSIAN_ADMIN_TOKEN")
cloudAdmin, err := admin.New(nil)
if err != nil {
log.Fatal(err)
}
cloudAdmin.Auth.SetBearerToken(apiKey)
cloudAdmin.Auth.SetUserAgent("curl/7.54.0")
var (
organizationID = "9a1jj823-jac8-123d-jj01-63315k059cb2"
cursor string
userChunks []*admin.OrganizationUserPageScheme
)
for {
users, response, err := cloudAdmin.Organization.Users(context.Background(), organizationID, cursor)
if err != nil {
if response != nil {
log.Println("Response HTTP Response", response.Bytes.String())
}
log.Fatal(err)
}
log.Println("Response HTTP Code", response.Code)
log.Println("HTTP Endpoint Used", response.Endpoint)
userChunks = append(userChunks, users)
if len(users.Links.Next) == 0 {
break
}
// extract the cursor for the next page
nextAsURL, err := url.Parse(users.Links.Next)
if err != nil {
log.Fatal(err)
}
cursor = nextAsURL.Query().Get("cursor")
}
for _, chunk := range userChunks {
for _, user := range chunk.Data {
log.Println(user.Email, user.Name)
}
}
}
|
[
"\"ATLASSIAN_ADMIN_TOKEN\""
] |
[] |
[
"ATLASSIAN_ADMIN_TOKEN"
] |
[]
|
["ATLASSIAN_ADMIN_TOKEN"]
|
go
| 1 | 0 | |
Auth/jwt.go
|
package Auth
import (
"GO-INVEST/errs"
"GO-INVEST/types"
"encoding/json"
"errors"
"fmt"
"github.com/go-chi/render"
"github.com/jinzhu/gorm"
"io"
"log"
"os"
"time"
"net/http"
"github.com/auth0/go-jwt-middleware"
jwt "github.com/dgrijalva/jwt-go"
)
const (
APP_KEY = "DAEMONS"
)
type User_temp struct{
Uname string `json:"Username"`
Upass string `json:"Password"`
}
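// MySQL DSN for the Stocks database; the host is read from the CONTAINER_NAME
// environment variable.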
var dnsstr1 =fmt.Sprintf("root:root@tcp(%s:3306)/Stocks?charset=utf8&parseTime=True",os.Getenv("CONTAINER_NAME"))
// TokenHandler is our handler to take a username and password and,
// if it's valid, return a token used for future requests.
func TokenHandler(w http.ResponseWriter, r *http.Request) {
var u1 User_temp
var temp types.Users
_ = json.NewDecoder(r.Body).Decode(&u1)
fmt.Println(u1)
db, err := gorm.Open("mysql", dnsstr1)
defer db.Close()
Db:= db.Table("users").Where("u_name = ?", u1.Uname).Find(&temp)
if Db.RowsAffected == 0{
err=errors.New("Incorrect Credentials")
_ = render.Render(w, r, errs.ErrRender(err))
return
}
if u1.Upass != temp.U_pass {
w.WriteHeader(http.StatusUnauthorized)
err=errors.New("Incorrect Credentials")
_ = render.Render(w, r, errs.ErrRender(err))
return
}
// We are happy with the credentials, so build a token. We've given it
// an expiry of 1 hour.
token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
"user": temp.U_id,
"exp": time.Now().Add(time.Hour * time.Duration(1)).Unix(),
"iat": time.Now().Unix(),
})
tokenString, err := token.SignedString([]byte(APP_KEY))
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
io.WriteString(w, `{"error":"token_generation_failed"}`)
return
}
io.WriteString(w, `{"token":"`+tokenString+`"}`)
return
}
// AuthMiddleware is our middleware to check our token is valid. Returning
// a 401 status to the client if it is not valid.
func AuthMiddleware(next http.Handler) http.Handler {
if len(APP_KEY) == 0 {
log.Fatal("HTTP server unable to start, expected an APP_KEY for JWT auth")
}
jwtMiddleware := jwtmiddleware.New(jwtmiddleware.Options{
ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
return []byte(APP_KEY), nil
},
SigningMethod: jwt.SigningMethodHS256,
})
return jwtMiddleware.Handler(next)
}
func Signup(w http.ResponseWriter, r *http.Request){
var u1 types.Users
var u2 types.Users
_ = json.NewDecoder(r.Body).Decode(&u1)
fmt.Println(u1)
db, err := gorm.Open("mysql", dnsstr1)
defer db.Close()
temp:=db.Table("users").Where("u_name = ?",u1.U_name).Find(&u2)
if(temp.RowsAffected!=0){
err=errors.New("Username Already exsist ")
_ = render.Render(w, r, errs.ErrRender(err))
return
}
if(err!=nil){
err=errors.New("Database error")
_ = render.Render(w, r, errs.ErrRender(err))
return
}
u1.Amount=50000.00
db.Create(&u1)
b, _ := json.Marshal(u1)
_,_=fmt.Fprintf(w,"%s", b)
}
|
[
"\"CONTAINER_NAME\""
] |
[] |
[
"CONTAINER_NAME"
] |
[]
|
["CONTAINER_NAME"]
|
go
| 1 | 0 | |
sdk/communication/azure-communication-phonenumbers/samples/list_acquired_phone_numbers_sample.py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: list_acquired_phone_numbers_sample.py
DESCRIPTION:
    This sample demonstrates how to get all of your acquired phone numbers using your connection string
USAGE:
python list_acquired_phone_numbers_sample.py
Set the environment variables with your own values before running the sample:
1) AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING - The connection string including your endpoint and
access key of your Azure Communication Service"""
import os
from azure.communication.phonenumbers import (
PhoneNumbersClient
)
connection_str = os.getenv('AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING')
phone_numbers_client = PhoneNumbersClient.from_connection_string(connection_str)
def list_acquired_phone_numbers():
acquired_phone_numbers = phone_numbers_client.list_acquired_phone_numbers()
print('Acquired phone numbers:')
for acquired_phone_number in acquired_phone_numbers:
print(acquired_phone_number.phone_number)
if __name__ == '__main__':
list_acquired_phone_numbers()
|
[] |
[] |
[
"AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING"
] |
[]
|
["AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING"]
|
python
| 1 | 0 | |
circleci-export.go
|
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/schollz/progressbar/v3"
)
func parseArgs() ([]string, time.Time, bool, string, string) {
circleToken := os.Getenv("CIRCLECI_TOKEN")
if circleToken == "" {
log.Error().Msg("Must set CIRCLECI_TOKEN environment variable")
return nil, time.Time{}, false, "", ""
}
athenianToken := os.Getenv("ATHENIAN_TOKEN")
if athenianToken == "" {
log.Error().Msg("Must set ATHENIAN_TOKEN environment variable")
return nil, time.Time{}, false, "", ""
}
var sinceStr string
flag.StringVar(&sinceStr, "s", time.Now().AddDate(-1, -3, 0).Format("2006-01-02"),
"Load pipelines started after this date.")
dryRun := flag.Bool("dry-run", false, "Print release notifications instead of sending")
flag.Parse()
repos := flag.Args()
for _, repo := range repos {
if strings.HasPrefix(repo, "-") {
log.Error().Msgf("\"%v\" is not a repository name, flags must go first", repo)
return nil, time.Time{}, false, "", ""
}
}
since, err := time.Parse("2006-01-02", sinceStr)
if err != nil {
log.Error().Msgf("Invalid date: %v (must be YYYY-MM-DD)", sinceStr)
return nil, time.Time{}, false, "", ""
}
return repos, since, *dryRun, circleToken, athenianToken
}
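// makeCircleAPIRequest calls the given CircleCI v2 API endpoint with the
// Circle-Token header set, retrying up to 10 times and sleeping for a minute
// whenever the rate limit is drained. It returns the response body together
// with the remaining rate limit.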
func makeCircleAPIRequest(endpoint, token string) ([]byte, int) {
var body []byte
var remaining int
attempts := 10
for attempt := 1; attempt <= attempts && len(body) == 0; attempt++ {
body, remaining = func() ([]byte, int) {
req, err := http.NewRequest(
http.MethodGet,
"https://circleci.com/api/v2/"+endpoint,
nil,
)
if err != nil {
log.Fatal().Msgf("[%d/%d] error creating HTTP request: %v", attempt, attempts, err)
}
req.Header.Add("Circle-Token", token)
res, err := http.DefaultClient.Do(req)
if err != nil {
log.Error().Msgf("[%d/%d] error sending HTTP request: %v", attempt, attempts, err)
return nil, 0
}
defer res.Body.Close()
remaining, err := strconv.Atoi(res.Header.Get("X-Ratelimit-Remaining"))
if err != nil {
log.Error().Msgf("[%d/%d] error reading the rate limit: %v", attempt, attempts, err)
return nil, 0
}
body, err := ioutil.ReadAll(res.Body)
if remaining == 0 {
log.Warn().Msgf("[%d/%d] drained the rate limit, waiting 60s", attempt, attempts)
time.Sleep(time.Minute)
return nil, 0
}
if err != nil {
log.Error().Msgf("[%d/%d] error reading HTTP response: %v", attempt, attempts, err)
body = nil
}
return body, remaining
}()
}
return body, remaining
}
type Pipeline struct {
CreatedAt string `json:"created_at"`
State string `json:"state"`
Trigger struct {
Actor struct {
Login string `json:"login"`
} `json:"actor"`
} `json:"trigger"`
VCS struct {
Revision string `json:"revision"`
} `json:"vcs"`
}
type Pipelines struct {
NextPageToken string `json:"next_page_token"`
Items []Pipeline `json:"items"`
}
type Release struct {
PublishedAt time.Time `json:"published_at"`
Author string `json:"author"`
Commit string `json:"commit"`
Repository string `json:"repository"`
Name string `json:"name"`
}
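// loadPipelines pages through the CircleCI pipelines of repo (optionally
// restricted to branch) created after since and converts them into Release
// notifications.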
func loadPipelines(repo, branch string, since time.Time, token string) []Release {
var pageToken string
lastCreatedAt := since
var pipelines Pipelines
var releases []Release
if branch != "" {
branch = fmt.Sprintf("branch=%s", branch)
}
bar := progressbar.Default(-1)
defer bar.Finish()
for lastCreatedAt.Sub(since) >= 0 {
if pageToken != "" {
pageToken = fmt.Sprintf("page-token=%s", pageToken)
}
query := strings.Trim(strings.Join([]string{branch, pageToken}, "&"), "&")
if query != "" {
query = "?" + query
}
response, rateLimit := makeCircleAPIRequest(
fmt.Sprintf("project/gh/%s/pipeline%s", repo, query),
token)
bar.Describe(fmt.Sprintf("Loaded %d pipelines since %s [rate limit %d]",
len(releases), lastCreatedAt.Format("2006-01-02"), rateLimit))
if err := json.Unmarshal(response, &pipelines); err != nil {
return nil
}
pageToken = pipelines.NextPageToken
for _, pipeline := range pipelines.Items {
var err error
createdAt, err := time.Parse("2006-01-02T15:04:05Z", pipeline.CreatedAt)
if err != nil {
log.Error().Msgf("Invalid datetime format: %v", pipeline.CreatedAt)
return nil
}
releases = append(releases, Release{
PublishedAt: createdAt,
Author: "github.com/" + pipeline.Trigger.Actor.Login,
Commit: pipeline.VCS.Revision,
Repository: "github.com/" + repo,
Name: fmt.Sprintf(
"%s-%s", createdAt.Format("2006-01-02"), pipeline.VCS.Revision[:7]),
})
}
lastCreatedAt = releases[len(releases)-1].PublishedAt
}
return releases
}
func sendReleasesBatch(releases []Release, token string, dryRun bool) error {
data, err := json.Marshal(releases)
if err != nil {
return err
}
if dryRun {
fmt.Println(string(data))
return nil
}
req, err := http.NewRequest(
http.MethodPost,
"https://api.athenian.co/v1/events/releases",
bytes.NewBuffer(data),
)
if err != nil {
log.Error().Msgf("error creating HTTP request: %v", err)
return err
}
req.Header.Add("X-API-Key", token)
req.Header.Add("Content-Type", "application/json")
res, err := http.DefaultClient.Do(req)
var feedback []byte
if err == nil {
defer res.Body.Close()
feedback, err = ioutil.ReadAll(res.Body)
} else {
log.Error().Msgf("error sending Athenian API request: %v", err)
return err
}
if res.StatusCode != 200 {
log.Error().Msgf("Athenian API returned %s:\n%s", res.Status, string(feedback))
return fmt.Errorf("server returned %s", res.Status)
}
return err
}
func sendReleases(releases []Release, token string, dryRun bool) {
bar := progressbar.Default(int64(len(releases)))
defer bar.Finish()
buffer := make([]Release, 0, 100)
for _, release := range releases {
if len(buffer) == cap(buffer) {
if sendReleasesBatch(buffer, token, dryRun) != nil {
continue
}
_ = bar.Add(len(buffer))
buffer = buffer[:0]
}
buffer = append(buffer, release)
}
if len(buffer) > 0 {
for sendReleasesBatch(buffer, token, dryRun) != nil {
}
}
}
func main() {
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
repos, since, dryRun, circleToken, athenianToken := parseArgs()
if len(repos) == 0 {
os.Exit(1)
}
log.Info().Msgf("Loading the pipelines for %d repositories", len(repos))
var releases []Release
for _, repo := range repos {
parts := strings.Split(repo, "@")
var repoName, branch string
if len(parts) == 1 {
repoName = repo
} else {
repoName, branch = parts[0], parts[1]
}
releases = append(releases, loadPipelines(repoName, branch, since, circleToken)...)
}
log.Info().Msgf("Sending %d release notifications to Athenian", len(releases))
sendReleases(releases, athenianToken, dryRun)
}
|
[
"\"CIRCLECI_TOKEN\"",
"\"ATHENIAN_TOKEN\""
] |
[] |
[
"CIRCLECI_TOKEN",
"ATHENIAN_TOKEN"
] |
[]
|
["CIRCLECI_TOKEN", "ATHENIAN_TOKEN"]
|
go
| 2 | 0 | |
internal/repository/mongodb/mongo_test.go
|
package mongodb_test
import (
"context"
"os"
"testing"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
var (
mongoClient *mongo.Client
mongoDB *mongo.Database
)
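// TestMain connects to the MongoDB instance pointed to by MONGO_URI (falling
// back to localhost), exposes the client and the "flaggio_test" database to
// the tests, and disconnects once they have run.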
func TestMain(t *testing.M) {
ctx, cancel := context.WithCancel(context.Background())
mongoURI := os.Getenv("MONGO_URI")
if mongoURI == "" {
mongoURI = "mongodb://localhost:27017"
}
c, err := mongo.Connect(ctx, options.Client().ApplyURI(mongoURI))
if err != nil {
panic(err)
}
if err := c.Ping(ctx, nil); err != nil {
panic(err)
}
mongoClient = c
mongoDB = mongoClient.Database("flaggio_test")
code := t.Run()
if err := mongoClient.Disconnect(ctx); err != nil {
panic(err)
}
cancel()
os.Exit(code)
}
|
[
"\"MONGO_URI\""
] |
[] |
[
"MONGO_URI"
] |
[]
|
["MONGO_URI"]
|
go
| 1 | 0 | |
cmd/internal/modload/modload.go
|
/*
* Copyright (c) 2021 The GoPlus Authors (goplus.org). All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package modload
import (
"bytes"
"errors"
"fmt"
"log"
"os"
"path/filepath"
gomodfile "golang.org/x/mod/modfile"
"golang.org/x/mod/module"
"github.com/goplus/gop/cl"
"github.com/goplus/gop/cmd/gengo"
"github.com/goplus/gop/cmd/internal/search"
"github.com/goplus/gop/env"
"github.com/goplus/gop/x/mod/modfetch"
"github.com/goplus/gop/x/mod/modfile"
)
var (
modFile, classModFile *modfile.File
initialized bool
modRoot string
Target module.Version
)
var ErrNoModRoot = errors.New("gop.mod file not found in current directory or any parent directory; see 'gop help modules'")
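// findModuleRoot walks up from dir looking for a directory that contains a
// gop.mod or go.mod file and returns it, or "" if none is found.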
func findModuleRoot(dir string) (root string) {
if dir == "" {
panic("dir not set")
}
dir = filepath.Clean(dir)
// Look for enclosing gop.mod or go.mod.
for {
if fi, err := os.Stat(filepath.Join(dir, "gop.mod")); err == nil && !fi.IsDir() {
return dir
}
if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
return dir
}
d := filepath.Dir(dir)
if d == dir {
break
}
dir = d
}
return ""
}
func SetModRoot(dir string) {
modRoot = dir
}
func getcwd() string {
path, _ := os.Getwd()
return path
}
var gopRoot = getcwd()
// HasModRoot reports whether a main module is present.
// HasModRoot may return false even if Enabled returns true: for example, 'get'
// does not require a main module.
func HasModRoot() bool {
Init()
return modRoot != ""
}
// GopModFilePath returns the effective path of the gop.mod file. Normally, this
// is "gop.mod" in the directory returned by ModRoot, but the -modfile flag may
// change its location. GopModFilePath calls log.Fatalf if there is no main
// module, even if -modfile is set.
func GopModFilePath() string {
if !HasModRoot() {
log.Fatalf("gop: %v", ErrNoModRoot)
}
return filepath.Join(modRoot, "gop.mod")
}
// GoModFilePath returns the effective path of the go.mod file. Normally, this
// is "go.mod" in the directory returned by ModRoot, but the -modfile flag may
// change its location. GoModFilePath calls log.Fatalf if there is no main
// module, even if -modfile is set.
func GoModFilePath() string {
if !HasModRoot() {
log.Fatalf("gop: %v", ErrNoModRoot)
}
return filepath.Join(modRoot, "go.mod")
}
// Init determines whether module mode is enabled, locates the root of the
// current module (if any), sets environment variables for Git subprocesses, and
// configures the cfg, codehost, load, modfetch, and search packages for use
// with modules.
func Init() {
if initialized {
return
}
initialized = true
// Disable any prompting for passwords by Git.
// Only has an effect for 2.3.0 or later, but avoiding
// the prompt in earlier versions is just too hard.
// If the user has explicitly set GIT_TERMINAL_PROMPT=1, keep
// prompting.
// See golang.org/issue/9341 and golang.org/issue/12706.
if os.Getenv("GIT_TERMINAL_PROMPT") == "" {
os.Setenv("GIT_TERMINAL_PROMPT", "0")
}
// Disable any ssh connection pooling by Git.
// If a Git subprocess forks a child into the background to cache a new connection,
// that child keeps stdout/stderr open. After the Git subprocess exits,
// os/exec expects to be able to read from the stdout/stderr pipe
// until EOF to get all the data that the Git subprocess wrote before exiting.
// The EOF doesn't come until the child exits too, because the child
// is holding the write end of the pipe.
// This is unfortunate, but it has come up at least twice
// (see golang.org/issue/13453 and golang.org/issue/16104)
// and confuses users when it does.
// If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND,
// assume they know what they are doing and don't step on it.
// But default to turning off ControlMaster.
if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no")
}
if modRoot != "" {
// nothing to do
} else {
modRoot := findModuleRoot(gopRoot)
if modRoot != "" {
SetModRoot(modRoot)
}
}
}
func findModulePath(dir string) (string, error) {
// Look for path in GOPATH.
var badPathErr error
for _, gpdir := range filepath.SplitList(getGoPath()) {
if gpdir == "" {
continue
}
if rel := search.InDir(dir, filepath.Join(gpdir, "src")); rel != "" && rel != "." {
path := filepath.ToSlash(rel)
return path, nil
}
}
reason := "outside GOPATH, module path must be specified"
if badPathErr != nil {
// return a different error message if the module was in GOPATH, but
// the module path determined above would be an invalid path.
reason = fmt.Sprintf("bad module path inferred from directory in GOPATH: %v", badPathErr)
}
msg := `cannot determine module path for source directory %s (%s)
Example usage:
'gop mod init example.com/m' to initialize a v0 or v1 module
'gop mod init example.com/m/v2' to initialize a v2 module
Run 'gop help mod init' for more information.
`
return "", fmt.Errorf(msg, dir, reason)
}
// CreateModFile initializes a new module by creating a go.mod file.
//
// If modPath is empty, CreateModFile will attempt to infer the path from the
// directory location within GOPATH.
//
// If a vendoring configuration file is present, CreateModFile will attempt to
// translate it to go.mod directives. The resulting build list may not be
// exactly the same as in the legacy configuration (for example, we can't get
// packages at multiple versions from the same module).
func CreateModFile(modPath string) {
modRoot = gopRoot
Init()
modFilePath := GopModFilePath()
if _, err := os.Stat(modFilePath); err == nil {
log.Fatalf("gop: %s already exists", modFilePath)
}
if modPath == "" {
var err error
modPath, err = findModulePath(modRoot)
if err != nil {
log.Fatalf("gop: %v", err)
}
}
fmt.Fprintf(os.Stderr, "gop: creating new gop.mod: module %s\n", modPath)
modFile = new(modfile.File)
modFile.AddModuleStmt(modPath)
addGopStmt() // Add the gop directive before converted module requirements.
WriteGopMod()
}
func Load() {
LoadModFile()
if modRoot == "" {
return
}
SyncGoMod()
if classModFile != nil && classModFile.Classfile != nil {
gengo.RegisterPkgFlags(classModFile.Classfile.ProjExt, gengo.PkgFlagGmx)
gengo.RegisterPkgFlags(classModFile.Classfile.WorkExt, gengo.PkgFlagSpx)
cl.RegisterClassFileType(classModFile.Classfile.ProjExt,
classModFile.Classfile.WorkExt, classModFile.Classfile.PkgPaths...)
}
}
// fixVersion returns a modfile.VersionFixer implemented using the Query function.
//
// It resolves commit hashes and branch names to versions,
// canonicalizes versions that appeared in early vgo drafts,
// and does nothing for versions that already appear to be canonical.
//
// The VersionFixer sets 'fixed' if it ever returns a non-canonical version.
func fixVersion(fixed *bool) modfile.VersionFixer {
return func(path, vers string) (resolved string, err error) {
// do nothing
return vers, nil
}
}
func fixGoVersion(fixed *bool) gomodfile.VersionFixer {
return func(path, vers string) (resolved string, err error) {
// do nothing
return vers, nil
}
}
// LoadModFile sets Target and, if there is a main module, parses the initial
// build list from its go.mod file.
//
// LoadModFile may make changes in memory, like adding a go directive and
// ensuring requirements are consistent. WriteGoMod should be called later to
// write changes out to disk or report errors in readonly mode.
//
// As a side-effect, LoadModFile may change cfg.BuildMod to "vendor" if
// -mod wasn't set explicitly and automatic vendoring should be enabled.
func LoadModFile() {
Init()
if modRoot == "" {
return
}
// If gop.mod does not exist but go.mod does, synchronize a copy of
// go.mod into gop.mod before returning.
gopmod := GopModFilePath()
gomod := GoModFilePath()
if _, err := os.Stat(gopmod); os.IsNotExist(err) {
if _, err := os.Stat(gomod); err == nil {
SyncGopMod()
}
return
}
data, err := modfetch.Read(gopmod)
if err != nil {
log.Fatalf("gop: %v", err)
}
var fixed bool
f, err := modfile.Parse(gopmod, data, fixVersion(&fixed))
if err != nil {
// Errors returned by modfile.Parse begin with file:line.
log.Fatalf("gop: errors parsing gop.mod:\n%s\n", err)
}
modFile = f
if f.Module == nil {
// No module declaration. Must add module path.
log.Fatalf("gop: no module declaration in gop.mod. To specify the module path:\n")
}
LoadClassFile()
}
// addGopStmt adds a gop directive to the gop.mod file if it does not already include one.
// The 'gop' version added, if any, is the latest version supported by this toolchain.
func addGopStmt() {
if modFile.Gop != nil && modFile.Gop.Version != "" {
return
}
version := env.MainVersion
if !modfile.GopVersionRE.MatchString(version) {
log.Fatalf("gop: unrecognized default version %q", version)
}
if err := modFile.AddGopStmt(version); err != nil {
log.Fatalf("gop: internal error: %v", err)
}
}
// WriteGopMod writes the current build list back to gop.mod.
func WriteGopMod() {
// If we aren't in a module, we don't have anywhere to write a gop.mod file.
if modRoot == "" {
return
}
addGopStmt()
modFile.Cleanup()
new, err := modFile.Format()
if err != nil {
log.Fatalf("gop: %v", err)
}
errNoChange := errors.New("no update needed")
err = modfetch.Transform(GopModFilePath(), func(old []byte) ([]byte, error) {
if bytes.Equal(old, new) {
// The gop.mod file is already equal to new, possibly as the result of some
// other process.
return nil, errNoChange
}
return new, nil
})
if err != nil && err != errNoChange {
log.Fatalf("gop: updating gop.mod: %v", err)
}
}
func getGoPath() string {
return os.Getenv("GOPATH")
}
func SyncGoMod() {
gomodPath := GoModFilePath()
gomod := &gomodfile.File{}
if _, err := os.Stat(gomodPath); err == nil {
data, err := modfetch.Read(gomodPath)
if err != nil {
log.Fatalln(err)
}
var fixed bool
gomod, err = gomodfile.Parse(gomodPath, data, fixGoVersion(&fixed))
if err != nil {
// Errors returned by modfile.Parse begin with file:line.
log.Fatalf("gop: errors parsing gop.mod:\n%s\n", err)
}
}
gomod.AddModuleStmt(modFile.Module.Mod.Path)
if modFile.Go != nil {
gomod.AddGoStmt(modFile.Go.Version)
}
for _, require := range modFile.Require {
gomod.AddRequire(require.Mod.Path, require.Mod.Version)
}
for _, replace := range modFile.Replace {
gomod.AddReplace(replace.Old.Path, replace.Old.Version, replace.New.Path, replace.New.Version)
}
for _, exclude := range modFile.Exclude {
gomod.AddExclude(exclude.Mod.Path, exclude.Mod.Version)
}
for _, retract := range modFile.Retract {
gomod.AddRetract(gomodfile.VersionInterval(retract.VersionInterval), retract.Rationale)
}
if classModFile != nil {
for _, require := range classModFile.Require {
gomod.AddRequire(require.Mod.Path, require.Mod.Version)
}
for _, replace := range classModFile.Replace {
gomod.AddReplace(replace.Old.Path, replace.Old.Version, replace.New.Path, replace.New.Version)
}
for _, exclude := range classModFile.Exclude {
gomod.AddExclude(exclude.Mod.Path, exclude.Mod.Version)
}
for _, retract := range classModFile.Retract {
gomod.AddRetract(gomodfile.VersionInterval(retract.VersionInterval), retract.Rationale)
}
}
gomod.Cleanup()
new, err := gomod.Format()
if err != nil {
log.Fatalf("gop: %v", err)
}
errNoChange := errors.New("no update needed")
err = modfetch.Transform(GoModFilePath(), func(old []byte) ([]byte, error) {
if bytes.Equal(old, new) {
// The go.mod file is already equal to new, possibly as the result of some
// other process.
return nil, errNoChange
}
return new, nil
})
if err != nil && err != errNoChange {
log.Fatalf("gop: updating gop.mod: %v", err)
}
}
func SyncGopMod() {
gomodPath := GoModFilePath()
gomod := &gomodfile.File{}
if _, err := os.Stat(gomodPath); err == nil {
data, err := modfetch.Read(gomodPath)
if err != nil {
log.Fatalln(err)
}
var fixed bool
gomod, err = gomodfile.Parse(gomodPath, data, fixGoVersion(&fixed))
if err != nil {
// Errors returned by modfile.Parse begin with file:line.
log.Fatalf("gop: errors parsing gop.mod:\n%s\n", err)
}
}
if modFile == nil {
modFile = &modfile.File{}
modFile.AddModuleStmt(gomod.Module.Mod.Path)
}
if gomod.Go != nil {
modFile.AddGoStmt(gomod.Go.Version)
}
for _, require := range gomod.Require {
modFile.AddRequire(require.Mod.Path, require.Mod.Version)
}
for _, replace := range gomod.Replace {
modFile.AddReplace(replace.Old.Path, replace.Old.Version, replace.New.Path, replace.New.Version)
}
for _, exclude := range gomod.Exclude {
modFile.AddExclude(exclude.Mod.Path, exclude.Mod.Version)
}
for _, retract := range gomod.Retract {
modFile.AddRetract(modfile.VersionInterval(retract.VersionInterval), retract.Rationale)
}
modFile.Cleanup()
new, err := modFile.Format()
if err != nil {
log.Fatalf("gop: %v", err)
}
errNoChange := errors.New("no update needed")
err = modfetch.Transform(GopModFilePath(), func(old []byte) ([]byte, error) {
if bytes.Equal(old, new) {
// The gop.mod file is already equal to new, possibly as the result of some
// other process.
return nil, errNoChange
}
return new, nil
})
if err != nil && err != errNoChange {
log.Fatalf("gop: updating gop.mod: %v", err)
}
}
|
[
"\"GIT_TERMINAL_PROMPT\"",
"\"GIT_SSH\"",
"\"GIT_SSH_COMMAND\"",
"\"GOPATH\""
] |
[] |
[
"GIT_SSH",
"GIT_SSH_COMMAND",
"GIT_TERMINAL_PROMPT",
"GOPATH"
] |
[]
|
["GIT_SSH", "GIT_SSH_COMMAND", "GIT_TERMINAL_PROMPT", "GOPATH"]
|
go
| 4 | 0 | |
tools/third_party/pytest/testing/acceptance_test.py
|
import os
import sys
import types
import attr
import py
import pytest
from _pytest.compat import importlib_metadata
from _pytest.config import ExitCode
from _pytest.pathlib import symlink_or_skip
from _pytest.pytester import Testdir
def prepend_pythonpath(*dirs):
cur = os.getenv("PYTHONPATH")
if cur:
dirs += (cur,)
return os.pathsep.join(str(p) for p in dirs)
class TestGeneralUsage:
def test_config_error(self, testdir):
testdir.copy_example("conftest_usageerror/conftest.py")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(["*ERROR: hello"])
result.stdout.fnmatch_lines(["*pytest_unconfigure_called"])
def test_root_conftest_syntax_error(self, testdir):
testdir.makepyfile(conftest="raise SyntaxError\n")
result = testdir.runpytest()
result.stderr.fnmatch_lines(["*raise SyntaxError*"])
assert result.ret != 0
def test_early_hook_error_issue38_1(self, testdir):
testdir.makeconftest(
"""
def pytest_sessionstart():
0 / 0
"""
)
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines(
["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
)
result = testdir.runpytest(testdir.tmpdir, "--fulltrace")
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines(
["*INTERNALERROR*def pytest_sessionstart():*", "*INTERNALERROR*0 / 0*"]
)
def test_early_hook_configure_error_issue38(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
0 / 0
"""
)
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# here we get it on stderr
result.stderr.fnmatch_lines(
["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
)
def test_file_not_found(self, testdir):
result = testdir.runpytest("asd")
assert result.ret != 0
result.stderr.fnmatch_lines(["ERROR: file or directory not found: asd"])
def test_file_not_found_unconfigure_issue143(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
print("---configure")
def pytest_unconfigure():
print("---unconfigure")
"""
)
result = testdir.runpytest("-s", "asd")
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(["ERROR: file or directory not found: asd"])
result.stdout.fnmatch_lines(["*---configure", "*---unconfigure"])
def test_config_preparse_plugin_option(self, testdir):
testdir.makepyfile(
pytest_xyz="""
def pytest_addoption(parser):
parser.addoption("--xyz", dest="xyz", action="store")
"""
)
testdir.makepyfile(
test_one="""
def test_option(pytestconfig):
assert pytestconfig.option.xyz == "123"
"""
)
result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("load_cov_early", [True, False])
def test_early_load_setuptools_name(self, testdir, monkeypatch, load_cov_early):
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
testdir.makepyfile(mytestplugin1_module="")
testdir.makepyfile(mytestplugin2_module="")
testdir.makepyfile(mycov_module="")
testdir.syspathinsert()
loaded = []
@attr.s
class DummyEntryPoint:
name = attr.ib()
module = attr.ib()
group = "pytest11"
def load(self):
__import__(self.module)
loaded.append(self.name)
return sys.modules[self.module]
entry_points = [
DummyEntryPoint("myplugin1", "mytestplugin1_module"),
DummyEntryPoint("myplugin2", "mytestplugin2_module"),
DummyEntryPoint("mycov", "mycov_module"),
]
@attr.s
class DummyDist:
entry_points = attr.ib()
files = ()
def my_dists():
return (DummyDist(entry_points),)
monkeypatch.setattr(importlib_metadata, "distributions", my_dists)
params = ("-p", "mycov") if load_cov_early else ()
testdir.runpytest_inprocess(*params)
if load_cov_early:
assert loaded == ["mycov", "myplugin1", "myplugin2"]
else:
assert loaded == ["myplugin1", "myplugin2", "mycov"]
@pytest.mark.parametrize("import_mode", ["prepend", "append", "importlib"])
def test_assertion_rewrite(self, testdir, import_mode):
p = testdir.makepyfile(
"""
def test_this():
x = 0
assert x
"""
)
result = testdir.runpytest(p, "--import-mode={}".format(import_mode))
result.stdout.fnmatch_lines(["> assert x", "E assert 0"])
assert result.ret == 1
def test_nested_import_error(self, testdir):
p = testdir.makepyfile(
"""
import import_fails
def test_this():
assert import_fails.a == 1
"""
)
testdir.makepyfile(import_fails="import does_not_work")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"ImportError while importing test module*",
"*No module named *does_not_work*",
]
)
assert result.ret == 2
def test_not_collectable_arguments(self, testdir):
p1 = testdir.makepyfile("")
p2 = testdir.makefile(".pyc", "123")
result = testdir.runpytest(p1, p2)
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(
[
"ERROR: not found: {}".format(p2),
"(no name {!r} in any of [[][]])".format(str(p2)),
"",
]
)
@pytest.mark.filterwarnings("default")
def test_better_reporting_on_conftest_load_failure(self, testdir):
"""Show a user-friendly traceback on conftest import failures (#486, #3332)"""
testdir.makepyfile("")
conftest = testdir.makeconftest(
"""
def foo():
import qwerty
foo()
"""
)
result = testdir.runpytest("--help")
result.stdout.fnmatch_lines(
"""
*--version*
*warning*conftest.py*
"""
)
result = testdir.runpytest()
exc_name = (
"ModuleNotFoundError" if sys.version_info >= (3, 6) else "ImportError"
)
assert result.stdout.lines == []
assert result.stderr.lines == [
"ImportError while loading conftest '{}'.".format(conftest),
"conftest.py:3: in <module>",
" foo()",
"conftest.py:2: in foo",
" import qwerty",
"E {}: No module named 'qwerty'".format(exc_name),
]
def test_early_skip(self, testdir):
testdir.mkdir("xyz")
testdir.makeconftest(
"""
import pytest
def pytest_collect_file():
pytest.skip("early")
"""
)
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*1 skip*"])
def test_issue88_initial_file_multinodes(self, testdir):
testdir.copy_example("issue88_initial_file_multinodes")
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines(["*MyFile*test_issue88*", "*Module*test_issue88*"])
def test_issue93_initialnode_importing_capturing(self, testdir):
testdir.makeconftest(
"""
import sys
print("should not be seen")
sys.stderr.write("stder42\\n")
"""
)
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.no_fnmatch_line("*should not be seen*")
assert "stderr42" not in result.stderr.str()
def test_conftest_printing_shows_if_error(self, testdir):
testdir.makeconftest(
"""
print("should be seen")
assert 0
"""
)
result = testdir.runpytest()
assert result.ret != 0
assert "should be seen" in result.stdout.str()
def test_issue109_sibling_conftests_not_loaded(self, testdir):
sub1 = testdir.mkdir("sub1")
sub2 = testdir.mkdir("sub2")
sub1.join("conftest.py").write("assert 0")
result = testdir.runpytest(sub2)
assert result.ret == ExitCode.NO_TESTS_COLLECTED
sub2.ensure("__init__.py")
p = sub2.ensure("test_hello.py")
result = testdir.runpytest(p)
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result = testdir.runpytest(sub1)
assert result.ret == ExitCode.USAGE_ERROR
def test_directory_skipped(self, testdir):
testdir.makeconftest(
"""
import pytest
def pytest_ignore_collect():
pytest.skip("intentional")
"""
)
testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*1 skipped*"])
def test_multiple_items_per_collector_byid(self, testdir):
c = testdir.makeconftest(
"""
import pytest
class MyItem(pytest.Item):
def runtest(self):
pass
class MyCollector(pytest.File):
def collect(self):
return [MyItem.from_parent(name="xyz", parent=self)]
def pytest_collect_file(path, parent):
if path.basename.startswith("conftest"):
return MyCollector.from_parent(fspath=path, parent=parent)
"""
)
result = testdir.runpytest(c.basename + "::" + "xyz")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 pass*"])
def test_skip_on_generated_funcarg_id(self, testdir):
testdir.makeconftest(
"""
import pytest
def pytest_generate_tests(metafunc):
metafunc.parametrize('x', [3], ids=['hello-123'])
def pytest_runtest_setup(item):
print(item.keywords)
if 'hello-123' in item.keywords:
pytest.skip("hello")
assert 0
"""
)
p = testdir.makepyfile("""def test_func(x): pass""")
res = testdir.runpytest(p)
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 skipped*"])
def test_direct_addressing_selects(self, testdir):
p = testdir.makepyfile(
"""
def pytest_generate_tests(metafunc):
metafunc.parametrize('i', [1, 2], ids=["1", "2"])
def test_func(i):
pass
"""
)
res = testdir.runpytest(p.basename + "::" + "test_func[1]")
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_direct_addressing_notfound(self, testdir):
p = testdir.makepyfile(
"""
def test_func():
pass
"""
)
res = testdir.runpytest(p.basename + "::" + "test_notfound")
assert res.ret
res.stderr.fnmatch_lines(["*ERROR*not found*"])
def test_docstring_on_hookspec(self):
from _pytest import hookspec
for name, value in vars(hookspec).items():
if name.startswith("pytest_"):
assert value.__doc__, "no docstring for %s" % name
def test_initialization_error_issue49(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
x
"""
)
result = testdir.runpytest()
assert result.ret == 3 # internal error
result.stderr.fnmatch_lines(["INTERNAL*pytest_configure*", "INTERNAL*x*"])
assert "sessionstarttime" not in result.stderr.str()
@pytest.mark.parametrize("lookfor", ["test_fun.py::test_a"])
def test_issue134_report_error_when_collecting_member(self, testdir, lookfor):
testdir.makepyfile(
test_fun="""
def test_a():
pass
def"""
)
result = testdir.runpytest(lookfor)
result.stdout.fnmatch_lines(["*SyntaxError*"])
if "::" in lookfor:
result.stderr.fnmatch_lines(["*ERROR*"])
assert result.ret == 4 # usage error only if item not found
def test_report_all_failed_collections_initargs(self, testdir):
testdir.makeconftest(
"""
from _pytest.config import ExitCode
def pytest_sessionfinish(exitstatus):
assert exitstatus == ExitCode.USAGE_ERROR
print("pytest_sessionfinish_called")
"""
)
testdir.makepyfile(test_a="def", test_b="def")
result = testdir.runpytest("test_a.py::a", "test_b.py::b")
result.stderr.fnmatch_lines(["*ERROR*test_a.py::a*", "*ERROR*test_b.py::b*"])
result.stdout.fnmatch_lines(["pytest_sessionfinish_called"])
assert result.ret == ExitCode.USAGE_ERROR
def test_namespace_import_doesnt_confuse_import_hook(self, testdir):
"""Ref #383.
Python 3.3's namespace package messed with our import hooks.
Importing a module that didn't exist, even if the ImportError was
gracefully handled, would make our test crash.
"""
testdir.mkdir("not_a_package")
p = testdir.makepyfile(
"""
try:
from not_a_package import doesnt_exist
except ImportError:
# We handle the import error gracefully here
pass
def test_whatever():
pass
"""
)
res = testdir.runpytest(p.basename)
assert res.ret == 0
def test_unknown_option(self, testdir):
result = testdir.runpytest("--qwlkej")
result.stderr.fnmatch_lines(
"""
*unrecognized*
"""
)
def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
monkeypatch.setattr("inspect.getsourcelines", None)
p = testdir.makepyfile(
"""
def raise_error(obj):
raise OSError('source code not available')
import inspect
inspect.getsourcelines = raise_error
def test_foo(invalid_fixture):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines(
["*source code not available*", "E*fixture 'invalid_fixture' not found"]
)
def test_plugins_given_as_strings(self, tmpdir, monkeypatch, _sys_snapshot):
"""Test that str values passed to main() as `plugins` arg are
interpreted as module names to be imported and registered (#855)."""
with pytest.raises(ImportError) as excinfo:
pytest.main([str(tmpdir)], plugins=["invalid.module"])
assert "invalid" in str(excinfo.value)
p = tmpdir.join("test_test_plugins_given_as_strings.py")
p.write("def test_foo(): pass")
mod = types.ModuleType("myplugin")
monkeypatch.setitem(sys.modules, "myplugin", mod)
assert pytest.main(args=[str(tmpdir)], plugins=["myplugin"]) == 0
def test_parametrized_with_bytes_regex(self, testdir):
p = testdir.makepyfile(
"""
import re
import pytest
@pytest.mark.parametrize('r', [re.compile(b'foo')])
def test_stuff(r):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines(["*1 passed*"])
def test_parametrized_with_null_bytes(self, testdir):
"""Test parametrization with values that contain null bytes and unicode characters (#2644, #2957)"""
p = testdir.makepyfile(
"""\
import pytest
@pytest.mark.parametrize("data", [b"\\x00", "\\x00", 'ação'])
def test_foo(data):
assert data
"""
)
res = testdir.runpytest(p)
res.assert_outcomes(passed=3)
class TestInvocationVariants:
def test_earlyinit(self, testdir):
p = testdir.makepyfile(
"""
import pytest
assert hasattr(pytest, 'mark')
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_pydoc(self, testdir):
for name in ("py.test", "pytest"):
result = testdir.runpython_c("import {};help({})".format(name, name))
assert result.ret == 0
s = result.stdout.str()
assert "MarkGenerator" in s
def test_import_star_py_dot_test(self, testdir):
p = testdir.makepyfile(
"""
from py.test import *
#collect
#cmdline
#Item
# assert collect.Item is Item
# assert collect.Collector is Collector
main
skip
xfail
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_import_star_pytest(self, testdir):
p = testdir.makepyfile(
"""
from pytest import *
#Item
#File
main
skip
xfail
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_double_pytestcmdline(self, testdir):
p = testdir.makepyfile(
run="""
import pytest
pytest.main()
pytest.main()
"""
)
testdir.makepyfile(
"""
def test_hello():
pass
"""
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(["*1 passed*", "*1 passed*"])
def test_python_minus_m_invocation_ok(self, testdir):
p1 = testdir.makepyfile("def test_hello(): pass")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
def test_python_minus_m_invocation_fail(self, testdir):
p1 = testdir.makepyfile("def test_fail(): 0/0")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 1
def test_python_pytest_package(self, testdir):
p1 = testdir.makepyfile("def test_pass(): pass")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_equivalence_pytest_pydottest(self) -> None:
# Type ignored because `py.test` is not and will not be typed.
assert pytest.main == py.test.cmdline.main # type: ignore[attr-defined]
def test_invoke_with_invalid_type(self) -> None:
with pytest.raises(
TypeError, match="expected to be a list of strings, got: '-h'"
):
pytest.main("-h") # type: ignore[arg-type]
def test_invoke_with_path(self, tmpdir: py.path.local, capsys) -> None:
retcode = pytest.main(tmpdir)
assert retcode == ExitCode.NO_TESTS_COLLECTED
out, err = capsys.readouterr()
def test_invoke_plugin_api(self, capsys):
class MyPlugin:
def pytest_addoption(self, parser):
parser.addoption("--myopt")
pytest.main(["-h"], plugins=[MyPlugin()])
out, err = capsys.readouterr()
assert "--myopt" in out
def test_pyargs_importerror(self, testdir, monkeypatch):
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write("raise ImportError")
result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret != 0
result.stdout.fnmatch_lines(["collected*0*items*/*1*error"])
def test_pyargs_only_imported_once(self, testdir):
pkg = testdir.mkpydir("foo")
pkg.join("test_foo.py").write("print('hello from test_foo')\ndef test(): pass")
pkg.join("conftest.py").write(
"def pytest_configure(config): print('configuring')"
)
result = testdir.runpytest("--pyargs", "foo.test_foo", "-s", syspathinsert=True)
# should only import once
assert result.outlines.count("hello from test_foo") == 1
# should only configure once
assert result.outlines.count("configuring") == 1
def test_pyargs_filename_looks_like_module(self, testdir):
testdir.tmpdir.join("conftest.py").ensure()
testdir.tmpdir.join("t.py").write("def test(): pass")
result = testdir.runpytest("--pyargs", "t.py")
assert result.ret == ExitCode.OK
def test_cmdline_python_package(self, testdir, monkeypatch):
import warnings
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write("def test_hello(): pass")
path.join("test_world.py").write("def test_world(): pass")
result = testdir.runpytest("--pyargs", "tpkg")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
empty_package = testdir.mkpydir("empty_package")
monkeypatch.setenv("PYTHONPATH", str(empty_package), prepend=os.pathsep)
# the path which is not a package raises a warning on pypy;
# no idea why only pypy and not normal python warn about it here
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
result = testdir.runpytest("--pyargs", ".")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
monkeypatch.setenv("PYTHONPATH", str(testdir), prepend=os.pathsep)
result = testdir.runpytest("--pyargs", "tpkg.test_missing", syspathinsert=True)
assert result.ret != 0
result.stderr.fnmatch_lines(["*not*found*test_missing*"])
def test_cmdline_python_namespace_package(self, testdir, monkeypatch):
"""Test --pyargs option with namespace packages (#1567).
Ref: https://packaging.python.org/guides/packaging-namespace-packages/
"""
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
search_path = []
for dirname in "hello", "world":
d = testdir.mkdir(dirname)
search_path.append(d)
ns = d.mkdir("ns_pkg")
ns.join("__init__.py").write(
"__import__('pkg_resources').declare_namespace(__name__)"
)
lib = ns.mkdir(dirname)
lib.ensure("__init__.py")
lib.join("test_{}.py".format(dirname)).write(
"def test_{}(): pass\ndef test_other():pass".format(dirname)
)
# The structure of the test directory is now:
# .
# ├── hello
# │ └── ns_pkg
# │ ├── __init__.py
# │ └── hello
# │ ├── __init__.py
# │ └── test_hello.py
# └── world
# └── ns_pkg
# ├── __init__.py
# └── world
# ├── __init__.py
# └── test_world.py
# NOTE: the different/reversed ordering is intentional here.
monkeypatch.setenv("PYTHONPATH", prepend_pythonpath(*search_path))
for p in search_path:
monkeypatch.syspath_prepend(p)
# mixed module and filenames:
monkeypatch.chdir("world")
result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "ns_pkg/world")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"test_hello.py::test_hello*PASSED*",
"test_hello.py::test_other*PASSED*",
"ns_pkg/world/test_world.py::test_world*PASSED*",
"ns_pkg/world/test_world.py::test_other*PASSED*",
"*4 passed in*",
]
)
# specify tests within a module
testdir.chdir()
result = testdir.runpytest(
"--pyargs", "-v", "ns_pkg.world.test_world::test_other"
)
assert result.ret == 0
result.stdout.fnmatch_lines(
["*test_world.py::test_other*PASSED*", "*1 passed*"]
)
def test_invoke_test_and_doctestmodules(self, testdir):
p = testdir.makepyfile(
"""
def test():
pass
"""
)
result = testdir.runpytest(str(p) + "::test", "--doctest-modules")
result.stdout.fnmatch_lines(["*1 passed*"])
def test_cmdline_python_package_symlink(self, testdir, monkeypatch):
"""
--pyargs with packages with path containing symlink can have conftest.py in
their package (#2985)
"""
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
dirname = "lib"
d = testdir.mkdir(dirname)
foo = d.mkdir("foo")
foo.ensure("__init__.py")
lib = foo.mkdir("bar")
lib.ensure("__init__.py")
lib.join("test_bar.py").write(
"def test_bar(): pass\ndef test_other(a_fixture):pass"
)
lib.join("conftest.py").write(
"import pytest\[email protected]\ndef a_fixture():pass"
)
d_local = testdir.mkdir("symlink_root")
symlink_location = d_local / "lib"
symlink_or_skip(d, symlink_location, target_is_directory=True)
# The structure of the test directory is now:
# .
# ├── symlink_root
# │ └── lib -> ../lib
# └── lib
# └── foo
# ├── __init__.py
# └── bar
# ├── __init__.py
# ├── conftest.py
# └── test_bar.py
# NOTE: the different/reversed ordering is intentional here.
search_path = ["lib", os.path.join("symlink_root", "lib")]
monkeypatch.setenv("PYTHONPATH", prepend_pythonpath(*search_path))
for p in search_path:
monkeypatch.syspath_prepend(p)
# module picked up in symlink-ed directory:
# It picks up symlink_root/lib/foo/bar (symlink) via sys.path.
result = testdir.runpytest("--pyargs", "-v", "foo.bar")
testdir.chdir()
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"symlink_root/lib/foo/bar/test_bar.py::test_bar PASSED*",
"symlink_root/lib/foo/bar/test_bar.py::test_other PASSED*",
"*2 passed*",
]
)
def test_cmdline_python_package_not_exists(self, testdir):
result = testdir.runpytest("--pyargs", "tpkgwhatv")
assert result.ret
result.stderr.fnmatch_lines(["ERROR*module*or*package*not*found*"])
@pytest.mark.xfail(reason="decide: feature or bug")
def test_noclass_discovery_if_not_testcase(self, testdir):
testpath = testdir.makepyfile(
"""
import unittest
class TestHello(object):
def test_hello(self):
assert self.attr
class RealTest(unittest.TestCase, TestHello):
attr = 42
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=1)
def test_doctest_id(self, testdir):
testdir.makefile(
".txt",
"""
>>> x=3
>>> x
4
""",
)
testid = "test_doctest_id.txt::test_doctest_id.txt"
expected_lines = [
"*= FAILURES =*",
"*_ ?doctest? test_doctest_id.txt _*",
"FAILED test_doctest_id.txt::test_doctest_id.txt",
"*= 1 failed in*",
]
result = testdir.runpytest(testid, "-rf", "--tb=short")
result.stdout.fnmatch_lines(expected_lines)
# Ensure that re-running it will still handle it as
# doctest.DocTestFailure, which was not the case before when
# re-importing doctest, but not creating a new RUNNER_CLASS.
result = testdir.runpytest(testid, "-rf", "--tb=short")
result.stdout.fnmatch_lines(expected_lines)
def test_core_backward_compatibility(self):
"""Test backward compatibility for get_plugin_manager function. See #787."""
import _pytest.config
assert (
type(_pytest.config.get_plugin_manager())
is _pytest.config.PytestPluginManager
)
def test_has_plugin(self, request):
"""Test hasplugin function of the plugin manager (#932)."""
assert request.config.pluginmanager.hasplugin("python")
class TestDurations:
source = """
from _pytest import timing
def test_something():
pass
def test_2():
timing.sleep(0.010)
def test_1():
timing.sleep(0.002)
def test_3():
timing.sleep(0.020)
"""
def test_calls(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random(
["*durations*", "*call*test_3*", "*call*test_2*"]
)
result.stdout.fnmatch_lines(
["(8 durations < 0.005s hidden. Use -vv to show these durations.)"]
)
def test_calls_show_2(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=2")
assert result.ret == 0
lines = result.stdout.get_lines_after("*slowest*durations*")
assert "4 passed" in lines[2]
def test_calls_showall(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=0")
assert result.ret == 0
tested = "3"
for x in tested:
for y in ("call",): # 'setup', 'call', 'teardown':
for line in result.stdout.lines:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found {} {}".format(x, y))
def test_calls_showall_verbose(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=0", "-vv")
assert result.ret == 0
for x in "123":
for y in ("call",): # 'setup', 'call', 'teardown':
for line in result.stdout.lines:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found {} {}".format(x, y))
def test_with_deselected(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=2", "-k test_3")
assert result.ret == 0
result.stdout.fnmatch_lines(["*durations*", "*call*test_3*"])
def test_with_failing_collection(self, testdir, mock_timing):
testdir.makepyfile(self.source)
testdir.makepyfile(test_collecterror="""xyz""")
result = testdir.runpytest_inprocess("--durations=2", "-k test_1")
assert result.ret == 2
result.stdout.fnmatch_lines(["*Interrupted: 1 error during collection*"])
# Collection errors abort test execution, therefore no duration is
# output
result.stdout.no_fnmatch_line("*duration*")
def test_with_not(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("-k not 1")
assert result.ret == 0
class TestDurationsWithFixture:
source = """
import pytest
from _pytest import timing
@pytest.fixture
def setup_fixt():
timing.sleep(2)
def test_1(setup_fixt):
timing.sleep(5)
"""
def test_setup_function(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random(
"""
*durations*
5.00s call *test_1*
2.00s setup *test_1*
"""
)
def test_zipimport_hook(testdir, tmpdir):
"""Test package loader is being used correctly (see #1837)."""
zipapp = pytest.importorskip("zipapp")
testdir.tmpdir.join("app").ensure(dir=1)
testdir.makepyfile(
**{
"app/foo.py": """
import pytest
def main():
pytest.main(['--pyargs', 'foo'])
"""
}
)
target = tmpdir.join("foo.zip")
zipapp.create_archive(str(testdir.tmpdir.join("app")), str(target), main="foo:main")
result = testdir.runpython(target)
assert result.ret == 0
result.stderr.fnmatch_lines(["*not found*foo*"])
result.stdout.no_fnmatch_line("*INTERNALERROR>*")
def test_import_plugin_unicode_name(testdir):
testdir.makepyfile(myplugin="")
testdir.makepyfile("def test(): pass")
testdir.makeconftest("pytest_plugins = ['myplugin']")
r = testdir.runpytest()
assert r.ret == 0
def test_pytest_plugins_as_module(testdir):
"""Do not raise an error if pytest_plugins attribute is a module (#3899)"""
testdir.makepyfile(
**{
"__init__.py": "",
"pytest_plugins.py": "",
"conftest.py": "from . import pytest_plugins",
"test_foo.py": "def test(): pass",
}
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 passed in *"])
def test_deferred_hook_checking(testdir):
"""Check hooks as late as possible (#1821)."""
testdir.syspathinsert()
testdir.makepyfile(
**{
"plugin.py": """
class Hooks(object):
def pytest_my_hook(self, config):
pass
def pytest_configure(config):
config.pluginmanager.add_hookspecs(Hooks)
""",
"conftest.py": """
pytest_plugins = ['plugin']
def pytest_my_hook(config):
return 40
""",
"test_foo.py": """
def test(request):
assert request.config.hook.pytest_my_hook(config=request.config) == [40]
""",
}
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_fixture_values_leak(testdir):
"""Ensure that fixture objects are properly destroyed by the garbage collector at the end of their expected
life-times (#2981).
"""
testdir.makepyfile(
"""
import attr
import gc
import pytest
import weakref
@attr.s
class SomeObj(object):
name = attr.ib()
fix_of_test1_ref = None
session_ref = None
@pytest.fixture(scope='session')
def session_fix():
global session_ref
obj = SomeObj(name='session-fixture')
session_ref = weakref.ref(obj)
return obj
@pytest.fixture
def fix(session_fix):
global fix_of_test1_ref
obj = SomeObj(name='local-fixture')
fix_of_test1_ref = weakref.ref(obj)
return obj
def test1(fix):
assert fix_of_test1_ref() is fix
def test2():
gc.collect()
# fixture "fix" created during test1 must have been destroyed by now
assert fix_of_test1_ref() is None
"""
)
# Running on subprocess does not activate the HookRecorder
# which holds itself a reference to objects in case of the
# pytest_assert_reprcompare hook
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["* 2 passed *"])
def test_fixture_order_respects_scope(testdir):
"""Ensure that fixtures are created according to scope order (#2405)."""
testdir.makepyfile(
"""
import pytest
data = {}
@pytest.fixture(scope='module')
def clean_data():
data.clear()
@pytest.fixture(autouse=True)
def add_data():
data.update(value=True)
@pytest.mark.usefixtures('clean_data')
def test_value():
assert data.get('value')
"""
)
result = testdir.runpytest()
assert result.ret == 0
def test_frame_leak_on_failing_test(testdir):
"""Pytest would leak garbage referencing the frames of tests that failed
that could never be reclaimed (#2798).
Unfortunately it was not possible to remove the actual circles because most of them
are made of traceback objects which cannot be weakly referenced. Those objects at least
can be eventually claimed by the garbage collector.
"""
testdir.makepyfile(
"""
import gc
import weakref
class Obj:
pass
ref = None
def test1():
obj = Obj()
global ref
ref = weakref.ref(obj)
assert 0
def test2():
gc.collect()
assert ref() is None
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 failed, 1 passed in*"])
def test_fixture_mock_integration(testdir):
"""Test that decorators applied to fixture are left working (#3774)"""
p = testdir.copy_example("acceptance/fixture_mock_integration.py")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_usage_error_code(testdir):
result = testdir.runpytest("-unknown-option-")
assert result.ret == ExitCode.USAGE_ERROR
@pytest.mark.filterwarnings("default")
def test_warn_on_async_function(testdir):
# In the below we .close() the coroutine only to avoid
# "RuntimeWarning: coroutine 'test_2' was never awaited"
# which messes with other tests.
testdir.makepyfile(
test_async="""
async def test_1():
pass
async def test_2():
pass
def test_3():
coro = test_2()
coro.close()
return coro
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"test_async.py::test_1",
"test_async.py::test_2",
"test_async.py::test_3",
"*async def functions are not natively supported*",
"*3 skipped, 3 warnings in*",
]
)
# ensure our warning message appears only once
assert (
result.stdout.str().count("async def functions are not natively supported") == 1
)
@pytest.mark.filterwarnings("default")
@pytest.mark.skipif(
sys.version_info < (3, 6), reason="async gen syntax available in Python 3.6+"
)
def test_warn_on_async_gen_function(testdir):
testdir.makepyfile(
test_async="""
async def test_1():
yield
async def test_2():
yield
def test_3():
return test_2()
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"test_async.py::test_1",
"test_async.py::test_2",
"test_async.py::test_3",
"*async def functions are not natively supported*",
"*3 skipped, 3 warnings in*",
]
)
# ensure our warning message appears only once
assert (
result.stdout.str().count("async def functions are not natively supported") == 1
)
def test_pdb_can_be_rewritten(testdir):
testdir.makepyfile(
**{
"conftest.py": """
import pytest
pytest.register_assert_rewrite("pdb")
""",
"__init__.py": "",
"pdb.py": """
def check():
assert 1 == 2
""",
"test_pdb.py": """
def test():
import pdb
assert pdb.check()
""",
}
)
# Disable debugging plugin itself to avoid:
# > INTERNALERROR> AttributeError: module 'pdb' has no attribute 'set_trace'
result = testdir.runpytest_subprocess("-p", "no:debugging", "-vv")
result.stdout.fnmatch_lines(
[
" def check():",
"> assert 1 == 2",
"E assert 1 == 2",
"E +1",
"E -2",
"",
"pdb.py:2: AssertionError",
"*= 1 failed in *",
]
)
assert result.ret == 1
def test_tee_stdio_captures_and_live_prints(testdir):
testpath = testdir.makepyfile(
"""
import sys
def test_simple():
print ("@this is stdout@")
print ("@this is stderr@", file=sys.stderr)
"""
)
result = testdir.runpytest_subprocess(
testpath,
"--capture=tee-sys",
"--junitxml=output.xml",
"-o",
"junit_logging=all",
)
# ensure stdout/stderr were 'live printed'
result.stdout.fnmatch_lines(["*@this is stdout@*"])
result.stderr.fnmatch_lines(["*@this is stderr@*"])
# now ensure the output is in the junitxml
with open(os.path.join(testdir.tmpdir.strpath, "output.xml")) as f:
fullXml = f.read()
assert "@this is stdout@\n" in fullXml
assert "@this is stderr@\n" in fullXml
@pytest.mark.skipif(
sys.platform == "win32",
reason="Windows raises `OSError: [Errno 22] Invalid argument` instead",
)
def test_no_brokenpipeerror_message(testdir: Testdir) -> None:
"""Ensure that the broken pipe error message is supressed.
In some Python versions, it reaches sys.unraisablehook, in others
a BrokenPipeError exception is propagated, but either way it prints
to stderr on shutdown, so checking nothing is printed is enough.
"""
popen = testdir.popen((*testdir._getpytestargs(), "--help"))
popen.stdout.close()
ret = popen.wait()
assert popen.stderr.read() == b""
assert ret == 1
|
[] |
[] |
[
"PYTHONPATH"
] |
[]
|
["PYTHONPATH"]
|
python
| 1 | 0 | |
360agent/plugins/vms.py
|
from __future__ import print_function
import re, sys, os
import libvirt
import libxml2
import time
import plugins
import psutil
class Plugin(plugins.BasePlugin):
__name__ = 'vms'
def run(self, config):
'''
Using the libvirt API to fetch statistics from guests
running KVM, QEMU, Xen, Virtuozzo, VMWare ESX, LXC,
BHyve and more
'''
results = {}
last_value = {}
prev_cache = self.get_agent_cache() # Get absolute values from previous check
uri = os.getenv("uri", "qemu:///system")
values = self.fetch_values(uri)
deltas = {}
for key, value in values.items():
deltas[key] = {}
for subkey, subvalue in value.items():
if subkey == 'mem_bytes' or subkey == 'soft_limit_bytes' or subkey == 'min_guarantee_bytes' or subkey == 'hard_limit_bytes':
deltas[key][subkey] = value[subkey]
else:
deltas[key][subkey] = self.absolute_to_per_second('%s_%s' % (key, subkey), float(subvalue), prev_cache)
last_value['%s_%s' % (key, subkey)] = float(value[subkey])
last_value['ts'] = time.time()
self.set_agent_cache(last_value)
return deltas
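# An illustrative sketch of the value returned above (the guest name and numbers
# are made up): memory keys keep their absolute values while the remaining
# counters are converted to per-second rates, e.g.
#
#     {"web_01": {"cpu": 0.42, "cpu_percentage": 0.10,
#                 "net_rx_bytes": 1024.0, "net_tx_bytes": 512.0,
#                 "disk_rd_bytes": 0.0, "disk_wr_bytes": 2048.0,
#                 "disk_rd_req": 0.0, "disk_wr_req": 3.0,
#                 "mem_bytes": 1073741824, "min_guarantee_bytes": 0,
#                 "soft_limit_bytes": 0, "hard_limit_bytes": 0}}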
def canon(self, name):
return re.sub(r"[^a-zA-Z0-9_]", "_", name)
def get_ifaces(self, dom):
xml = dom.XMLDesc(0)
doc = None
try:
doc = libxml2.parseDoc(xml)
except:
return []
ctx = doc.xpathNewContext()
ifaces = []
try:
ret = ctx.xpathEval("/domain/devices/interface")
for node in ret:
devdst = None
for child in node.children:
if child.name == "target":
devdst = child.prop("dev")
if devdst == None:
continue
ifaces.append(devdst)
finally:
if ctx != None:
ctx.xpathFreeContext()
if doc != None:
doc.freeDoc()
return ifaces
def get_memtune(self, dom):
memtune = { 'min_guarantee': 0, 'soft_limit': 0, 'hard_limit': 0 }
xml = dom.XMLDesc(0)
try:
doc = libxml2.parseDoc(xml)
except:
return []
ctx = doc.xpathNewContext()
try:
for key in memtune:
ret = ctx.xpathEval("/domain/memtune/%s" % key)
try:
for child in ret[0].children:
memtune[key] = int(child.content)
break
except IndexError:
# key not found in xml
pass
finally:
if ctx != None:
ctx.xpathFreeContext()
if doc != None:
doc.freeDoc()
return memtune
def fetch_values(self, uri):
conn = libvirt.openReadOnly(uri)
ids = conn.listDomainsID()
results = {}
for id in ids:
data = {}
data['net_rx_bytes'] = 0
data['net_tx_bytes'] = 0
try:
dom = conn.lookupByID(id)
name = dom.name()
except libvirt.libvirtError as err:
print("Id: %s: %s" % (id, err), file=sys.stderr)
continue
if name == "Domain-0":
continue
ifaces = self.get_ifaces(dom)
for iface in ifaces:
try:
stats = dom.interfaceStats(iface)
data['net_rx_bytes'] += stats[0]
data['net_tx_bytes'] += stats[4]
except:
print >>sys.stderr, "Cannot get ifstats for '%s' on '%s'" % (iface, name)
cputime = float(dom.info()[4])
cputime_percentage = 1.0e-7 * cputime
data['cpu'] = cputime_percentage
try:
data['cpu_percentage'] = cputime_percentage / psutil.cpu_count()
except Exception as e:
pass
maxmem, mem = dom.info()[1:3]
mem *= 1024
maxmem *= 1024
data['mem_bytes'] = mem
memtune = self.get_memtune(dom)
data['min_guarantee_bytes'] = memtune['min_guarantee'] * 1024
data['hard_limit_bytes'] = memtune['hard_limit'] * 1024
data['soft_limit_bytes'] = memtune['soft_limit'] * 1024
data['disk_rd_bytes'] = 0
data['disk_wr_bytes'] = 0
data['disk_wr_req'] = 0
data['disk_rd_req'] = 0
try:
dom = conn.lookupByID(id)
name = dom.name()
except libvirt.libvirtError as err:
print("Id: %s: %s" % (id, err), file=sys.stderr)
continue
if name == "Domain-0":
continue
disks = self.get_disks(dom)
for disk in disks:
try:
rd_req, rd_bytes, wr_req, wr_bytes, errs = dom.blockStats(disk)
data['disk_rd_bytes'] += rd_bytes
data['disk_wr_bytes'] += wr_bytes
data['disk_rd_req'] += rd_req
data['disk_wr_req'] += wr_req
except TypeError:
print >>sys.stderr, "Cannot get blockstats for '%s' on '%s'" % (disk, name)
results[self.canon(name)] = data
return results
def get_disks(self, dom):
xml = dom.XMLDesc(0)
doc = None
try:
doc = libxml2.parseDoc(xml)
except:
return []
ctx = doc.xpathNewContext()
disks = []
try:
ret = ctx.xpathEval("/domain/devices/disk")
for node in ret:
devdst = None
for child in node.children:
if child.name == "target":
devdst = child.prop("dev")
if devdst == None:
continue
disks.append(devdst)
finally:
if ctx != None:
ctx.xpathFreeContext()
if doc != None:
doc.freeDoc()
return disks
if __name__ == '__main__':
Plugin().execute()
|
[] |
[] |
[
"uri"
] |
[]
|
["uri"]
|
python
| 1 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Apache Libcloud documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 31 12:16:27 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import datetime
import subprocess
from sphinx.environment import BuildEnvironment
from sphinx.ext.autodoc import AutoDirective
from sphinx.ext.autodoc import AutodocReporter
from sphinx.domains.python import PythonDomain
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.abspath(BASE_DIR)
# Detect if we are running on read the docs
on_rtd = os.environ.get('READTHEDOCS', '').lower() == 'true'
on_travis = os.environ.get('TRAVIS', '').lower() == 'true'
if on_rtd:
cmd = 'sphinx-apidoc -d 4 -o apidocs/ ../libcloud/'
subprocess.call(cmd, shell=True)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
now = datetime.datetime.utcnow()
project = u'Apache Libcloud'
copyright = u'Copyright (C) 2009 - %s The Apache Software Foundation. Apache Libcloud, Libcloud, Apache, the Apache feather, and the Apache Libcloud project logo are trademarks of the Apache Software Foundation. All other marks mentioned may be trademarks or registered trademarks of their respective owners' % (now.year)
html_show_sphinx = False
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.2.0'
# The full version, including alpha/beta/rc tags.
release = '3.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'_build',
'*/_*.rst'
]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if on_rtd:
html_theme = 'default'
RTD_NEW_THEME = True
else:
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_static/images/']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ApacheLibclouddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ApacheLibcloud.tex', u'Apache Libcloud Documentation',
u'The Apache Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'apachelibcloud', u'Apache Libcloud Documentation',
[u'The Apache Software Foundation'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ApacheLibcloud', u'Apache Libcloud Documentation',
u'The Apache Software Foundation', 'ApacheLibcloud', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
autoclass_content = 'both'
# Note: For now we ignore sphinx-autodoc warnings since there are too many
# and we want at least documentation (not docstring) related warnings to be
# reported, treated as errors and fixed.
def noop(*args, **kwargs):
pass
def mock_warning(self, *args, **kwargs):
# We re-write warning as info (level 1)
return self.system_message(1, *args, **kwargs)
original_warn_node = BuildEnvironment.warn_node
def ignore_more_than_one_target_found_errors(self, msg, node):
if 'more than one target found' in msg:
return None
return original_warn_node(self, msg, node)
# Monkey patch the original methods
AutoDirective.warn = noop
AutodocReporter.warning = mock_warning
BuildEnvironment.warn_node = ignore_more_than_one_target_found_errors
# Ignore "more than one target found for cross-reference" errors which are false
# positives
class PatchedPythonDomain(PythonDomain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
if 'refspecific' in node:
del node['refspecific']
return super(PatchedPythonDomain, self).resolve_xref(
env, fromdocname, builder, typ, target, node, contnode)
def setup(sphinx):
sphinx.override_domain(PatchedPythonDomain)
|
[] |
[] |
[
"TRAVIS",
"READTHEDOCS"
] |
[]
|
["TRAVIS", "READTHEDOCS"]
|
python
| 2 | 0 | |
bccsp/pkcs11/impl_test.go
|
// +build pkcs11
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package pkcs11
import (
"bytes"
"crypto"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/sha512"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"fmt"
"hash"
"math/big"
"net"
"os"
"strings"
"testing"
"time"
"deepchain/bccsp"
"deepchain/bccsp/signer"
"deepchain/bccsp/sw"
"deepchain/bccsp/utils"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/sha3"
)
var (
currentKS bccsp.KeyStore
currentBCCSP bccsp.BCCSP
currentTestConfig testConfig
)
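// testConfig describes a single PKCS11 BCCSP configuration (security level, hash family,
// soft-verify and immutability flags) exercised by each TestMain iteration.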
type testConfig struct {
securityLevel int
hashFamily string
softVerify bool
immutable bool
}
func TestMain(m *testing.M) {
ks, err := sw.NewFileBasedKeyStore(nil, os.TempDir(), false)
if err != nil {
fmt.Printf("Failed initiliazing KeyStore [%s]", err)
os.Exit(-1)
}
currentKS = ks
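// FindPKCS11Lib resolves the PKCS11 library path, PIN and token label, preferring the
// PKCS11_LIB, PKCS11_PIN and PKCS11_LABEL environment variables over built-in defaults.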
lib, pin, label := FindPKCS11Lib()
tests := []testConfig{
{256, "SHA2", true, false},
{256, "SHA3", false, false},
{384, "SHA2", false, false},
{384, "SHA3", false, false},
{384, "SHA3", true, false},
}
if strings.Contains(lib, "softhsm") {
tests = append(tests, []testConfig{
{256, "SHA2", true, false},
{256, "SHA2", true, true},
}...)
}
opts := PKCS11Opts{
Library: lib,
Label: label,
Pin: pin,
}
for _, config := range tests {
var err error
currentTestConfig = config
opts.HashFamily = config.hashFamily
opts.SecLevel = config.securityLevel
opts.SoftVerify = config.softVerify
opts.Immutable = config.immutable
fmt.Printf("Immutable = [%v]", opts.Immutable)
currentBCCSP, err = New(opts, currentKS)
if err != nil {
fmt.Printf("Failed initiliazing BCCSP at [%+v]: [%s]", opts, err)
os.Exit(-1)
}
ret := m.Run()
if ret != 0 {
fmt.Printf("Failed testing at [%+v]", opts)
os.Exit(-1)
}
}
os.Exit(0)
}
func TestNew(t *testing.T) {
opts := PKCS11Opts{
HashFamily: "SHA2",
SecLevel: 256,
SoftVerify: false,
Library: "lib",
Label: "ForFabric",
Pin: "98765432",
}
// Setup PKCS11 library and provide initial set of values
lib, _, _ := FindPKCS11Lib()
opts.Library = lib
// Test for nil keystore
_, err := New(opts, nil)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Invalid bccsp.KeyStore instance. It must be different from nil.")
// Test for invalid PKCS11 loadLib
opts.Library = ""
_, err = New(opts, currentKS)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Failed initializing PKCS11 library")
}
func TestFindPKCS11LibEnvVars(t *testing.T) {
const (
dummy_PKCS11_LIB = "/usr/lib/pkcs11"
dummy_PKCS11_PIN = "98765432"
dummy_PKCS11_LABEL = "testing"
)
// Set environment variables used for test and preserve
// original values for restoration after test completion
orig_PKCS11_LIB := os.Getenv("PKCS11_LIB")
os.Setenv("PKCS11_LIB", dummy_PKCS11_LIB)
orig_PKCS11_PIN := os.Getenv("PKCS11_PIN")
os.Setenv("PKCS11_PIN", dummy_PKCS11_PIN)
orig_PKCS11_LABEL := os.Getenv("PKCS11_LABEL")
os.Setenv("PKCS11_LABEL", dummy_PKCS11_LABEL)
lib, pin, label := FindPKCS11Lib()
assert.EqualValues(t, dummy_PKCS11_LIB, lib, "FindPKCS11Lib did not return expected library")
assert.EqualValues(t, dummy_PKCS11_PIN, pin, "FindPKCS11Lib did not return expected pin")
assert.EqualValues(t, dummy_PKCS11_LABEL, label, "FindPKCS11Lib did not return expected label")
os.Setenv("PKCS11_LIB", orig_PKCS11_LIB)
os.Setenv("PKCS11_PIN", orig_PKCS11_PIN)
os.Setenv("PKCS11_LABEL", orig_PKCS11_LABEL)
}
func TestInvalidNewParameter(t *testing.T) {
lib, pin, label := FindPKCS11Lib()
opts := PKCS11Opts{
Library: lib,
Label: label,
Pin: pin,
SoftVerify: true,
}
opts.HashFamily = "SHA2"
opts.SecLevel = 0
r, err := New(opts, currentKS)
if err == nil {
t.Fatal("Error should be different from nil in this case")
}
if r != nil {
t.Fatal("Return value should be equal to nil in this case")
}
opts.HashFamily = "SHA8"
opts.SecLevel = 256
r, err = New(opts, currentKS)
if err == nil {
t.Fatal("Error should be different from nil in this case")
}
if r != nil {
t.Fatal("Return value should be equal to nil in this case")
}
opts.HashFamily = "SHA2"
opts.SecLevel = 256
r, err = New(opts, nil)
if err == nil {
t.Fatal("Error should be different from nil in this case")
}
if r != nil {
t.Fatal("Return value should be equal to nil in this case")
}
opts.HashFamily = "SHA3"
opts.SecLevel = 0
r, err = New(opts, nil)
if err == nil {
t.Fatal("Error should be different from nil in this case")
}
if r != nil {
t.Fatal("Return value should be equal to nil in this case")
}
}
func TestInvalidSKI(t *testing.T) {
k, err := currentBCCSP.GetKey(nil)
if err == nil {
t.Fatal("Error should be different from nil in this case")
}
if k != nil {
t.Fatal("Return value should be equal to nil in this case")
}
k, err = currentBCCSP.GetKey([]byte{0, 1, 2, 3, 4, 5, 6})
if err == nil {
t.Fatal("Error should be different from nil in this case")
}
if k != nil {
t.Fatal("Return value should be equal to nil in this case")
}
}
func TestKeyGenECDSAOpts(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestKeyGenECDSAOpts")
}
// Curve P256
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAP256KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA P256 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating ECDSA P256 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating ECDSA P256 key. Key should be private")
}
if k.Symmetric() {
t.Fatal("Failed generating ECDSA P256 key. Key should be asymmetric")
}
ecdsaKey := k.(*ecdsaPrivateKey).pub
if elliptic.P256() != ecdsaKey.pub.Curve {
t.Fatal("P256 generated key in invalid. The curve must be P256.")
}
// Curve P384
k, err = currentBCCSP.KeyGen(&bccsp.ECDSAP384KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA P384 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating ECDSA P384 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating ECDSA P384 key. Key should be private")
}
if k.Symmetric() {
t.Fatal("Failed generating ECDSA P384 key. Key should be asymmetric")
}
ecdsaKey = k.(*ecdsaPrivateKey).pub
if elliptic.P384() != ecdsaKey.pub.Curve {
t.Fatal("P256 generated key in invalid. The curve must be P384.")
}
}
func TestKeyGenRSAOpts(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestKeyGenRSAOpts")
}
// 1024
k, err := currentBCCSP.KeyGen(&bccsp.RSA1024KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA 1024 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating RSA 1024 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating RSA 1024 key. Key should be private")
}
if k.Symmetric() {
t.Fatal("Failed generating RSA 1024 key. Key should be asymmetric")
}
// 2048
k, err = currentBCCSP.KeyGen(&bccsp.RSA2048KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA 2048 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating RSA 2048 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating RSA 2048 key. Key should be private")
}
if k.Symmetric() {
t.Fatal("Failed generating RSA 2048 key. Key should be asymmetric")
}
}
func TestKeyGenAESOpts(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestKeyGenAESOpts")
}
// AES 128
k, err := currentBCCSP.KeyGen(&bccsp.AES128KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES 128 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating AES 128 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating AES 128 key. Key should be private")
}
if !k.Symmetric() {
t.Fatal("Failed generating AES 128 key. Key should be symmetric")
}
// AES 192
k, err = currentBCCSP.KeyGen(&bccsp.AES192KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES 192 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating AES 192 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating AES 192 key. Key should be private")
}
if !k.Symmetric() {
t.Fatal("Failed generating AES 192 key. Key should be symmetric")
}
// AES 256
k, err = currentBCCSP.KeyGen(&bccsp.AES256KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES 256 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating AES 256 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating AES 256 key. Key should be private")
}
if !k.Symmetric() {
t.Fatal("Failed generating AES 256 key. Key should be symmetric")
}
}
func TestHashOpts(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestHashOpts")
}
msg := []byte("abcd")
// SHA256
digest1, err := currentBCCSP.Hash(msg, &bccsp.SHA256Opts{})
if err != nil {
t.Fatalf("Failed computing SHA256 [%s]", err)
}
h := sha256.New()
h.Write(msg)
digest2 := h.Sum(nil)
if !bytes.Equal(digest1, digest2) {
t.Fatalf("Different SHA256 computed. [%x][%x]", digest1, digest2)
}
// SHA384
digest1, err = currentBCCSP.Hash(msg, &bccsp.SHA384Opts{})
if err != nil {
t.Fatalf("Failed computing SHA384 [%s]", err)
}
h = sha512.New384()
h.Write(msg)
digest2 = h.Sum(nil)
if !bytes.Equal(digest1, digest2) {
t.Fatalf("Different SHA384 computed. [%x][%x]", digest1, digest2)
}
// SHA3_256
digest1, err = currentBCCSP.Hash(msg, &bccsp.SHA3_256Opts{})
if err != nil {
t.Fatalf("Failed computing SHA3_256 [%s]", err)
}
h = sha3.New256()
h.Write(msg)
digest2 = h.Sum(nil)
if !bytes.Equal(digest1, digest2) {
t.Fatalf("Different SHA3_256 computed. [%x][%x]", digest1, digest2)
}
// SHA3_384
digest1, err = currentBCCSP.Hash(msg, &bccsp.SHA3_384Opts{})
if err != nil {
t.Fatalf("Failed computing SHA3_384 [%s]", err)
}
h = sha3.New384()
h.Write(msg)
digest2 = h.Sum(nil)
if !bytes.Equal(digest1, digest2) {
t.Fatalf("Different SHA3_384 computed. [%x][%x]", digest1, digest2)
}
}
func TestECDSAKeyGenEphemeral(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSAKeyGenEphemeral")
}
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: true})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating ECDSA key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating ECDSA key. Key should be private")
}
if k.Symmetric() {
t.Fatal("Failed generating ECDSA key. Key should be asymmetric")
}
raw, err := k.Bytes()
if err == nil {
t.Fatal("Failed marshalling to bytes. Marshalling must fail.")
}
if len(raw) != 0 {
t.Fatal("Failed marshalling to bytes. Output should be 0 bytes")
}
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting corresponding public key [%s]", err)
}
if pk == nil {
t.Fatal("Public key must be different from nil.")
}
}
func TestECDSAPrivateKeySKI(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSAPrivateKeySKI")
}
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
ski := k.SKI()
if len(ski) == 0 {
t.Fatal("SKI not valid. Zero length.")
}
}
func TestECDSAKeyGenNonEphemeral(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSAKeyGenNonEphemeral")
}
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating ECDSA key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating ECDSA key. Key should be private")
}
if k.Symmetric() {
t.Fatal("Failed generating ECDSA key. Key should be asymmetric")
}
}
func TestECDSAGetKeyBySKI(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSAGetKeyBySKI")
}
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
k2, err := currentBCCSP.GetKey(k.SKI())
if err != nil {
t.Fatalf("Failed getting ECDSA key [%s]", err)
}
if k2 == nil {
t.Fatal("Failed getting ECDSA key. Key must be different from nil")
}
if !k2.Private() {
t.Fatal("Failed getting ECDSA key. Key should be private")
}
if k2.Symmetric() {
t.Fatal("Failed getting ECDSA key. Key should be asymmetric")
}
// Check that the SKIs are the same
if !bytes.Equal(k.SKI(), k2.SKI()) {
t.Fatalf("SKIs are different [%x]!=[%x]", k.SKI(), k2.SKI())
}
}
func TestECDSAPublicKeyFromPrivateKey(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSAPublicKeyFromPrivateKey")
}
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting public key from private ECDSA key [%s]", err)
}
if pk == nil {
t.Fatal("Failed getting public key from private ECDSA key. Key must be different from nil")
}
if pk.Private() {
t.Fatal("Failed generating ECDSA key. Key should be public")
}
if pk.Symmetric() {
t.Fatal("Failed generating ECDSA key. Key should be asymmetric")
}
}
func TestECDSAPublicKeyBytes(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSAPublicKeyBytes")
}
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting public key from private ECDSA key [%s]", err)
}
raw, err := pk.Bytes()
if err != nil {
t.Fatalf("Failed marshalling ECDSA public key [%s]", err)
}
if len(raw) == 0 {
t.Fatal("Failed marshalling ECDSA public key. Zero length")
}
}
func TestECDSAPublicKeySKI(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSAPublicKeySKI")
}
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting public key from private ECDSA key [%s]", err)
}
ski := pk.SKI()
if len(ski) == 0 {
t.Fatal("SKI not valid. Zero length.")
}
}
func TestECDSASign(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSASign")
}
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
msg := []byte("Hello World")
digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
if err != nil {
t.Fatalf("Failed computing HASH [%s]", err)
}
signature, err := currentBCCSP.Sign(k, digest, nil)
if err != nil {
t.Fatalf("Failed generating ECDSA signature [%s]", err)
}
if len(signature) == 0 {
t.Fatal("Failed generating ECDSA key. Signature must be different from nil")
}
_, err = currentBCCSP.Sign(nil, digest, nil)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Invalid Key. It must not be nil")
_, err = currentBCCSP.Sign(k, nil, nil)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Invalid digest. Cannot be empty")
}
func TestECDSAVerify(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSAVerify")
}
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
msg := []byte("Hello World")
digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
if err != nil {
t.Fatalf("Failed computing HASH [%s]", err)
}
signature, err := currentBCCSP.Sign(k, digest, nil)
if err != nil {
t.Fatalf("Failed generating ECDSA signature [%s]", err)
}
valid, err := currentBCCSP.Verify(k, signature, digest, nil)
if err != nil {
t.Fatalf("Failed verifying ECDSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
}
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting corresponding public key [%s]", err)
}
valid, err = currentBCCSP.Verify(pk, signature, digest, nil)
if err != nil {
t.Fatalf("Failed verifying ECDSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
}
_, err = currentBCCSP.Verify(nil, signature, digest, nil)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Invalid Key. It must not be nil")
_, err = currentBCCSP.Verify(pk, nil, digest, nil)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Invalid signature. Cannot be empty")
_, err = currentBCCSP.Verify(pk, signature, nil, nil)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Invalid digest. Cannot be empty")
// Import the exported public key
pkRaw, err := pk.Bytes()
if err != nil {
t.Fatalf("Failed getting ECDSA raw public key [%s]", err)
}
// Store public key
_, err = currentBCCSP.KeyImport(pkRaw, &bccsp.ECDSAPKIXPublicKeyImportOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed storing corresponding public key [%s]", err)
}
pk2, err := currentBCCSP.GetKey(pk.SKI())
if err != nil {
t.Fatalf("Failed retrieving corresponding public key [%s]", err)
}
valid, err = currentBCCSP.Verify(pk2, signature, digest, nil)
if err != nil {
t.Fatalf("Failed verifying ECDSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
}
}
func TestECDSAKeyImportFromExportedKey(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSAKeyImportFromExportedKey")
}
// Generate an ECDSA key
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
// Export the public key
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting ECDSA public key [%s]", err)
}
pkRaw, err := pk.Bytes()
if err != nil {
t.Fatalf("Failed getting ECDSA raw public key [%s]", err)
}
// Import the exported public key
pk2, err := currentBCCSP.KeyImport(pkRaw, &bccsp.ECDSAPKIXPublicKeyImportOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed importing ECDSA public key [%s]", err)
}
if pk2 == nil {
t.Fatal("Failed importing ECDSA public key. Return BCCSP key cannot be nil.")
}
// Sign and verify with the imported public key
msg := []byte("Hello World")
digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
if err != nil {
t.Fatalf("Failed computing HASH [%s]", err)
}
signature, err := currentBCCSP.Sign(k, digest, nil)
if err != nil {
t.Fatalf("Failed generating ECDSA signature [%s]", err)
}
valid, err := currentBCCSP.Verify(pk2, signature, digest, nil)
if err != nil {
t.Fatalf("Failed verifying ECDSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
}
}
func TestECDSAKeyImportFromECDSAPublicKey(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSAKeyImportFromECDSAPublicKey")
}
// Generate an ECDSA key
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
// Export the public key
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting ECDSA public key [%s]", err)
}
pkRaw, err := pk.Bytes()
if err != nil {
t.Fatalf("Failed getting ECDSA raw public key [%s]", err)
}
pub, err := utils.DERToPublicKey(pkRaw)
if err != nil {
t.Fatalf("Failed converting raw to ecdsa.PublicKey [%s]", err)
}
// Import the ecdsa.PublicKey
pk2, err := currentBCCSP.KeyImport(pub, &bccsp.ECDSAGoPublicKeyImportOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed importing ECDSA public key [%s]", err)
}
if pk2 == nil {
t.Fatal("Failed importing ECDSA public key. Return BCCSP key cannot be nil.")
}
// Sign and verify with the imported public key
msg := []byte("Hello World")
digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
if err != nil {
t.Fatalf("Failed computing HASH [%s]", err)
}
signature, err := currentBCCSP.Sign(k, digest, nil)
if err != nil {
t.Fatalf("Failed generating ECDSA signature [%s]", err)
}
valid, err := currentBCCSP.Verify(pk2, signature, digest, nil)
if err != nil {
t.Fatalf("Failed verifying ECDSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
}
}
func TestKeyImportFromX509ECDSAPublicKey(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestKeyImportFromX509ECDSAPublicKey")
}
// Generate an ECDSA key
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
// Generate a self-signed certificate
testExtKeyUsage := []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}
testUnknownExtKeyUsage := []asn1.ObjectIdentifier{[]int{1, 2, 3}, []int{2, 59, 1}}
extraExtensionData := []byte("extra extension")
commonName := "test.example.com"
template := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
CommonName: commonName,
Organization: []string{"Σ Acme Co"},
Country: []string{"US"},
ExtraNames: []pkix.AttributeTypeAndValue{
{
Type: []int{2, 5, 4, 42},
Value: "Gopher",
},
// This should override the Country, above.
{
Type: []int{2, 5, 4, 6},
Value: "NL",
},
},
},
NotBefore: time.Now().Add(-1 * time.Hour),
NotAfter: time.Now().Add(1 * time.Hour),
SignatureAlgorithm: x509.ECDSAWithSHA256,
SubjectKeyId: []byte{1, 2, 3, 4},
KeyUsage: x509.KeyUsageCertSign,
ExtKeyUsage: testExtKeyUsage,
UnknownExtKeyUsage: testUnknownExtKeyUsage,
BasicConstraintsValid: true,
IsCA: true,
OCSPServer: []string{"http://ocsp.example.com"},
IssuingCertificateURL: []string{"http://crt.example.com/ca1.crt"},
DNSNames: []string{"test.example.com"},
EmailAddresses: []string{"[email protected]"},
IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1).To4(), net.ParseIP("2001:4860:0:2001::68")},
PolicyIdentifiers: []asn1.ObjectIdentifier{[]int{1, 2, 3}},
PermittedDNSDomains: []string{".example.com", "example.com"},
CRLDistributionPoints: []string{"http://crl1.example.com/ca1.crl", "http://crl2.example.com/ca1.crl"},
ExtraExtensions: []pkix.Extension{
{
Id: []int{1, 2, 3, 4},
Value: extraExtensionData,
},
},
}
cryptoSigner, err := signer.New(currentBCCSP, k)
if err != nil {
t.Fatalf("Failed initializing CyrptoSigner [%s]", err)
}
// Export the public key
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting ECDSA public key [%s]", err)
}
pkRaw, err := pk.Bytes()
if err != nil {
t.Fatalf("Failed getting ECDSA raw public key [%s]", err)
}
pub, err := utils.DERToPublicKey(pkRaw)
if err != nil {
t.Fatalf("Failed converting raw to ECDSA.PublicKey [%s]", err)
}
certRaw, err := x509.CreateCertificate(rand.Reader, &template, &template, pub, cryptoSigner)
if err != nil {
t.Fatalf("Failed generating self-signed certificate [%s]", err)
}
cert, err := utils.DERToX509Certificate(certRaw)
if err != nil {
t.Fatalf("Failed generating X509 certificate object from raw [%s]", err)
}
// Import the certificate's public key
pk2, err := currentBCCSP.KeyImport(cert, &bccsp.X509PublicKeyImportOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed importing ECDSA public key [%s]", err)
}
if pk2 == nil {
t.Fatal("Failed importing ECDSA public key. Return BCCSP key cannot be nil.")
}
// Sign and verify with the imported public key
msg := []byte("Hello World")
digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
if err != nil {
t.Fatalf("Failed computing HASH [%s]", err)
}
signature, err := currentBCCSP.Sign(k, digest, nil)
if err != nil {
t.Fatalf("Failed generating ECDSA signature [%s]", err)
}
valid, err := currentBCCSP.Verify(pk2, signature, digest, nil)
if err != nil {
t.Fatalf("Failed verifying ECDSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
}
}
func TestECDSASignatureEncoding(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSASignatureEncoding")
}
v := []byte{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x02, 0xff, 0xf1}
_, err := asn1.Unmarshal(v, &utils.ECDSASignature{})
if err == nil {
t.Fatalf("Unmarshalling should fail for [% x]", v)
}
t.Logf("Unmarshalling correctly failed for [% x] [%s]", v, err)
v = []byte{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x02, 0x00, 0x01}
_, err = asn1.Unmarshal(v, &utils.ECDSASignature{})
if err == nil {
t.Fatalf("Unmarshalling should fail for [% x]", v)
}
t.Logf("Unmarshalling correctly failed for [% x] [%s]", v, err)
v = []byte{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x81, 0x01, 0x01}
_, err = asn1.Unmarshal(v, &utils.ECDSASignature{})
if err == nil {
t.Fatalf("Unmarshalling should fail for [% x]", v)
}
t.Logf("Unmarshalling correctly failed for [% x] [%s]", v, err)
v = []byte{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x81, 0x01, 0x8F}
_, err = asn1.Unmarshal(v, &utils.ECDSASignature{})
if err == nil {
t.Fatalf("Unmarshalling should fail for [% x]", v)
}
t.Logf("Unmarshalling correctly failed for [% x] [%s]", v, err)
v = []byte{0x30, 0x0A, 0x02, 0x01, 0x8F, 0x02, 0x05, 0x00, 0x00, 0x00, 0x00, 0x8F}
_, err = asn1.Unmarshal(v, &utils.ECDSASignature{})
if err == nil {
t.Fatalf("Unmarshalling should fail for [% x]", v)
}
t.Logf("Unmarshalling correctly failed for [% x] [%s]", v, err)
}
func TestECDSALowS(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestECDSALowS")
}
// Ensure that signature with low-S are generated
k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating ECDSA key [%s]", err)
}
msg := []byte("Hello World")
digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
if err != nil {
t.Fatalf("Failed computing HASH [%s]", err)
}
signature, err := currentBCCSP.Sign(k, digest, nil)
if err != nil {
t.Fatalf("Failed generating ECDSA signature [%s]", err)
}
R, S, err := utils.UnmarshalECDSASignature(signature)
if err != nil {
t.Fatalf("Failed unmarshalling signature [%s]", err)
}
if S.Cmp(utils.GetCurveHalfOrdersAt(k.(*ecdsaPrivateKey).pub.pub.Curve)) >= 0 {
t.Fatal("Invalid signature. It must have low-S")
}
valid, err := currentBCCSP.Verify(k, signature, digest, nil)
if err != nil {
t.Fatalf("Failed verifying ECDSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
}
// Ensure that signature with high-S are rejected.
for {
R, S, err = currentBCCSP.(*impl).signP11ECDSA(k.SKI(), digest)
if err != nil {
t.Fatalf("Failed generating signature [%s]", err)
}
if S.Cmp(utils.GetCurveHalfOrdersAt(k.(*ecdsaPrivateKey).pub.pub.Curve)) > 0 {
break
}
}
sig, err := utils.MarshalECDSASignature(R, S)
if err != nil {
t.Fatalf("Failing unmarshalling signature [%s]", err)
}
valid, err = currentBCCSP.Verify(k, sig, digest, nil)
if err == nil {
t.Fatal("Failed verifying ECDSA signature. It must fail for a signature with high-S")
}
if valid {
t.Fatal("Failed verifying ECDSA signature. It must fail for a signature with high-S")
}
}
func TestAESKeyGen(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestAESKeyGen")
}
k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES_256 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating AES_256 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating AES_256 key. Key should be private")
}
if !k.Symmetric() {
t.Fatal("Failed generating AES_256 key. Key should be symmetric")
}
pk, err := k.PublicKey()
if err == nil {
t.Fatal("Error should be different from nil in this case")
}
if pk != nil {
t.Fatal("Return value should be equal to nil in this case")
}
}
func TestAESEncrypt(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestAESEncrypt")
}
k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES_256 key [%s]", err)
}
ct, err := currentBCCSP.Encrypt(k, []byte("Hello World"), &bccsp.AESCBCPKCS7ModeOpts{})
if err != nil {
t.Fatalf("Failed encrypting [%s]", err)
}
if len(ct) == 0 {
t.Fatal("Failed encrypting. Nil ciphertext")
}
}
func TestAESDecrypt(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestAESDecrypt")
}
k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES_256 key [%s]", err)
}
msg := []byte("Hello World")
ct, err := currentBCCSP.Encrypt(k, msg, &bccsp.AESCBCPKCS7ModeOpts{})
if err != nil {
t.Fatalf("Failed encrypting [%s]", err)
}
pt, err := currentBCCSP.Decrypt(k, ct, bccsp.AESCBCPKCS7ModeOpts{})
if err != nil {
t.Fatalf("Failed decrypting [%s]", err)
}
if len(pt) == 0 {
t.Fatal("Failed decrypting. Nil plaintext")
}
if !bytes.Equal(msg, pt) {
t.Fatalf("Failed decrypting. Decrypted plaintext is different from the original. [%x][%x]", msg, pt)
}
}
func TestHMACTruncated256KeyDerivOverAES256Key(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestHMACTruncated256KeyDerivOverAES256Key")
}
k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES_256 key [%s]", err)
}
hmcaedKey, err := currentBCCSP.KeyDeriv(k, &bccsp.HMACTruncated256AESDeriveKeyOpts{Temporary: false, Arg: []byte{1}})
if err != nil {
t.Fatalf("Failed HMACing AES_256 key [%s]", err)
}
if hmcaedKey == nil {
t.Fatal("Failed HMACing AES_256 key. HMACed Key must be different from nil")
}
if !hmcaedKey.Private() {
t.Fatal("Failed HMACing AES_256 key. HMACed Key should be private")
}
if !hmcaedKey.Symmetric() {
t.Fatal("Failed HMACing AES_256 key. HMACed Key should be asymmetric")
}
raw, err := hmcaedKey.Bytes()
if err == nil {
t.Fatal("Failed marshalling to bytes. Operation must be forbidden")
}
if len(raw) != 0 {
t.Fatal("Failed marshalling to bytes. Operation must return 0 bytes")
}
msg := []byte("Hello World")
ct, err := currentBCCSP.Encrypt(hmcaedKey, msg, &bccsp.AESCBCPKCS7ModeOpts{})
if err != nil {
t.Fatalf("Failed encrypting [%s]", err)
}
pt, err := currentBCCSP.Decrypt(hmcaedKey, ct, bccsp.AESCBCPKCS7ModeOpts{})
if err != nil {
t.Fatalf("Failed decrypting [%s]", err)
}
if len(pt) == 0 {
t.Fatal("Failed decrypting. Nil plaintext")
}
if !bytes.Equal(msg, pt) {
t.Fatalf("Failed decrypting. Decrypted plaintext is different from the original. [%x][%x]", msg, pt)
}
}
func TestHMACKeyDerivOverAES256Key(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestHMACKeyDerivOverAES256Key")
}
k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES_256 key [%s]", err)
}
hmcaedKey, err := currentBCCSP.KeyDeriv(k, &bccsp.HMACDeriveKeyOpts{Temporary: false, Arg: []byte{1}})
if err != nil {
t.Fatalf("Failed HMACing AES_256 key [%s]", err)
}
if hmcaedKey == nil {
t.Fatal("Failed HMACing AES_256 key. HMACed Key must be different from nil")
}
if !hmcaedKey.Private() {
t.Fatal("Failed HMACing AES_256 key. HMACed Key should be private")
}
if !hmcaedKey.Symmetric() {
t.Fatal("Failed HMACing AES_256 key. HMACed Key should be asymmetric")
}
raw, err := hmcaedKey.Bytes()
if err != nil {
t.Fatalf("Failed marshalling to bytes [%s]", err)
}
if len(raw) == 0 {
t.Fatal("Failed marshalling to bytes. 0 bytes")
}
}
func TestAES256KeyImport(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestAES256KeyImport")
}
raw, err := sw.GetRandomBytes(32)
if err != nil {
t.Fatalf("Failed generating AES key [%s]", err)
}
k, err := currentBCCSP.KeyImport(raw, &bccsp.AES256ImportKeyOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed importing AES_256 key [%s]", err)
}
if k == nil {
t.Fatal("Failed importing AES_256 key. Imported Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed HMACing AES_256 key. Imported Key should be private")
}
if !k.Symmetric() {
t.Fatal("Failed HMACing AES_256 key. Imported Key should be asymmetric")
}
raw, err = k.Bytes()
if err == nil {
t.Fatal("Failed marshalling to bytes. Marshalling must fail.")
}
if len(raw) != 0 {
t.Fatal("Failed marshalling to bytes. Output should be 0 bytes")
}
msg := []byte("Hello World")
ct, err := currentBCCSP.Encrypt(k, msg, &bccsp.AESCBCPKCS7ModeOpts{})
if err != nil {
t.Fatalf("Failed encrypting [%s]", err)
}
pt, err := currentBCCSP.Decrypt(k, ct, bccsp.AESCBCPKCS7ModeOpts{})
if err != nil {
t.Fatalf("Failed decrypting [%s]", err)
}
if len(pt) == 0 {
t.Fatal("Failed decrypting. Nil plaintext")
}
if !bytes.Equal(msg, pt) {
t.Fatalf("Failed decrypting. Decrypted plaintext is different from the original. [%x][%x]", msg, pt)
}
}
func TestAES256KeyImportBadPaths(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestAES256KeyImportBadPaths")
}
_, err := currentBCCSP.KeyImport(nil, &bccsp.AES256ImportKeyOpts{Temporary: false})
if err == nil {
t.Fatal("Failed importing key. Must fail on importing nil key")
}
_, err = currentBCCSP.KeyImport([]byte{1}, &bccsp.AES256ImportKeyOpts{Temporary: false})
if err == nil {
t.Fatal("Failed importing key. Must fail on importing a key with an invalid length")
}
}
func TestAES256KeyGenSKI(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestAES256KeyGenSKI")
}
k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES_256 key [%s]", err)
}
k2, err := currentBCCSP.GetKey(k.SKI())
if err != nil {
t.Fatalf("Failed getting AES_256 key [%s]", err)
}
if k2 == nil {
t.Fatal("Failed getting AES_256 key. Key must be different from nil")
}
if !k2.Private() {
t.Fatal("Failed getting AES_256 key. Key should be private")
}
if !k2.Symmetric() {
t.Fatal("Failed getting AES_256 key. Key should be symmetric")
}
// Check that the SKIs are the same
if !bytes.Equal(k.SKI(), k2.SKI()) {
t.Fatalf("SKIs are different [%x]!=[%x]", k.SKI(), k2.SKI())
}
}
func TestSHA(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestSHA")
}
for i := 0; i < 100; i++ {
b, err := sw.GetRandomBytes(i)
if err != nil {
t.Fatalf("Failed getting random bytes [%s]", err)
}
h1, err := currentBCCSP.Hash(b, &bccsp.SHAOpts{})
if err != nil {
t.Fatalf("Failed computing SHA [%s]", err)
}
var h hash.Hash
switch currentTestConfig.hashFamily {
case "SHA2":
switch currentTestConfig.securityLevel {
case 256:
h = sha256.New()
case 384:
h = sha512.New384()
default:
t.Fatalf("Invalid security level [%d]", currentTestConfig.securityLevel)
}
case "SHA3":
switch currentTestConfig.securityLevel {
case 256:
h = sha3.New256()
case 384:
h = sha3.New384()
default:
t.Fatalf("Invalid security level [%d]", currentTestConfig.securityLevel)
}
default:
t.Fatalf("Invalid hash family [%s]", currentTestConfig.hashFamily)
}
h.Write(b)
h2 := h.Sum(nil)
if !bytes.Equal(h1, h2) {
t.Fatalf("Discrempancy found in HASH result [%x], [%x]!=[%x]", b, h1, h2)
}
}
}
func TestRSAKeyGenEphemeral(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestRSAKeyGenEphemeral")
}
k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: true})
if err != nil {
t.Fatalf("Failed generating RSA key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating RSA key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating RSA key. Key should be private")
}
if k.Symmetric() {
t.Fatal("Failed generating RSA key. Key should be asymmetric")
}
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed generating RSA corresponding public key [%s]", err)
}
if pk == nil {
t.Fatal("PK must be different from nil")
}
b, err := k.Bytes()
if err == nil {
t.Fatal("Secret keys cannot be exported. It must fail in this case")
}
if len(b) != 0 {
t.Fatal("Secret keys cannot be exported. It must be nil")
}
}
func TestRSAPrivateKeySKI(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestRSAPrivateKeySKI")
}
k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA key [%s]", err)
}
ski := k.SKI()
if len(ski) == 0 {
t.Fatal("SKI not valid. Zero length.")
}
}
func TestRSAKeyGenNonEphemeral(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestRSAKeyGenNonEphemeral")
}
k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating RSA key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating RSA key. Key should be private")
}
if k.Symmetric() {
t.Fatal("Failed generating RSA key. Key should be asymmetric")
}
}
func TestRSAGetKeyBySKI(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestRSAGetKeyBySKI")
}
k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA key [%s]", err)
}
k2, err := currentBCCSP.GetKey(k.SKI())
if err != nil {
t.Fatalf("Failed getting RSA key [%s]", err)
}
if k2 == nil {
t.Fatal("Failed getting RSA key. Key must be different from nil")
}
if !k2.Private() {
t.Fatal("Failed getting RSA key. Key should be private")
}
if k2.Symmetric() {
t.Fatal("Failed getting RSA key. Key should be asymmetric")
}
// Check that the SKIs are the same
if !bytes.Equal(k.SKI(), k2.SKI()) {
t.Fatalf("SKIs are different [%x]!=[%x]", k.SKI(), k2.SKI())
}
}
func TestRSAPublicKeyFromPrivateKey(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestRSAPublicKeyFromPrivateKey")
}
k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA key [%s]", err)
}
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting public key from private RSA key [%s]", err)
}
if pk == nil {
t.Fatal("Failed getting public key from private RSA key. Key must be different from nil")
}
if pk.Private() {
t.Fatal("Failed generating RSA key. Key should be public")
}
if pk.Symmetric() {
t.Fatal("Failed generating RSA key. Key should be asymmetric")
}
}
func TestRSAPublicKeyBytes(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestRSAPublicKeyBytes")
}
k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA key [%s]", err)
}
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting public key from private RSA key [%s]", err)
}
raw, err := pk.Bytes()
if err != nil {
t.Fatalf("Failed marshalling RSA public key [%s]", err)
}
if len(raw) == 0 {
t.Fatal("Failed marshalling RSA public key. Zero length")
}
}
func TestRSAPublicKeySKI(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestRSAPublicKeySKI")
}
k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA key [%s]", err)
}
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting public key from private RSA key [%s]", err)
}
ski := pk.SKI()
if len(ski) == 0 {
t.Fatal("SKI not valid. Zero length.")
}
}
func TestRSASign(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestRSASign")
}
k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA key [%s]", err)
}
msg := []byte("Hello World")
digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
if err != nil {
t.Fatalf("Failed computing HASH [%s]", err)
}
signature, err := currentBCCSP.Sign(k, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
if err != nil {
t.Fatalf("Failed generating RSA signature [%s]", err)
}
if len(signature) == 0 {
t.Fatal("Failed generating RSA key. Signature must be different from nil")
}
}
func TestRSAVerify(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestRSAVerify")
}
k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA key [%s]", err)
}
msg := []byte("Hello World")
digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
if err != nil {
t.Fatalf("Failed computing HASH [%s]", err)
}
signature, err := currentBCCSP.Sign(k, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
if err != nil {
t.Fatalf("Failed generating RSA signature [%s]", err)
}
valid, err := currentBCCSP.Verify(k, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
if err != nil {
t.Fatalf("Failed verifying RSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying RSA signature. Signature not valid.")
}
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting corresponding public key [%s]", err)
}
valid, err = currentBCCSP.Verify(pk, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
if err != nil {
t.Fatalf("Failed verifying RSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying RSA signature. Signature not valid.")
}
// Store public key
err = currentKS.StoreKey(pk)
if err != nil {
t.Fatalf("Failed storing corresponding public key [%s]", err)
}
pk2, err := currentKS.GetKey(pk.SKI())
if err != nil {
t.Fatalf("Failed retrieving corresponding public key [%s]", err)
}
valid, err = currentBCCSP.Verify(pk2, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
if err != nil {
t.Fatalf("Failed verifying RSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying RSA signature. Signature not valid.")
}
}
func TestRSAKeyImportFromRSAPublicKey(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestRSAKeyImportFromRSAPublicKey")
}
// Generate an RSA key
k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA key [%s]", err)
}
// Export the public key
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting RSA public key [%s]", err)
}
pkRaw, err := pk.Bytes()
if err != nil {
t.Fatalf("Failed getting RSA raw public key [%s]", err)
}
pub, err := utils.DERToPublicKey(pkRaw)
if err != nil {
t.Fatalf("Failed converting raw to RSA.PublicKey [%s]", err)
}
// Import the RSA.PublicKey
pk2, err := currentBCCSP.KeyImport(pub, &bccsp.RSAGoPublicKeyImportOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed importing RSA public key [%s]", err)
}
if pk2 == nil {
t.Fatal("Failed importing RSA public key. Return BCCSP key cannot be nil.")
}
// Sign and verify with the imported public key
msg := []byte("Hello World")
digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
if err != nil {
t.Fatalf("Failed computing HASH [%s]", err)
}
signature, err := currentBCCSP.Sign(k, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
if err != nil {
t.Fatalf("Failed generating RSA signature [%s]", err)
}
valid, err := currentBCCSP.Verify(pk2, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
if err != nil {
t.Fatalf("Failed verifying RSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying RSA signature. Signature not valid.")
}
}
func TestKeyImportFromX509RSAPublicKey(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestKeyImportFromX509RSAPublicKey")
}
// Generate an RSA key
k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA key [%s]", err)
}
// Generate a self-signed certificate
testExtKeyUsage := []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}
testUnknownExtKeyUsage := []asn1.ObjectIdentifier{[]int{1, 2, 3}, []int{2, 59, 1}}
extraExtensionData := []byte("extra extension")
commonName := "test.example.com"
template := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
CommonName: commonName,
Organization: []string{"Σ Acme Co"},
Country: []string{"US"},
ExtraNames: []pkix.AttributeTypeAndValue{
{
Type: []int{2, 5, 4, 42},
Value: "Gopher",
},
// This should override the Country, above.
{
Type: []int{2, 5, 4, 6},
Value: "NL",
},
},
},
NotBefore: time.Now().Add(-1 * time.Hour),
NotAfter: time.Now().Add(1 * time.Hour),
SignatureAlgorithm: x509.SHA256WithRSA,
SubjectKeyId: []byte{1, 2, 3, 4},
KeyUsage: x509.KeyUsageCertSign,
ExtKeyUsage: testExtKeyUsage,
UnknownExtKeyUsage: testUnknownExtKeyUsage,
BasicConstraintsValid: true,
IsCA: true,
OCSPServer: []string{"http://ocsp.example.com"},
IssuingCertificateURL: []string{"http://crt.example.com/ca1.crt"},
DNSNames: []string{"test.example.com"},
EmailAddresses: []string{"[email protected]"},
IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1).To4(), net.ParseIP("2001:4860:0:2001::68")},
PolicyIdentifiers: []asn1.ObjectIdentifier{[]int{1, 2, 3}},
PermittedDNSDomains: []string{".example.com", "example.com"},
CRLDistributionPoints: []string{"http://crl1.example.com/ca1.crl", "http://crl2.example.com/ca1.crl"},
ExtraExtensions: []pkix.Extension{
{
Id: []int{1, 2, 3, 4},
Value: extraExtensionData,
},
},
}
cryptoSigner, err := signer.New(currentBCCSP, k)
if err != nil {
t.Fatalf("Failed initializing CyrptoSigner [%s]", err)
}
// Export the public key
pk, err := k.PublicKey()
if err != nil {
t.Fatalf("Failed getting RSA public key [%s]", err)
}
pkRaw, err := pk.Bytes()
if err != nil {
t.Fatalf("Failed getting RSA raw public key [%s]", err)
}
pub, err := utils.DERToPublicKey(pkRaw)
if err != nil {
t.Fatalf("Failed converting raw to RSA.PublicKey [%s]", err)
}
certRaw, err := x509.CreateCertificate(rand.Reader, &template, &template, pub, cryptoSigner)
if err != nil {
t.Fatalf("Failed generating self-signed certificate [%s]", err)
}
cert, err := utils.DERToX509Certificate(certRaw)
if err != nil {
t.Fatalf("Failed generating X509 certificate object from raw [%s]", err)
}
// Import the certificate's public key
pk2, err := currentBCCSP.KeyImport(cert, &bccsp.X509PublicKeyImportOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed importing RSA public key [%s]", err)
}
if pk2 == nil {
t.Fatal("Failed importing RSA public key. Return BCCSP key cannot be nil.")
}
// Sign and verify with the imported public key
msg := []byte("Hello World")
digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
if err != nil {
t.Fatalf("Failed computing HASH [%s]", err)
}
signature, err := currentBCCSP.Sign(k, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
if err != nil {
t.Fatalf("Failed generating RSA signature [%s]", err)
}
valid, err := currentBCCSP.Verify(pk2, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
if err != nil {
t.Fatalf("Failed verifying RSA signature [%s]", err)
}
if !valid {
t.Fatal("Failed verifying RSA signature. Signature not valid.")
}
}
func getCryptoHashIndex(t *testing.T) crypto.Hash {
switch currentTestConfig.hashFamily {
case "SHA2":
switch currentTestConfig.securityLevel {
case 256:
return crypto.SHA256
case 384:
return crypto.SHA384
default:
t.Fatalf("Invalid security level [%d]", currentTestConfig.securityLevel)
}
case "SHA3":
switch currentTestConfig.securityLevel {
case 256:
return crypto.SHA3_256
case 384:
return crypto.SHA3_384
default:
t.Fatalf("Invalid security level [%d]", currentTestConfig.securityLevel)
}
default:
t.Fatalf("Invalid hash family [%s]", currentTestConfig.hashFamily)
}
return crypto.SHA3_256
}
|
[
"\"PKCS11_LIB\"",
"\"PKCS11_PIN\"",
"\"PKCS11_LABEL\""
] |
[] |
[
"PKCS11_PIN",
"PKCS11_LIB",
"PKCS11_LABEL"
] |
[]
|
["PKCS11_PIN", "PKCS11_LIB", "PKCS11_LABEL"]
|
go
| 3 | 0 | |
sdk/webpubsub/azure-messaging-webpubsubservice/samples/get_client_access_token_async.py
|
# coding=utf-8
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import logging
import os
import asyncio
from azure.messaging.webpubsubservice.aio import WebPubSubServiceClient as WebPubSubServiceClientAsync
from azure.identity.aio import DefaultAzureCredential
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger()
async def main():
# Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables:
# AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, WEBPUBSUB_ENDPOINT, WEBPUBSUB_CONNECTION_STRING
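# For example (placeholder values only, not real credentials):
#   export WEBPUBSUB_ENDPOINT="https://<your-resource>.webpubsub.azure.com"
#   export WEBPUBSUB_CONNECTION_STRING="Endpoint=https://<your-resource>.webpubsub.azure.com;AccessKey=<key>;Version=1.0;"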
try:
endpoint = os.environ["WEBPUBSUB_ENDPOINT"]
connection_string = os.environ['WEBPUBSUB_CONNECTION_STRING']
except KeyError:
LOG.error("Missing environment variable 'WEBPUBSUB_ENDPOINT' or 'WEBPUBSUB_CONNECTION_STRING' - please set if before running the example")
exit()
# Build a client through AAD(async)
async with DefaultAzureCredential() as credential:
async with WebPubSubServiceClientAsync(endpoint=endpoint, hub='hub', credential=credential) as client_aad_async:
# Build authentication token(async)
token_aad_async = await client_aad_async.get_client_access_token()
print('token by AAD(async): {}'.format(token_aad_async))
# Build a client through connection string(async)
async with WebPubSubServiceClientAsync.from_connection_string(connection_string, hub='hub') as client_key_async:
# Build authentication token(async)
token_key_async = await client_key_async.get_client_access_token()
print('token by access key(async): {}'.format(token_key_async))
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
[] |
[] |
[
"WEBPUBSUB_ENDPOINT",
"WEBPUBSUB_CONNECTION_STRING"
] |
[]
|
["WEBPUBSUB_ENDPOINT", "WEBPUBSUB_CONNECTION_STRING"]
|
python
| 2 | 0 | |
sshelper.go
|
package sshelper
import (
"os"
"path/filepath"
"runtime"
)
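// HashKnownHosts reports whether new known_hosts entries should be hashed;
// this helper currently always returns false.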
func HashKnownHosts() bool {
return false
}
// GlobalConfigFile returns the global ssh client config file name.
// See https://man.openbsd.org/ssh#FILES
func GlobalConfigFile() string {
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("ALLUSERSPROFILE"), "ssh/ssh_config")
} else {
return "/etc/ssh/ssh_config"
}
}
// UserConfigFile returns the per-user ssh client config file name.
// See https://man.openbsd.org/ssh#FILES
func UserConfigFile() string {
d, _ := os.UserHomeDir()
return filepath.Join(d, ".ssh/config")
}
|
[
"\"ALLUSERSPROFILE\""
] |
[] |
[
"ALLUSERSPROFILE"
] |
[]
|
["ALLUSERSPROFILE"]
|
go
| 1 | 0 | |
store/storetest/mocks/JobStore.go
|
// Code generated by mockery v1.0.0. DO NOT EDIT.
// Regenerate this file using `make store-mocks`.
package mocks
import (
model "github.com/zgordan-vv/zacmm-server/model"
mock "github.com/stretchr/testify/mock"
)
// JobStore is an autogenerated mock type for the JobStore type
type JobStore struct {
mock.Mock
}
// Delete provides a mock function with given fields: id
func (_m *JobStore) Delete(id string) (string, error) {
ret := _m.Called(id)
var r0 string
if rf, ok := ret.Get(0).(func(string) string); ok {
r0 = rf(id)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Get provides a mock function with given fields: id
func (_m *JobStore) Get(id string) (*model.Job, error) {
ret := _m.Called(id)
var r0 *model.Job
if rf, ok := ret.Get(0).(func(string) *model.Job); ok {
r0 = rf(id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Job)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetAllByStatus provides a mock function with given fields: status
func (_m *JobStore) GetAllByStatus(status string) ([]*model.Job, error) {
ret := _m.Called(status)
var r0 []*model.Job
if rf, ok := ret.Get(0).(func(string) []*model.Job); ok {
r0 = rf(status)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*model.Job)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(status)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetAllByType provides a mock function with given fields: jobType
func (_m *JobStore) GetAllByType(jobType string) ([]*model.Job, error) {
ret := _m.Called(jobType)
var r0 []*model.Job
if rf, ok := ret.Get(0).(func(string) []*model.Job); ok {
r0 = rf(jobType)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*model.Job)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(jobType)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetAllByTypePage provides a mock function with given fields: jobType, offset, limit
func (_m *JobStore) GetAllByTypePage(jobType string, offset int, limit int) ([]*model.Job, error) {
ret := _m.Called(jobType, offset, limit)
var r0 []*model.Job
if rf, ok := ret.Get(0).(func(string, int, int) []*model.Job); ok {
r0 = rf(jobType, offset, limit)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*model.Job)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string, int, int) error); ok {
r1 = rf(jobType, offset, limit)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetAllPage provides a mock function with given fields: offset, limit
func (_m *JobStore) GetAllPage(offset int, limit int) ([]*model.Job, error) {
ret := _m.Called(offset, limit)
var r0 []*model.Job
if rf, ok := ret.Get(0).(func(int, int) []*model.Job); ok {
r0 = rf(offset, limit)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*model.Job)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(int, int) error); ok {
r1 = rf(offset, limit)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetCountByStatusAndType provides a mock function with given fields: status, jobType
func (_m *JobStore) GetCountByStatusAndType(status string, jobType string) (int64, error) {
ret := _m.Called(status, jobType)
var r0 int64
if rf, ok := ret.Get(0).(func(string, string) int64); ok {
r0 = rf(status, jobType)
} else {
r0 = ret.Get(0).(int64)
}
var r1 error
if rf, ok := ret.Get(1).(func(string, string) error); ok {
r1 = rf(status, jobType)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetNewestJobByStatusAndType provides a mock function with given fields: status, jobType
func (_m *JobStore) GetNewestJobByStatusAndType(status string, jobType string) (*model.Job, error) {
ret := _m.Called(status, jobType)
var r0 *model.Job
if rf, ok := ret.Get(0).(func(string, string) *model.Job); ok {
r0 = rf(status, jobType)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Job)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string, string) error); ok {
r1 = rf(status, jobType)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetNewestJobByStatusesAndType provides a mock function with given fields: statuses, jobType
func (_m *JobStore) GetNewestJobByStatusesAndType(statuses []string, jobType string) (*model.Job, error) {
ret := _m.Called(statuses, jobType)
var r0 *model.Job
if rf, ok := ret.Get(0).(func([]string, string) *model.Job); ok {
r0 = rf(statuses, jobType)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Job)
}
}
var r1 error
if rf, ok := ret.Get(1).(func([]string, string) error); ok {
r1 = rf(statuses, jobType)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Save provides a mock function with given fields: job
func (_m *JobStore) Save(job *model.Job) (*model.Job, error) {
ret := _m.Called(job)
var r0 *model.Job
if rf, ok := ret.Get(0).(func(*model.Job) *model.Job); ok {
r0 = rf(job)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Job)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.Job) error); ok {
r1 = rf(job)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// UpdateOptimistically provides a mock function with given fields: job, currentStatus
func (_m *JobStore) UpdateOptimistically(job *model.Job, currentStatus string) (bool, error) {
ret := _m.Called(job, currentStatus)
var r0 bool
if rf, ok := ret.Get(0).(func(*model.Job, string) bool); ok {
r0 = rf(job, currentStatus)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.Job, string) error); ok {
r1 = rf(job, currentStatus)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// UpdateStatus provides a mock function with given fields: id, status
func (_m *JobStore) UpdateStatus(id string, status string) (*model.Job, error) {
ret := _m.Called(id, status)
var r0 *model.Job
if rf, ok := ret.Get(0).(func(string, string) *model.Job); ok {
r0 = rf(id, status)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Job)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string, string) error); ok {
r1 = rf(id, status)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// UpdateStatusOptimistically provides a mock function with given fields: id, currentStatus, newStatus
func (_m *JobStore) UpdateStatusOptimistically(id string, currentStatus string, newStatus string) (bool, error) {
ret := _m.Called(id, currentStatus, newStatus)
var r0 bool
if rf, ok := ret.Get(0).(func(string, string, string) bool); ok {
r0 = rf(id, currentStatus, newStatus)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(string, string, string) error); ok {
r1 = rf(id, currentStatus, newStatus)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
exporter/signalfxexporter/internal/hostmetadata/host.go
|
// Copyright OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Taken from https://github.com/signalfx/golib/blob/master/metadata/hostmetadata/host.go
// with minor modifications.
package hostmetadata // import "github.com/rati3l/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/hostmetadata"
import (
"bytes"
"context"
"errors"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"time"
"github.com/shirou/gopsutil/v3/cpu"
"github.com/shirou/gopsutil/v3/host"
"github.com/shirou/gopsutil/v3/mem"
)
// etcPath is the path to host etc and can be set using the env var "HOST_ETC"
// this is to maintain consistency with gopsutil
var etcPath = func() string {
if etcPath := os.Getenv("HOST_ETC"); etcPath != "" {
return etcPath
}
return "/etc"
}
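// cpuStatsTimeout bounds the context used for the cpu.Info and cpu.Counts calls below, which can be slow on Windows.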
const cpuStatsTimeout = 10 * time.Second
// Map library functions to unexported package variables for testing purposes.
var cpuInfo = cpu.InfoWithContext
var cpuCounts = cpu.CountsWithContext
var memVirtualMemory = mem.VirtualMemory
var hostInfo = host.Info
// hostCPU information about the host
type hostCPU struct {
HostPhysicalCPUs int
HostLogicalCPUs int
HostCPUCores int64
HostCPUModel string
HostMachine string
HostProcessor string
}
// toStringMap returns the hostCPU as a string map
func (c *hostCPU) toStringMap() map[string]string {
return map[string]string{
"host_physical_cpus": strconv.Itoa(c.HostPhysicalCPUs),
"host_cpu_cores": strconv.FormatInt(c.HostCPUCores, 10),
"host_cpu_model": c.HostCPUModel,
"host_logical_cpus": strconv.Itoa(c.HostLogicalCPUs),
"host_processor": c.HostProcessor,
"host_machine": c.HostMachine,
}
}
// getCPU returns a struct with information about the host cpu
func getCPU() (info *hostCPU, err error) {
info = &hostCPU{}
// get physical cpu stats
var cpus []cpu.InfoStat
// On Windows this can sometimes take longer than the default timeout (10 seconds).
ctx, cancel := context.WithTimeout(context.Background(), cpuStatsTimeout)
defer cancel()
cpus, err = cpuInfo(ctx)
if err != nil {
return info, err
}
info.HostPhysicalCPUs = len(cpus)
// get logical cpu stats
info.HostLogicalCPUs, err = cpuCounts(ctx, true)
if err != nil {
return info, err
}
// total number of cpu cores
for i := range cpus {
info.HostCPUCores += int64(cpus[i].Cores)
// TODO: This is not ideal... if there are different processors
// we will only report one of the models... This is unlikely to happen,
// but it could
info.HostCPUModel = cpus[i].ModelName
}
err = fillPlatformSpecificCPUData(info)
return info, err
}
// hostOS is a struct containing information about the host os
type hostOS struct {
HostOSName string
HostKernelName string
HostKernelRelease string
HostKernelVersion string
HostLinuxVersion string
}
// toStringMap returns a map of key/value metadata about the host os
func (o *hostOS) toStringMap() map[string]string {
return map[string]string{
"host_kernel_name": o.HostKernelName,
"host_kernel_release": o.HostKernelRelease,
"host_kernel_version": o.HostKernelVersion,
"host_os_name": o.HostOSName,
"host_linux_version": o.HostLinuxVersion,
}
}
// int8ArrayToByteArray converts an []int8 to []byte
func int8ArrayToByteArray(in []int8) []byte {
bts := make([]byte, len(in))
for i, c := range in {
bts[i] = byte(c)
}
return bytes.Trim(bts, "\x00")
}
// getOS returns a struct with information about the host os
func getOS() (info *hostOS, err error) {
info = &hostOS{}
hInfo, err := hostInfo()
if err != nil {
return info, err
}
info.HostOSName = hInfo.Platform
info.HostKernelName = hInfo.OS
// in gopsutil KernelVersion returns what we would expect for Kernel Release
info.HostKernelRelease = hInfo.KernelVersion
err = fillPlatformSpecificOSData(info)
return info, err
}
// getLinuxVersion returns a string describing the host linux version/distribution
func getLinuxVersion() (string, error) {
etc := etcPath()
if value, err := getStringFromFile(`DISTRIB_DESCRIPTION="(.*)"`, filepath.Join(etc, "lsb-release")); err == nil {
return value, nil
}
if value, err := getStringFromFile(`PRETTY_NAME="(.*)"`, filepath.Join(etc, "os-release")); err == nil {
return value, nil
}
if value, err := ioutil.ReadFile(filepath.Join(etc, "centos-release")); err == nil {
return string(value), nil
}
if value, err := ioutil.ReadFile(filepath.Join(etc, "redhat-release")); err == nil {
return string(value), nil
}
if value, err := ioutil.ReadFile(filepath.Join(etc, "system-release")); err == nil {
return string(value), nil
}
return "", errors.New("unable to find linux version")
}
// Memory stores memory collected from the host
type Memory struct {
Total int
}
// toStringMap returns a map of key/value metadata about the host memory
// where memory sizes are reported in Kb
func (m *Memory) toStringMap() map[string]string {
return map[string]string{
"host_mem_total": strconv.Itoa(bytesToKilobytes(m.Total)),
}
}
// getMemory returns the total amount of memory on the host in bytes
func getMemory() (*Memory, error) {
m := &Memory{}
memoryStat, err := memVirtualMemory()
if err == nil {
m.Total = int(memoryStat.Total)
}
return m, err
}
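// getStringFromFile returns the first capture group of pattern found in the file at path, if any.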
func getStringFromFile(pattern string, path string) (string, error) {
var err error
var file []byte
var reg = regexp.MustCompile(pattern)
if file, err = ioutil.ReadFile(path); err == nil {
if match := reg.FindSubmatch(file); len(match) > 1 {
return string(match[1]), nil
}
}
return "", err
}
func bytesToKilobytes(b int) int {
return b / 1024
}
|
[
"\"HOST_ETC\""
] |
[] |
[
"HOST_ETC"
] |
[]
|
["HOST_ETC"]
|
go
| 1 | 0 | |
test/functional/test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py',
'fundrawtransaction.py',
'p2p-compactblocks.py',
'segwit.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py',
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'keypool-topup.py',
'zmq_test.py',
'bitcoin_cli.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'multiwallet.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'disconnect_ban.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'net.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'p2p-versionbits-warning.py',
'preciousblock.py',
'test_script_address2.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'mining.py',
'bumpfee.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'wallet-encryption.py',
'bipdersig-p2p.py',
'bip65-cltv-p2p.py',
'uptime.py',
'resendwallettransactions.py',
'minchainwork.py',
'p2p-acceptblock.py',
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'pruning.py',
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
'dbcrash.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'p2p-feefilter.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'assumevalid.py',
'example_test.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'replace-by-fee.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/sheepcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and sheepcoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
# No individual tests have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]):
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "sheepcoind"]) is not None:
print("%sWARNING!%s There is already a sheepcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
#Set env vars
if "LITECOIND" not in os.environ:
os.environ["LITECOIND"] = build_dir + '/src/sheepcoind' + exeext
os.environ["LITECOINCLI"] = build_dir + '/src/sheepcoin-cli' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
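# Render a summary table of all test results along with an aggregate pass/fail status.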
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
tmpdir = ["--tmpdir=%s/%s_%s" % (self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)]
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
# In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
# providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
return TestResult(name, status, int(time.time() - time0)), stdout, stderr
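# No test has finished in this polling round; print a dot as a progress indicator.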
print('.', end='', flush=True)
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by this test runner."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage(object):
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `sheepcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
[] |
[] |
[
"LITECOIND",
"LITECOINCLI",
"TRAVIS"
] |
[]
|
["LITECOIND", "LITECOINCLI", "TRAVIS"]
|
python
| 3 | 0 | |
cmd/bot/main.go
|
package main
import (
"net/http"
"os"
"strconv"
tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api"
"github.com/imroc/req"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"gopkg.in/gomail.v2"
)
func main() {
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
log.Info().Msg("Starting an app..")
token := os.Getenv("BOT_TOKEN")
bot, err := tgbotapi.NewBotAPI(token)
if err != nil {
log.Panic().Err(err)
}
bot.Debug = os.Getenv("DEBUG") == "1"
log.Info().Msg("Authorized on account " + bot.Self.UserName)
port := os.Getenv("PORT")
herokuAppName := os.Getenv("HEROKU_APP_NAME")
webhookHost := "https://" + herokuAppName + ".herokuapp.com/" + bot.Token
log.Info().Msg("Webhook host: " + webhookHost)
resp, err := bot.SetWebhook(tgbotapi.NewWebhook(webhookHost))
if err != nil {
log.Panic().Err(err)
} else {
log.Info().Msg(resp.Description)
}
updates := bot.ListenForWebhook("/" + bot.Token)
go http.ListenAndServe(":"+port, nil) //nolint:errcheck
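// Process incoming Telegram updates delivered via the webhook.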
for update := range updates {
if update.Message == nil { // ignore any non-Message Updates
continue
}
log.Printf("[%s] %s", update.Message.From.UserName, update.Message.Text)
if update.Message.Document == nil {
msg := tgbotapi.NewMessage(update.Message.Chat.ID, "Send me a book, please 🥳")
msg.ReplyToMessageID = update.Message.MessageID
_, err := bot.Send(msg)
if err != nil {
log.Error().Err(err)
}
continue
}
document := update.Message.Document
if document.MimeType != "application/x-mobipocket-ebook" {
msg := tgbotapi.NewMessage(update.Message.Chat.ID, "Unfortunately, I can receive only .mobi books 😭")
msg.ReplyToMessageID = update.Message.MessageID
_, err := bot.Send(msg)
if err != nil {
log.Error().Err(err)
}
continue
}
fileUrl, err := bot.GetFileDirectURL(document.FileID)
if err != nil {
log.Error().Err(err)
continue
}
r, err := req.Get(fileUrl)
if err != nil {
log.Error().Err(err)
continue
}
userId := strconv.Itoa(update.Message.From.ID)
_ = os.MkdirAll("books/"+userId, os.ModePerm)
err = r.ToFile("books/" + userId + "/" + document.FileName)
if err != nil {
log.Error().Err(err)
continue
} else {
log.Info().Msg("Download complete: " + document.FileName)
}
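// Build an e-mail with the downloaded book attached and send it over SMTP using credentials from the environment.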
from := os.Getenv("FROM_EMAIL")
to := os.Getenv("TO_EMAIL")
emailUsername := os.Getenv("EMAIL_USERNAME")
emailPassword := os.Getenv("EMAIL_PASSWORD")
smtpHost := os.Getenv("SMTP_HOST")
smtpPort, _ := strconv.Atoi(os.Getenv("SMTP_PORT"))
m := gomail.NewMessage(gomail.SetEncoding(gomail.Base64))
m.SetHeader("From", from)
m.SetHeader("To", to)
m.SetHeader("Subject", "New book from Awesome Kindle Bot!")
m.SetBody("text/html", "Get your book!")
m.Attach("books/" + userId + "/" + document.FileName)
d := gomail.NewDialer(smtpHost, smtpPort, emailUsername, emailPassword)
if err := d.DialAndSend(m); err != nil {
log.Error().Err(err)
} else {
log.Info().Msg("Book " + document.FileName + " was sent to " + to)
}
}
}
|
[
"\"BOT_TOKEN\"",
"\"DEBUG\"",
"\"PORT\"",
"\"HEROKU_APP_NAME\"",
"\"FROM_EMAIL\"",
"\"TO_EMAIL\"",
"\"EMAIL_USERNAME\"",
"\"EMAIL_PASSWORD\"",
"\"SMTP_HOST\"",
"\"SMTP_PORT\""
] |
[] |
[
"PORT",
"SMTP_PORT",
"TO_EMAIL",
"EMAIL_PASSWORD",
"EMAIL_USERNAME",
"HEROKU_APP_NAME",
"BOT_TOKEN",
"DEBUG",
"SMTP_HOST",
"FROM_EMAIL"
] |
[]
|
["PORT", "SMTP_PORT", "TO_EMAIL", "EMAIL_PASSWORD", "EMAIL_USERNAME", "HEROKU_APP_NAME", "BOT_TOKEN", "DEBUG", "SMTP_HOST", "FROM_EMAIL"]
|
go
| 10 | 0 | |
googleapi/api.go
|
package googleapi
import (
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
)
const (
googleGeoCodeAPI = "https://maps.googleapis.com/maps/api/geocode/json"
)
var (
digitRegex = regexp.MustCompile("[0-9]*")
client = http.DefaultClient
)
type LocationJSON struct {
Results []struct {
FormattedAddress string `json:"formatted_address"`
Geometry struct {
Location struct {
Lat json.Number `json:"lat"`
Lng json.Number `json:"lng"`
} `json:"location"`
} `json:"geometry"`
} `json:"results"`
Status string `json:"status"`
}
func (lj *LocationJSON) Location() (*Location, error) {
if len(lj.Results) == 0 {
return nil, errors.New("no results found")
}
result := lj.Results[0]
latitude, err := result.Geometry.Location.Lat.Float64()
if err != nil {
return nil, err
}
longitude, err := result.Geometry.Location.Lng.Float64()
if err != nil {
return nil, err
}
return &Location{
FormattedAddress: result.FormattedAddress,
Latitude: latitude,
Longitude: longitude,
}, nil
}
type Location struct {
FormattedAddress string
Latitude, Longitude float64
}
func LocationInfo(address string) (*Location, error) {
u, err := url.Parse(googleGeoCodeAPI)
if err != nil {
return nil, err
}
uv := &url.Values{}
uv.Add("address", formatURL(address))
uv.Add("key", os.Getenv("GOOGLE_GEOCODE_API_KEY"))
u.RawQuery = uv.Encode()
resp, err := client.Get(u.String())
if err != nil {
return nil, err
}
defer resp.Body.Close()
var lj LocationJSON
if err := json.NewDecoder(resp.Body).Decode(&lj); err != nil {
_, _ = io.Copy(ioutil.Discard, resp.Body)
return nil, err
}
return lj.Location()
}
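// formatURL normalizes an address for the geocoding query: "&" becomes "and", spaces become "+",
// and bare street numbers get an English ordinal suffix (1st, 2nd, 3rd, ...).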
func formatURL(address string) string {
address = strings.Replace(address, "&", "and", -1)
address = strings.Replace(address, " ", "+", -1)
return digitRegex.ReplaceAllStringFunc(address, func(s string) string {
switch n, err := strconv.Atoi(s); {
case err != nil:
return s
// 11, 12 and 13 take the "th" suffix despite ending in 1, 2 and 3.
case n%100 >= 11 && n%100 <= 13:
return s + "th"
case n%10 == 1:
return s + "st"
case n%10 == 2:
return s + "nd"
case n%10 == 3:
return s + "rd"
default:
return s + "th"
}
})
}
|
[
"\"GOOGLE_GEOCODE_API_KEY\""
] |
[] |
[
"GOOGLE_GEOCODE_API_KEY"
] |
[]
|
["GOOGLE_GEOCODE_API_KEY"]
|
go
| 1 | 0 | |
repo/content/committed_read_manager.go
|
package content
import (
"context"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/kopia/kopia/internal/cache"
"github.com/kopia/kopia/internal/clock"
"github.com/kopia/kopia/internal/epoch"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/internal/listcache"
"github.com/kopia/kopia/internal/ownwrites"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/blob/filesystem"
"github.com/kopia/kopia/repo/compression"
"github.com/kopia/kopia/repo/hashing"
"github.com/kopia/kopia/repo/logging"
)
// number of bytes to read from each pack index when recovering the index.
// per-pack indexes are usually short (<100-200 contents).
const indexRecoverPostambleSize = 8192
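// indexRefreshFrequency is how often the set of committed indexes is refreshed;
// ownWritesCacheDuration is how long locally written index blobs are remembered by the own-writes cache.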
const indexRefreshFrequency = 15 * time.Minute
const ownWritesCacheDuration = 15 * time.Minute
var cachedIndexBlobPrefixes = []blob.ID{
IndexBlobPrefix,
compactionLogBlobPrefix,
cleanupBlobPrefix,
epoch.UncompactedIndexBlobPrefix,
epoch.EpochMarkerIndexBlobPrefix,
epoch.SingleEpochCompactionBlobPrefix,
epoch.RangeCheckpointIndexBlobPrefix,
}
var allIndexBlobPrefixes = []blob.ID{
IndexBlobPrefix,
epoch.UncompactedIndexBlobPrefix,
epoch.SingleEpochCompactionBlobPrefix,
epoch.RangeCheckpointIndexBlobPrefix,
}
// indexBlobManager is the API of index blob manager as used by content manager.
type indexBlobManager interface {
writeIndexBlobs(ctx context.Context, data []gather.Bytes, sessionID SessionID) ([]blob.Metadata, error)
listActiveIndexBlobs(ctx context.Context) ([]IndexBlobInfo, time.Time, error)
compact(ctx context.Context, opts CompactOptions) error
flushCache(ctx context.Context)
}
// SharedManager is responsible for read-only access to committed data.
type SharedManager struct {
refCount int32 // number of Manager objects that refer to this SharedManager
closed int32 // set to 1 if shared manager has been closed
Stats *Stats
st blob.Storage
indexBlobManager indexBlobManager // points at either indexBlobManagerV0 or indexBlobManagerV1
indexBlobManagerV0 *indexBlobManagerV0
indexBlobManagerV1 *indexBlobManagerV1
contentCache contentCache
metadataCache contentCache
committedContents *committedContentIndex
crypter *Crypter
enc *encryptedBlobMgr
timeNow func() time.Time
// lock to protect the set of committed indexes
// shared lock will be acquired when writing new content to allow it to happen in parallel
// exclusive lock will be acquired during compaction or refresh.
indexesLock sync.RWMutex
// maybeRefreshIndexes() will call Refresh() after this point in time.
refreshIndexesAfter time.Time
format FormattingOptions
checkInvariantsOnUnlock bool
writeFormatVersion int32 // format version to write
maxPackSize int
minPreambleLength int
maxPreambleLength int
paddingUnit int
repositoryFormatBytes []byte
indexVersion int
indexShardSize int
// logger where logs should be written
log logging.Logger
// base logger used by other related components with their own prefixes,
// do not log there directly.
sharedBaseLogger logging.Logger
internalLogManager *internalLogManager
internalLogger *internalLogger // backing logger for 'sharedBaseLogger'
}
// Crypter returns the crypter.
func (sm *SharedManager) Crypter() *Crypter {
return sm.crypter
}
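// readPackFileLocalIndex recovers the local index of a pack blob, first attempting an optimized read of
// just the postamble region and falling back to reading the whole blob.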
func (sm *SharedManager) readPackFileLocalIndex(ctx context.Context, packFile blob.ID, packFileLength int64, output *gather.WriteBuffer) error {
var err error
if packFileLength >= indexRecoverPostambleSize {
if err = sm.attemptReadPackFileLocalIndex(ctx, packFile, packFileLength-indexRecoverPostambleSize, indexRecoverPostambleSize, output); err == nil {
sm.log.Debugf("recovered %v index bytes from blob %v using optimized method", output.Length(), packFile)
return nil
}
sm.log.Debugf("unable to recover using optimized method: %v", err)
}
if err = sm.attemptReadPackFileLocalIndex(ctx, packFile, 0, -1, output); err == nil {
sm.log.Debugf("recovered %v index bytes from blob %v using full blob read", output.Length(), packFile)
return nil
}
return err
}
func (sm *SharedManager) attemptReadPackFileLocalIndex(ctx context.Context, packFile blob.ID, offset, length int64, output *gather.WriteBuffer) error {
var payload gather.WriteBuffer
defer payload.Close()
output.Reset()
err := sm.st.GetBlob(ctx, packFile, offset, length, &payload)
if err != nil {
return errors.Wrapf(err, "error getting blob %v", packFile)
}
postamble := findPostamble(payload.Bytes().ToByteSlice())
if postamble == nil {
return errors.Errorf("unable to find valid postamble in file %v", packFile)
}
if uint32(offset) > postamble.localIndexOffset {
return errors.Errorf("not enough data read during optimized attempt %v", packFile)
}
postamble.localIndexOffset -= uint32(offset)
if uint64(postamble.localIndexOffset+postamble.localIndexLength) > uint64(payload.Length()) {
// invalid offset/length
return errors.Errorf("unable to find valid local index in file %v - invalid offset/length", packFile)
}
var encryptedLocalIndexBytes gather.WriteBuffer
defer encryptedLocalIndexBytes.Close()
if err := payload.AppendSectionTo(&encryptedLocalIndexBytes, int(postamble.localIndexOffset), int(postamble.localIndexLength)); err != nil {
// should never happen
return errors.Wrap(err, "error appending to local index bytes")
}
return errors.Wrap(
sm.decryptAndVerify(encryptedLocalIndexBytes.Bytes(), postamble.localIndexIV, output),
"unable to decrypt local index")
}
func (sm *SharedManager) loadPackIndexesLocked(ctx context.Context) error {
nextSleepTime := 100 * time.Millisecond //nolint:gomnd
for i := 0; i < indexLoadAttempts; i++ {
if err := ctx.Err(); err != nil {
// nolint:wrapcheck
return err
}
if i > 0 {
sm.indexBlobManager.flushCache(ctx)
sm.log.Debugf("encountered NOT_FOUND when loading, sleeping %v before retrying #%v", nextSleepTime, i)
time.Sleep(nextSleepTime)
nextSleepTime *= 2
}
indexBlobs, ignoreDeletedBefore, err := sm.indexBlobManager.listActiveIndexBlobs(ctx)
if err != nil {
return errors.Wrap(err, "error listing index blobs")
}
var indexBlobIDs []blob.ID
for _, b := range indexBlobs {
indexBlobIDs = append(indexBlobIDs, b.BlobID)
}
err = sm.committedContents.fetchIndexBlobs(ctx, indexBlobIDs)
if err == nil {
err = sm.committedContents.use(ctx, indexBlobIDs, ignoreDeletedBefore)
if err != nil {
return err
}
if len(indexBlobs) > indexBlobCompactionWarningThreshold {
sm.log.Errorf("Found too many index blobs (%v), this may result in degraded performance.\n\nPlease ensure periodic repository maintenance is enabled or run 'kopia maintenance'.", len(indexBlobs))
}
sm.refreshIndexesAfter = sm.timeNow().Add(indexRefreshFrequency)
return nil
}
if !errors.Is(err, blob.ErrBlobNotFound) {
return err
}
}
return errors.Errorf("unable to load pack indexes despite %v retries", indexLoadAttempts)
}
func (sm *SharedManager) getCacheForContentID(id ID) contentCache {
if id.HasPrefix() {
return sm.metadataCache
}
return sm.contentCache
}
func (sm *SharedManager) decryptContentAndVerify(payload gather.Bytes, bi Info, output *gather.WriteBuffer) error {
sm.Stats.readContent(payload.Length())
var hashBuf [hashing.MaxHashSize]byte
iv, err := getPackedContentIV(hashBuf[:], bi.GetContentID())
if err != nil {
return err
}
// reserved for future use
if k := bi.GetEncryptionKeyID(); k != 0 {
return errors.Errorf("unsupported encryption key ID: %v", k)
}
h := bi.GetCompressionHeaderID()
if h == 0 {
return errors.Wrapf(
sm.decryptAndVerify(payload, iv, output),
"invalid checksum at %v offset %v length %v/%v", bi.GetPackBlobID(), bi.GetPackOffset(), bi.GetPackedLength(), payload.Length())
}
var tmp gather.WriteBuffer
defer tmp.Close()
if err := sm.decryptAndVerify(payload, iv, &tmp); err != nil {
return errors.Wrapf(err, "invalid checksum at %v offset %v length %v/%v", bi.GetPackBlobID(), bi.GetPackOffset(), bi.GetPackedLength(), payload.Length())
}
c := compression.ByHeaderID[h]
if c == nil {
return errors.Errorf("unsupported compressor %x", h)
}
if err := c.Decompress(output, tmp.Bytes().Reader(), true); err != nil {
return errors.Wrap(err, "error decompressing")
}
return nil
}
func (sm *SharedManager) decryptAndVerify(encrypted gather.Bytes, iv []byte, output *gather.WriteBuffer) error {
if err := sm.crypter.Encryptor.Decrypt(encrypted, iv, output); err != nil {
sm.Stats.foundInvalidContent()
return errors.Wrap(err, "decrypt")
}
sm.Stats.foundValidContent()
sm.Stats.decrypted(output.Length())
// already verified
return nil
}
// IndexBlobs returns the list of active index blobs.
func (sm *SharedManager) IndexBlobs(ctx context.Context, includeInactive bool) ([]IndexBlobInfo, error) {
if includeInactive {
var result []IndexBlobInfo
for _, prefix := range allIndexBlobPrefixes {
blobs, err := blob.ListAllBlobs(ctx, sm.st, prefix)
if err != nil {
return nil, errors.Wrapf(err, "error listing %v blogs", prefix)
}
for _, bm := range blobs {
result = append(result, IndexBlobInfo{Metadata: bm})
}
}
return result, nil
}
blobs, _, err := sm.indexBlobManager.listActiveIndexBlobs(ctx)
// nolint:wrapcheck
return blobs, err
}
func newOwnWritesCache(ctx context.Context, st blob.Storage, caching *CachingOptions) (blob.Storage, error) {
cacheSt, err := newCacheBackingStorage(ctx, caching, "own-writes")
if err != nil {
return nil, errors.Wrap(err, "unable to get list cache backing storage")
}
return ownwrites.NewWrapper(st, cacheSt, cachedIndexBlobPrefixes, ownWritesCacheDuration), nil
}
func newListCache(ctx context.Context, st blob.Storage, caching *CachingOptions) (blob.Storage, error) {
cacheSt, err := newCacheBackingStorage(ctx, caching, "blob-list")
if err != nil {
return nil, errors.Wrap(err, "unable to get list cache backing storage")
}
return listcache.NewWrapper(st, cacheSt, cachedIndexBlobPrefixes, caching.HMACSecret, time.Duration(caching.MaxListCacheDurationSec)*time.Second), nil
}
func newCacheBackingStorage(ctx context.Context, caching *CachingOptions, subdir string) (blob.Storage, error) {
if caching.CacheDirectory == "" {
return nil, nil
}
blobListCacheDir := filepath.Join(caching.CacheDirectory, subdir)
if _, err := os.Stat(blobListCacheDir); os.IsNotExist(err) {
if err := os.MkdirAll(blobListCacheDir, cache.DirMode); err != nil {
return nil, errors.Wrap(err, "error creating list cache directory")
}
}
// nolint:wrapcheck
return filesystem.New(ctx, &filesystem.Options{
Path: blobListCacheDir,
DirectoryShards: []int{},
})
}
func (sm *SharedManager) setupReadManagerCaches(ctx context.Context, caching *CachingOptions) error {
dataCacheStorage, err := cache.NewStorageOrNil(ctx, caching.CacheDirectory, caching.MaxCacheSizeBytes, "contents")
if err != nil {
return errors.Wrap(err, "unable to initialize data cache storage")
}
dataCache, err := newContentCacheForData(ctx, sm.st, dataCacheStorage, caching.MaxCacheSizeBytes, caching.HMACSecret)
if err != nil {
return errors.Wrap(err, "unable to initialize content cache")
}
metadataCacheSize := caching.MaxMetadataCacheSizeBytes
if metadataCacheSize == 0 && caching.MaxCacheSizeBytes > 0 {
metadataCacheSize = caching.MaxCacheSizeBytes
}
metadataCacheStorage, err := cache.NewStorageOrNil(ctx, caching.CacheDirectory, metadataCacheSize, "metadata")
if err != nil {
return errors.Wrap(err, "unable to initialize data cache storage")
}
metadataCache, err := newContentCacheForMetadata(ctx, sm.st, metadataCacheStorage, metadataCacheSize)
if err != nil {
return errors.Wrap(err, "unable to initialize metadata cache")
}
ownWritesCachingSt, err := newOwnWritesCache(ctx, sm.st, caching)
if err != nil {
return errors.Wrap(err, "unable to initialize own writes cache")
}
cachedSt, err := newListCache(ctx, ownWritesCachingSt, caching)
if err != nil {
return errors.Wrap(err, "unable to initialize list cache")
}
sm.enc = &encryptedBlobMgr{
st: cachedSt,
crypter: sm.crypter,
indexBlobCache: metadataCache,
log: logging.WithPrefix("[encrypted-blob-manager] ", sm.sharedBaseLogger),
}
// set up legacy index blob manager
sm.indexBlobManagerV0 = &indexBlobManagerV0{
st: cachedSt,
enc: sm.enc,
timeNow: sm.timeNow,
maxPackSize: sm.maxPackSize,
indexVersion: sm.indexVersion,
indexShardSize: sm.indexShardSize,
log: logging.WithPrefix("[index-blob-manager] ", sm.sharedBaseLogger),
}
// set up new index blob manager
sm.indexBlobManagerV1 = &indexBlobManagerV1{
st: cachedSt,
enc: sm.enc,
timeNow: sm.timeNow,
maxPackSize: sm.maxPackSize,
indexShardSize: sm.indexShardSize,
indexVersion: sm.indexVersion,
log: logging.WithPrefix("[index-blob-manager] ", sm.sharedBaseLogger),
}
sm.indexBlobManagerV1.epochMgr = epoch.NewManager(cachedSt, sm.format.EpochParameters, sm.indexBlobManagerV1.compactEpoch, sm.sharedBaseLogger)
// select active index blob manager based on parameters
if sm.format.EpochParameters.Enabled {
sm.indexBlobManager = sm.indexBlobManagerV1
} else {
sm.indexBlobManager = sm.indexBlobManagerV0
}
// once everything is ready, set it up
sm.contentCache = dataCache
sm.metadataCache = metadataCache
sm.committedContents = newCommittedContentIndex(caching, uint32(sm.crypter.Encryptor.Overhead()), sm.indexVersion, sm.enc.getEncryptedBlob, sm.sharedBaseLogger)
return nil
}
// EpochManager returns the epoch manager.
func (sm *SharedManager) EpochManager() (*epoch.Manager, bool) {
ibm1, ok := sm.indexBlobManager.(*indexBlobManagerV1)
if !ok {
return nil, false
}
return ibm1.epochMgr, true
}
// addRef adds a reference to the shared manager to prevent it from being closed on release().
func (sm *SharedManager) addRef() {
if atomic.LoadInt32(&sm.closed) != 0 {
panic("attempted to re-use closed SharedManager")
}
atomic.AddInt32(&sm.refCount, 1)
}
// release removes a reference to the shared manager and destroys it if no more references are remaining.
func (sm *SharedManager) release(ctx context.Context) error {
if atomic.LoadInt32(&sm.closed) != 0 {
// already closed
return nil
}
remaining := atomic.AddInt32(&sm.refCount, -1)
if remaining != 0 {
sm.log.Debugf("not closing shared manager, remaining = %v", remaining)
return nil
}
atomic.StoreInt32(&sm.closed, 1)
sm.log.Debugf("closing shared manager")
if err := sm.committedContents.close(); err != nil {
return errors.Wrap(err, "error closing committed content index")
}
sm.contentCache.close(ctx)
sm.metadataCache.close(ctx)
if sm.internalLogger != nil {
sm.internalLogger.Close(ctx)
}
sm.internalLogManager.Close(ctx)
sm.indexBlobManagerV1.epochMgr.Flush()
return errors.Wrap(sm.st.Close(ctx), "error closing storage")
}
// InternalLogger returns the internal logger.
func (sm *SharedManager) InternalLogger() logging.Logger {
return sm.internalLogger
}
func (sm *SharedManager) shouldRefreshIndexes() bool {
sm.indexesLock.RLock()
defer sm.indexesLock.RUnlock()
return sm.timeNow().After(sm.refreshIndexesAfter)
}
func (sm *SharedManager) maybeRefreshIndexes(ctx context.Context) error {
if sm.shouldRefreshIndexes() {
if err := sm.Refresh(ctx); err != nil {
return errors.Wrap(err, "error refreshing indexes")
}
}
return nil
}
// NewSharedManager returns SharedManager that is used by SessionWriteManagers on top of a repository.
func NewSharedManager(ctx context.Context, st blob.Storage, f *FormattingOptions, caching *CachingOptions, opts *ManagerOptions) (*SharedManager, error) {
opts = opts.CloneOrDefault()
if opts.TimeNow == nil {
opts.TimeNow = clock.Now
}
if f.Version < minSupportedReadVersion || f.Version > currentWriteVersion {
return nil, errors.Errorf("can't handle repositories created using version %v (min supported %v, max supported %v)", f.Version, minSupportedReadVersion, maxSupportedReadVersion)
}
if f.Version < minSupportedWriteVersion || f.Version > currentWriteVersion {
return nil, errors.Errorf("can't handle repositories created using version %v (min supported %v, max supported %v)", f.Version, minSupportedWriteVersion, maxSupportedWriteVersion)
}
crypter, err := CreateCrypter(f)
if err != nil {
return nil, err
}
actualIndexVersion := f.IndexVersion
if actualIndexVersion == 0 {
actualIndexVersion = DefaultIndexVersion
}
if actualIndexVersion < v1IndexVersion || actualIndexVersion > v2IndexVersion {
return nil, errors.Errorf("index version %v is not supported", actualIndexVersion)
}
// create internal logger that will be writing logs as encrypted repository blobs.
ilm := newInternalLogManager(ctx, st, crypter)
// sharedBaseLogger writes to the both context and internal log
// and is used as a base for all content manager components.
var internalLog *internalLogger
// capture logger (usually console or log file) associated with current context.
sharedBaseLogger := logging.GetContextLoggerFunc(FormatLogModule)(ctx)
if !opts.DisableInternalLog {
internalLog = ilm.NewLogger()
sharedBaseLogger = logging.Broadcast{sharedBaseLogger, internalLog}
}
sm := &SharedManager{
st: st,
crypter: crypter,
Stats: new(Stats),
timeNow: opts.TimeNow,
format: *f,
maxPackSize: f.MaxPackSize,
minPreambleLength: defaultMinPreambleLength,
maxPreambleLength: defaultMaxPreambleLength,
paddingUnit: defaultPaddingUnit,
repositoryFormatBytes: opts.RepositoryFormatBytes,
checkInvariantsOnUnlock: os.Getenv("KOPIA_VERIFY_INVARIANTS") != "",
writeFormatVersion: int32(f.Version),
indexVersion: actualIndexVersion,
indexShardSize: defaultIndexShardSize,
internalLogManager: ilm,
internalLogger: internalLog,
sharedBaseLogger: sharedBaseLogger,
// remember logger defined for the context.
log: logging.WithPrefix("[shared-manager] ", sharedBaseLogger),
}
caching = caching.CloneOrDefault()
if err := sm.setupReadManagerCaches(ctx, caching); err != nil {
return nil, errors.Wrap(err, "error setting up read manager caches")
}
if err := sm.loadPackIndexesLocked(ctx); err != nil {
return nil, errors.Wrap(err, "error loading indexes")
}
return sm, nil
}
|
[
"\"KOPIA_VERIFY_INVARIANTS\""
] |
[] |
[
"KOPIA_VERIFY_INVARIANTS"
] |
[]
|
["KOPIA_VERIFY_INVARIANTS"]
|
go
| 1 | 0 | |
core/chaincode/exectransaction_test.go
|
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package chaincode
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"math/rand"
"net"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/udo/bccsp/factory"
mockpolicies "github.com/hyperledger/udo/common/mocks/policies"
"github.com/hyperledger/udo/common/policies"
"github.com/hyperledger/udo/common/util"
"github.com/hyperledger/udo/core/chaincode/accesscontrol"
"github.com/hyperledger/udo/core/common/ccprovider"
"github.com/hyperledger/udo/core/config"
"github.com/hyperledger/udo/core/container"
"github.com/hyperledger/udo/core/container/ccintf"
"github.com/hyperledger/udo/core/ledger"
"github.com/hyperledger/udo/core/ledger/ledgerconfig"
"github.com/hyperledger/udo/core/ledger/ledgermgmt"
"github.com/hyperledger/udo/core/ledger/util/couchdb"
"github.com/hyperledger/udo/core/peer"
"github.com/hyperledger/udo/core/policy"
"github.com/hyperledger/udo/core/policy/mocks"
"github.com/hyperledger/udo/core/scc"
"github.com/hyperledger/udo/core/testutil"
"github.com/hyperledger/udo/msp"
mspmgmt "github.com/hyperledger/udo/msp/mgmt"
"github.com/hyperledger/udo/msp/mgmt/testtools"
"github.com/hyperledger/udo/protos/common"
pb "github.com/hyperledger/udo/protos/peer"
putils "github.com/hyperledger/udo/protos/utils"
"github.com/spf13/viper"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
var runTests bool
func testForSkip(t *testing.T) {
//run tests
if !runTests {
t.SkipNow()
}
}
//initialize peer and start up. If security==enabled, login as vp
func initPeer(chainIDs ...string) (net.Listener, error) {
//start clean
finitPeer(nil, chainIDs...)
peer.MockInitialize()
mspGetter := func(cid string) []string {
return []string{"DEFAULT"}
}
peer.MockSetMSPIDGetter(mspGetter)
// For unit-test, tls is not required.
viper.Set("peer.tls.enabled", false)
var opts []grpc.ServerOption
if viper.GetBool("peer.tls.enabled") {
creds, err := credentials.NewServerTLSFromFile(config.GetPath("peer.tls.cert.file"), config.GetPath("peer.tls.key.file"))
if err != nil {
return nil, fmt.Errorf("Failed to generate credentials %v", err)
}
opts = []grpc.ServerOption{grpc.Creds(creds)}
}
grpcServer := grpc.NewServer(opts...)
peerAddress, err := peer.GetLocalAddress()
if err != nil {
return nil, fmt.Errorf("Error obtaining peer address: %s", err)
}
lis, err := net.Listen("tcp", peerAddress)
if err != nil {
return nil, fmt.Errorf("Error starting peer listener %s", err)
}
ccStartupTimeout := time.Duration(3) * time.Minute
ca, _ := accesscontrol.NewCA()
pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(peerAddress, false, ccStartupTimeout, ca))
// Mock policy checker
policy.RegisterPolicyCheckerFactory(&mockPolicyCheckerFactory{})
scc.RegisterSysCCs()
for _, id := range chainIDs {
scc.DeDeploySysCCs(id)
if err = peer.MockCreateChain(id); err != nil {
closeListenerAndSleep(lis)
return nil, err
}
scc.DeploySysCCs(id)
// any chain other than the default testchainid does not have an MSP set up -> create one
if id != util.GetTestChainID() {
mspmgmt.XXXSetMSPManager(id, mspmgmt.GetManagerForChain(util.GetTestChainID()))
}
}
go grpcServer.Serve(lis)
return lis, nil
}
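// finitPeer tears down the peer started by initPeer, closes its ledgers and removes test state
// (including the CouchDB test database when CouchDB is enabled).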
func finitPeer(lis net.Listener, chainIDs ...string) {
if lis != nil {
for _, c := range chainIDs {
scc.DeDeploySysCCs(c)
if lgr := peer.GetLedger(c); lgr != nil {
lgr.Close()
}
}
closeListenerAndSleep(lis)
}
ledgermgmt.CleanupTestEnv()
ledgerPath := config.GetPath("peer.fileSystemPath")
os.RemoveAll(ledgerPath)
os.RemoveAll(filepath.Join(os.TempDir(), "hyperledger"))
//if couchdb is enabled, then cleanup the test couchdb
if ledgerconfig.IsCouchDBEnabled() == true {
chainID := util.GetTestChainID()
connectURL := viper.GetString("ledger.state.couchDBConfig.couchDBAddress")
username := viper.GetString("ledger.state.couchDBConfig.username")
password := viper.GetString("ledger.state.couchDBConfig.password")
maxRetries := viper.GetInt("ledger.state.couchDBConfig.maxRetries")
maxRetriesOnStartup := viper.GetInt("ledger.state.couchDBConfig.maxRetriesOnStartup")
requestTimeout := viper.GetDuration("ledger.state.couchDBConfig.requestTimeout")
couchInstance, _ := couchdb.CreateCouchInstance(connectURL, username, password, maxRetries, maxRetriesOnStartup, requestTimeout)
db := couchdb.CouchDatabase{CouchInstance: *couchInstance, DBName: chainID}
//drop the test database
db.DropDatabase()
}
}
func startTxSimulation(ctxt context.Context, chainID string, txid string) (context.Context, ledger.TxSimulator, error) {
lgr := peer.GetLedger(chainID)
txsim, err := lgr.NewTxSimulator(txid)
if err != nil {
return nil, nil, err
}
historyQueryExecutor, err := lgr.NewHistoryQueryExecutor()
if err != nil {
return nil, nil, err
}
ctxt = context.WithValue(ctxt, TXSimulatorKey, txsim)
ctxt = context.WithValue(ctxt, HistoryQueryExecutorKey, historyQueryExecutor)
return ctxt, txsim, nil
}
func endTxSimulationCDS(chainID string, txid string, txsim ledger.TxSimulator, payload []byte, commit bool, cds *pb.ChaincodeDeploymentSpec, blockNumber uint64) error {
// get serialized version of the signer
ss, err := signer.Serialize()
if err != nil {
return err
}
// get lscc ChaincodeID
lsccid := &pb.ChaincodeID{
Name: "lscc",
Version: util.GetSysCCVersion(),
}
// get a proposal - we need it to get a transaction
prop, _, err := putils.CreateDeployProposalFromCDS(chainID, cds, ss, nil, nil, nil, nil)
if err != nil {
return err
}
return endTxSimulation(chainID, lsccid, txsim, payload, commit, prop, blockNumber)
}
func endTxSimulationCIS(chainID string, ccid *pb.ChaincodeID, txid string, txsim ledger.TxSimulator, payload []byte, commit bool, cis *pb.ChaincodeInvocationSpec, blockNumber uint64) error {
// get serialized version of the signer
ss, err := signer.Serialize()
if err != nil {
return err
}
// get a proposal - we need it to get a transaction
prop, returnedTxid, err := putils.CreateProposalFromCISAndTxid(txid, common.HeaderType_ENDORSER_TRANSACTION, chainID, cis, ss)
if err != nil {
return err
}
if returnedTxid != txid {
return errors.New("txids are not same")
}
return endTxSimulation(chainID, ccid, txsim, payload, commit, prop, blockNumber)
}
//getting a crash from ledger.Commit when doing concurrent invokes
//It is likely intentional that ledger.Commit is serial (ie, the real
//Committer will invoke this serially on each block). Mimic that here
//by forcing serialization of the ledger.Commit call.
//
//NOTE-this should NOT have any effect on the older serial tests.
//This affects only the tests in concurrent_test.go which call these
//concurrently (100 concurrent invokes followed by 100 concurrent queries)
var _commitLock_ sync.Mutex
func endTxSimulation(chainID string, ccid *pb.ChaincodeID, txsim ledger.TxSimulator, _ []byte, commit bool, prop *pb.Proposal, blockNumber uint64) error {
txsim.Done()
if lgr := peer.GetLedger(chainID); lgr != nil {
if commit {
var txSimulationResults *ledger.TxSimulationResults
var txSimulationBytes []byte
var err error
txsim.Done()
//get simulation results
if txSimulationResults, err = txsim.GetTxSimulationResults(); err != nil {
return err
}
if txSimulationBytes, err = txSimulationResults.GetPubSimulationBytes(); err != nil {
return err
}
// assemble a (signed) proposal response message
resp, err := putils.CreateProposalResponse(prop.Header, prop.Payload, &pb.Response{Status: 200},
txSimulationBytes, nil, ccid, nil, signer)
if err != nil {
return err
}
// get the envelope
env, err := putils.CreateSignedTx(prop, signer, resp)
if err != nil {
return err
}
envBytes, err := putils.GetBytesEnvelope(env)
if err != nil {
return err
}
//create the block with 1 transaction
block := common.NewBlock(blockNumber, []byte{})
block.Data.Data = [][]byte{envBytes}
//commit the block
//see comment on _commitLock_
_commitLock_.Lock()
defer _commitLock_.Unlock()
blockAndPvtData := &ledger.BlockAndPvtData{
Block: block,
BlockPvtData: make(map[uint64]*ledger.TxPvtData),
}
			// All tests are performed with just one transaction in a block.
			// Hence, we can simplify the procedure of constructing the
			// block with private data; there is no need to put more than
			// one transaction in a block when testing the chaincode API.
			// ASSUMPTION: Only one transaction in a block.
seqInBlock := uint64(0)
if txSimulationResults.PvtSimulationResults != nil {
blockAndPvtData.BlockPvtData[seqInBlock] = &ledger.TxPvtData{
SeqInBlock: seqInBlock,
WriteSet: txSimulationResults.PvtSimulationResults,
}
}
if err := lgr.CommitWithPvtData(blockAndPvtData); err != nil {
return err
}
}
}
return nil
}
// Build a chaincode.
func getDeploymentSpec(_ context.Context, spec *pb.ChaincodeSpec) (*pb.ChaincodeDeploymentSpec, error) {
fmt.Printf("getting deployment spec for chaincode spec: %v\n", spec)
codePackageBytes, err := container.GetChaincodePackageBytes(spec)
if err != nil {
return nil, err
}
cdDeploymentSpec := &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec, CodePackage: codePackageBytes}
return cdDeploymentSpec, nil
}
//getDeployLSCCSpec gets the spec for the chaincode deployment to be sent to LSCC
func getDeployLSCCSpec(chainID string, cds *pb.ChaincodeDeploymentSpec) (*pb.ChaincodeInvocationSpec, error) {
b, err := proto.Marshal(cds)
if err != nil {
return nil, err
}
sysCCVers := util.GetSysCCVersion()
//wrap the deployment in an invocation spec to lscc...
lsccSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_GOLANG, ChaincodeId: &pb.ChaincodeID{Name: "lscc", Version: sysCCVers}, Input: &pb.ChaincodeInput{Args: [][]byte{[]byte("deploy"), []byte(chainID), b}}}}
return lsccSpec, nil
}
// Deploy a chaincode - i.e., build and initialize.
func deploy(ctx context.Context, cccid *ccprovider.CCContext, spec *pb.ChaincodeSpec, blockNumber uint64) (b []byte, err error) {
// First build and get the deployment spec
cdDeploymentSpec, err := getDeploymentSpec(ctx, spec)
if err != nil {
return nil, err
}
return deploy2(ctx, cccid, cdDeploymentSpec, blockNumber)
}
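// deploy2 deploys an already-built ChaincodeDeploymentSpec: it stores the
// package on the peer's filesystem, registers it with lscc and then runs the
// chaincode's Init, committing everything as a single block.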
func deploy2(ctx context.Context, cccid *ccprovider.CCContext, chaincodeDeploymentSpec *pb.ChaincodeDeploymentSpec, blockNumber uint64) (b []byte, err error) {
cis, err := getDeployLSCCSpec(cccid.ChainID, chaincodeDeploymentSpec)
if err != nil {
return nil, fmt.Errorf("Error creating lscc spec : %s\n", err)
}
uuid := util.GenerateUUID()
cccid.TxID = uuid
ctx, txsim, err := startTxSimulation(ctx, cccid.ChainID, cccid.TxID)
if err != nil {
return nil, fmt.Errorf("Failed to get handle to simulator: %s ", err)
}
defer func() {
//no error, lets try commit
if err == nil {
//capture returned error from commit
err = endTxSimulationCDS(cccid.ChainID, uuid, txsim, []byte("deployed"), true, chaincodeDeploymentSpec, blockNumber)
} else {
//there was an error, just close simulation and return that
endTxSimulationCDS(cccid.ChainID, uuid, txsim, []byte("deployed"), false, chaincodeDeploymentSpec, blockNumber)
}
}()
//ignore existence errors
ccprovider.PutChaincodeIntoFS(chaincodeDeploymentSpec)
sysCCVers := util.GetSysCCVersion()
sprop, prop := putils.MockSignedEndorserProposal2OrPanic(cccid.ChainID, cis.ChaincodeSpec, signer)
lsccid := ccprovider.NewCCContext(cccid.ChainID, cis.ChaincodeSpec.ChaincodeId.Name, sysCCVers, uuid, true, sprop, prop)
//write to lscc
if _, _, err = ExecuteWithErrorFilter(ctx, lsccid, cis); err != nil {
return nil, fmt.Errorf("Error deploying chaincode (1): %s", err)
}
if b, _, err = ExecuteWithErrorFilter(ctx, cccid, chaincodeDeploymentSpec); err != nil {
return nil, fmt.Errorf("Error deploying chaincode(2): %s", err)
}
return b, nil
}
// Invoke a chaincode.
func invoke(ctx context.Context, chainID string, spec *pb.ChaincodeSpec, blockNumber uint64, creator []byte) (ccevt *pb.ChaincodeEvent, uuid string, retval []byte, err error) {
return invokeWithVersion(ctx, chainID, spec.GetChaincodeId().Version, spec, blockNumber, creator)
}
// Invoke a chaincode with version (needed for upgrade)
func invokeWithVersion(ctx context.Context, chainID string, version string, spec *pb.ChaincodeSpec, blockNumber uint64, creator []byte) (ccevt *pb.ChaincodeEvent, uuid string, retval []byte, err error) {
cdInvocationSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: spec}
// Now create the Transactions message and send to Peer.
uuid = util.GenerateUUID()
var txsim ledger.TxSimulator
ctx, txsim, err = startTxSimulation(ctx, chainID, uuid)
if err != nil {
return nil, uuid, nil, fmt.Errorf("Failed to get handle to simulator: %s ", err)
}
defer func() {
//no error, lets try commit
if err == nil {
//capture returned error from commit
err = endTxSimulationCIS(chainID, spec.ChaincodeId, uuid, txsim, []byte("invoke"), true, cdInvocationSpec, blockNumber)
} else {
//there was an error, just close simulation and return that
endTxSimulationCIS(chainID, spec.ChaincodeId, uuid, txsim, []byte("invoke"), false, cdInvocationSpec, blockNumber)
}
}()
if len(creator) == 0 {
creator = []byte("Admin")
}
sprop, prop := putils.MockSignedEndorserProposalOrPanic(chainID, spec, creator, []byte("msg1"))
cccid := ccprovider.NewCCContext(chainID, cdInvocationSpec.ChaincodeSpec.ChaincodeId.Name, version, uuid, false, sprop, prop)
retval, ccevt, err = ExecuteWithErrorFilter(ctx, cccid, cdInvocationSpec)
if err != nil {
return nil, uuid, nil, fmt.Errorf("Error invoking chaincode: %s", err)
}
return ccevt, uuid, retval, err
}
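// closeListenerAndSleep closes the peer's listener and gives in-flight
// connections a moment to drain before the next test reuses the port.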
func closeListenerAndSleep(l net.Listener) {
if l != nil {
l.Close()
time.Sleep(2 * time.Second)
}
}
func executeDeployTransaction(t *testing.T, chainID string, name string, url string) {
lis, err := initPeer(chainID)
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis, chainID)
var ctxt = context.Background()
f := "init"
args := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec := &pb.ChaincodeSpec{Type: 1, ChaincodeId: &pb.ChaincodeID{Name: name, Path: url, Version: "0"}, Input: &pb.ChaincodeInput{Args: args}}
cccid := ccprovider.NewCCContext(chainID, name, "0", "", false, nil, nil)
_, err = deploy(ctxt, cccid, spec, 0)
cID := spec.ChaincodeId.Name
if err != nil {
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
t.Fail()
t.Logf("Error deploying <%s>: %s", cID, err)
return
}
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
}
// chaincodeQueryChaincode function
func _(chainID string, _ string) error {
var ctxt = context.Background()
// Deploy first chaincode
url1 := "github.com/hyperledger/udo/examples/chaincode/go/chaincode_example02"
cID1 := &pb.ChaincodeID{Name: "example02", Path: url1, Version: "0"}
f := "init"
args := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID1, Input: &pb.ChaincodeInput{Args: args}}
cccid1 := ccprovider.NewCCContext(chainID, "example02", "0", "", false, nil, nil)
var nextBlockNumber uint64
_, err := deploy(ctxt, cccid1, spec1, nextBlockNumber)
nextBlockNumber++
ccID1 := spec1.ChaincodeId.Name
if err != nil {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
return fmt.Errorf("Error initializing chaincode %s(%s)", ccID1, err)
}
time.Sleep(time.Second)
// Deploy second chaincode
url2 := "github.com/hyperledger/udo/examples/chaincode/go/chaincode_example05"
cID2 := &pb.ChaincodeID{Name: "example05", Path: url2, Version: "0"}
f = "init"
args = util.ToChaincodeArgs(f, "sum", "0")
spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID2, Input: &pb.ChaincodeInput{Args: args}}
cccid2 := ccprovider.NewCCContext(chainID, "example05", "0", "", false, nil, nil)
_, err = deploy(ctxt, cccid2, spec2, nextBlockNumber)
nextBlockNumber++
ccID2 := spec2.ChaincodeId.Name
if err != nil {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return fmt.Errorf("Error initializing chaincode %s(%s)", ccID2, err)
}
time.Sleep(time.Second)
	// Invoke second chaincode, which will in turn query the first chaincode
f = "invoke"
args = util.ToChaincodeArgs(f, ccID1, "sum")
spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID2, Input: &pb.ChaincodeInput{Args: args}}
// Invoke chaincode
var retVal []byte
_, _, retVal, err = invoke(ctxt, chainID, spec2, nextBlockNumber, []byte("Alice"))
nextBlockNumber++
if err != nil {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return fmt.Errorf("Error invoking <%s>: %s", ccID2, err)
}
// Check the return value
result, err := strconv.Atoi(string(retVal))
if err != nil || result != 300 {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return fmt.Errorf("Incorrect final state after transaction for <%s>: %s", ccID1, err)
}
	// Query second chaincode, which will in turn query the first chaincode
f = "query"
args = util.ToChaincodeArgs(f, ccID1, "sum")
spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID2, Input: &pb.ChaincodeInput{Args: args}}
// Invoke chaincode
_, _, retVal, err = invoke(ctxt, chainID, spec2, nextBlockNumber, []byte("Alice"))
if err != nil {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return fmt.Errorf("Error querying <%s>: %s", ccID2, err)
}
// Check the return value
result, err = strconv.Atoi(string(retVal))
if err != nil || result != 300 {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return fmt.Errorf("Incorrect final value after query for <%s>: %s", ccID1, err)
}
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return nil
}
// Check the correctness of the final state after transaction execution.
func checkFinalState(cccid *ccprovider.CCContext, a int, b int) error {
txid := util.GenerateUUID()
_, txsim, err := startTxSimulation(context.Background(), cccid.ChainID, txid)
if err != nil {
return fmt.Errorf("Failed to get handle to simulator: %s ", err)
}
defer txsim.Done()
cName := cccid.GetCanonicalName()
// Invoke ledger to get state
var Aval, Bval int
resbytes, resErr := txsim.GetState(cccid.Name, "a")
if resErr != nil {
return fmt.Errorf("Error retrieving state from ledger for <%s>: %s", cName, resErr)
}
fmt.Printf("Got string: %s\n", string(resbytes))
Aval, resErr = strconv.Atoi(string(resbytes))
if resErr != nil {
return fmt.Errorf("Error retrieving state from ledger for <%s>: %s", cName, resErr)
}
if Aval != a {
return fmt.Errorf("Incorrect result. Aval %d != %d <%s>", Aval, a, cName)
}
resbytes, resErr = txsim.GetState(cccid.Name, "b")
if resErr != nil {
return fmt.Errorf("Error retrieving state from ledger for <%s>: %s", cName, resErr)
}
Bval, resErr = strconv.Atoi(string(resbytes))
if resErr != nil {
return fmt.Errorf("Error retrieving state from ledger for <%s>: %s", cName, resErr)
}
if Bval != b {
return fmt.Errorf("Incorrect result. Bval %d != %d <%s>", Bval, b, cName)
}
// Success
fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval)
return nil
}
// Invoke chaincode_example02
func invokeExample02Transaction(ctxt context.Context, cccid *ccprovider.CCContext, cID *pb.ChaincodeID, chaincodeType pb.ChaincodeSpec_Type, args []string, destroyImage bool) error {
// the ledger is created with genesis block. Start block number 1 onwards
var nextBlockNumber uint64 = 1
f := "init"
argsDeploy := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec := &pb.ChaincodeSpec{Type: chaincodeType, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: argsDeploy}}
_, err := deploy(ctxt, cccid, spec, nextBlockNumber)
nextBlockNumber++
ccID := spec.ChaincodeId.Name
if err != nil {
return fmt.Errorf("Error deploying <%s>: %s", ccID, err)
}
time.Sleep(time.Second)
if destroyImage {
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
dir := container.DestroyImageReq{CCID: ccintf.CCID{ChaincodeSpec: spec, NetworkID: theChaincodeSupport.peerNetworkID, PeerID: theChaincodeSupport.peerID, ChainID: cccid.ChainID}, Force: true, NoPrune: true}
_, err = container.VMCProcess(ctxt, container.DOCKER, dir)
if err != nil {
err = fmt.Errorf("Error destroying image: %s", err)
return err
}
}
f = "invoke"
invokeArgs := append([]string{f}, args...)
spec = &pb.ChaincodeSpec{ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: util.ToChaincodeArgs(invokeArgs...)}}
_, uuid, _, err := invoke(ctxt, cccid.ChainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
return fmt.Errorf("Error invoking <%s>: %s", cccid.Name, err)
}
cccid.TxID = uuid
err = checkFinalState(cccid, 90, 210)
if err != nil {
return fmt.Errorf("Incorrect final state after transaction for <%s>: %s", ccID, err)
}
// Test for delete state
f = "delete"
delArgs := util.ToChaincodeArgs(f, "a")
spec = &pb.ChaincodeSpec{ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: delArgs}}
_, _, _, err = invoke(ctxt, cccid.ChainID, spec, nextBlockNumber, nil)
if err != nil {
return fmt.Errorf("Error deleting state in <%s>: %s", cccid.Name, err)
}
return nil
}
const (
chaincodeExample02GolangPath = "github.com/hyperledger/udo/examples/chaincode/go/chaincode_example02"
chaincodeExample04GolangPath = "github.com/hyperledger/udo/examples/chaincode/go/chaincode_example04"
chaincodeEventSenderGolangPath = "github.com/hyperledger/udo/examples/chaincode/go/eventsender"
chaincodeExample02JavaPath = "../../examples/chaincode/java/chaincode_example02"
chaincodeExample04JavaPath = "../../examples/chaincode/java/chaincode_example04"
chaincodeExample06JavaPath = "../../examples/chaincode/java/chaincode_example06"
chaincodeEventSenderJavaPath = "../../examples/chaincode/java/eventsender"
)
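// runChaincodeInvokeChaincode deploys a second chaincode on channel1, has it
// invoke the already-deployed chaincode cccid1 and checks the resulting state,
// then redeploys it on channel2 and exercises the cross-channel writer policy
// (Bob is rejected, Alice is allowed). It returns the updated next block
// numbers for both channels.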
func runChaincodeInvokeChaincode(t *testing.T, channel1 string, channel2 string, tc tcicTc, cccid1 *ccprovider.CCContext, expectedA int, expectedB int, nextBlockNumber1, nextBlockNumber2 uint64) (uint64, uint64) {
var ctxt = context.Background()
	// chaincode2: the chaincode that will be called by chaincode1
chaincode2Name := generateChaincodeName(tc.chaincodeType)
chaincode2Version := "0"
chaincode2Type := tc.chaincodeType
chaincode2Path := tc.chaincodePath
chaincode2InitArgs := util.ToChaincodeArgs("init", "e", "0")
	chaincode2Creator := []byte("Alice")
// deploy second chaincode on channel1
_, cccid2, err := deployChaincode(ctxt, chaincode2Name, chaincode2Version, chaincode2Type, chaincode2Path, chaincode2InitArgs, chaincode2Creator, channel1, nextBlockNumber1)
if err != nil {
stopChaincode(ctxt, cccid1)
stopChaincode(ctxt, cccid2)
t.Fatalf("Error initializing chaincode %s(%s)", chaincode2Name, err)
return nextBlockNumber1, nextBlockNumber2
}
nextBlockNumber1++
time.Sleep(time.Second)
	// Invoke second chaincode passing the first chaincode's name as first param,
	// which will in turn invoke the first chaincode
chaincode2InvokeSpec := &pb.ChaincodeSpec{
Type: chaincode2Type,
ChaincodeId: &pb.ChaincodeID{
Name: chaincode2Name,
Version: chaincode2Version,
},
Input: &pb.ChaincodeInput{
Args: util.ToChaincodeArgs("invoke", cccid1.Name, "e", "1"),
},
}
// Invoke chaincode
_, txID, _, err := invoke(ctxt, channel1, chaincode2InvokeSpec, nextBlockNumber1, []byte("Alice"))
if err != nil {
stopChaincode(ctxt, cccid1)
stopChaincode(ctxt, cccid2)
t.Fatalf("Error invoking <%s>: %s", chaincode2Name, err)
return nextBlockNumber1, nextBlockNumber2
}
nextBlockNumber1++
	// TODO this doesn't seem to be used, remove?
cccid1.TxID = txID
// Check the state in the ledger
err = checkFinalState(cccid1, expectedA, expectedB)
if err != nil {
stopChaincode(ctxt, cccid1)
stopChaincode(ctxt, cccid2)
t.Fatalf("Incorrect final state after transaction for <%s>: %s", cccid1.Name, err)
return nextBlockNumber1, nextBlockNumber2
}
// Change the policies of the two channels in such a way:
// 1. Alice has reader access to both the channels.
// 2. Bob has access only to chainID2.
// Therefore the chaincode invocation should fail.
pm := peer.GetPolicyManager(channel1)
pm.(*mockpolicies.Manager).PolicyMap = map[string]policies.Policy{
policies.ChannelApplicationWriters: &CreatorPolicy{Creators: [][]byte{[]byte("Alice")}},
}
pm = peer.GetPolicyManager(channel2)
pm.(*mockpolicies.Manager).PolicyMap = map[string]policies.Policy{
policies.ChannelApplicationWriters: &CreatorPolicy{Creators: [][]byte{[]byte("Alice"), []byte("Bob")}},
}
// deploy chaincode2 on channel2
_, cccid3, err := deployChaincode(ctxt, chaincode2Name, chaincode2Version, chaincode2Type, chaincode2Path, chaincode2InitArgs, chaincode2Creator, channel2, nextBlockNumber2)
if err != nil {
stopChaincode(ctxt, cccid1)
stopChaincode(ctxt, cccid2)
stopChaincode(ctxt, cccid3)
t.Fatalf("Error initializing chaincode %s/%s: %s", chaincode2Name, channel2, err)
return nextBlockNumber1, nextBlockNumber2
}
nextBlockNumber2++
time.Sleep(time.Second)
// as Bob, invoke chaincode2 on channel2 so that it invokes chaincode1 on channel1
chaincode2InvokeSpec = &pb.ChaincodeSpec{
Type: chaincode2Type,
ChaincodeId: &pb.ChaincodeID{
Name: chaincode2Name,
Version: chaincode2Version,
},
Input: &pb.ChaincodeInput{
Args: util.ToChaincodeArgs("invoke", cccid1.Name, "e", "1", channel1),
},
}
_, _, _, err = invoke(ctxt, channel2, chaincode2InvokeSpec, nextBlockNumber2, []byte("Bob"))
if err == nil {
// Bob should not be able to call
stopChaincode(ctxt, cccid1)
stopChaincode(ctxt, cccid2)
stopChaincode(ctxt, cccid3)
nextBlockNumber2++
t.Fatalf("As Bob, invoking <%s/%s> via <%s/%s> should fail, but it succeeded.", cccid1.Name, cccid1.ChainID, chaincode2Name, channel2)
return nextBlockNumber1, nextBlockNumber2
}
// as Alice, invoke chaincode2 on channel2 so that it invokes chaincode1 on channel1
_, _, _, err = invoke(ctxt, channel2, chaincode2InvokeSpec, nextBlockNumber2, []byte("Alice"))
if err != nil {
// Alice should be able to call
stopChaincode(ctxt, cccid1)
stopChaincode(ctxt, cccid2)
stopChaincode(ctxt, cccid3)
t.Fatalf("As Alice, invoking <%s/%s> via <%s/%s> should should of succeeded, but it failed: %s", cccid1.Name, cccid1.ChainID, chaincode2Name, channel2, err)
return nextBlockNumber1, nextBlockNumber2
}
nextBlockNumber2++
stopChaincode(ctxt, cccid1)
stopChaincode(ctxt, cccid2)
stopChaincode(ctxt, cccid3)
return nextBlockNumber1, nextBlockNumber2
}
// Test deploy of a transaction
func TestExecuteDeployTransaction(t *testing.T) {
	//chaincode is deployed as part of many tests. No need for a separate one for this
t.Skip()
chainID := util.GetTestChainID()
executeDeployTransaction(t, chainID, "example01", "github.com/hyperledger/udo/examples/chaincode/go/chaincode_example01")
}
// Test deploy of a transaction with a GOPATH with multiple elements
func TestGopathExecuteDeployTransaction(t *testing.T) {
//this is no longer critical as chaincode is assembled in the client side (SDK)
t.Skip()
chainID := util.GetTestChainID()
// add a trailing slash to GOPATH
// and a couple of elements - it doesn't matter what they are
os.Setenv("GOPATH", os.Getenv("GOPATH")+string(os.PathSeparator)+string(os.PathListSeparator)+"/tmp/foo"+string(os.PathListSeparator)+"/tmp/bar")
executeDeployTransaction(t, chainID, "example01", "github.com/hyperledger/udo/examples/chaincode/go/chaincode_example01")
}
func TestExecuteInvokeTransaction(t *testing.T) {
testForSkip(t)
testCases := []struct {
chaincodeType pb.ChaincodeSpec_Type
chaincodePath string
}{
{pb.ChaincodeSpec_GOLANG, chaincodeExample02GolangPath},
{pb.ChaincodeSpec_JAVA, chaincodeExample02JavaPath},
}
for _, tc := range testCases {
t.Run(tc.chaincodeType.String(), func(t *testing.T) {
if tc.chaincodeType == pb.ChaincodeSpec_JAVA && runtime.GOARCH != "amd64" {
t.Skip("No Java chaincode support yet on non-x86_64.")
}
chainID := util.GetTestChainID()
lis, err := initPeer(chainID)
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis, chainID)
var ctxt = context.Background()
chaincodeName := generateChaincodeName(tc.chaincodeType)
chaincodeVersion := "1.0.0.0"
cccid := ccprovider.NewCCContext(chainID, chaincodeName, chaincodeVersion, "", false, nil, nil)
ccID := &pb.ChaincodeID{Name: chaincodeName, Path: tc.chaincodePath, Version: chaincodeVersion}
args := []string{"a", "b", "10"}
err = invokeExample02Transaction(ctxt, cccid, ccID, tc.chaincodeType, args, true)
if err != nil {
t.Fail()
t.Logf("Error invoking transaction: %s", err)
} else {
fmt.Print("Invoke test passed\n")
t.Log("Invoke test passed")
}
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeId: ccID}})
})
}
}
// Test the execution of an invalid transaction.
func TestExecuteInvokeInvalidTransaction(t *testing.T) {
testForSkip(t)
chainID := util.GetTestChainID()
lis, err := initPeer(chainID)
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis, chainID)
var ctxt = context.Background()
url := "github.com/hyperledger/udo/examples/chaincode/go/chaincode_example02"
ccID := &pb.ChaincodeID{Name: "example02", Path: url, Version: "0"}
cccid := ccprovider.NewCCContext(chainID, "example02", "0", "", false, nil, nil)
//FAIL, FAIL!
args := []string{"x", "-1"}
err = invokeExample02Transaction(ctxt, cccid, ccID, pb.ChaincodeSpec_GOLANG, args, false)
//this HAS to fail with expectedDeltaStringPrefix
if err != nil {
errStr := err.Error()
t.Logf("Got error %s\n", errStr)
t.Log("InvalidInvoke test passed")
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeId: ccID}})
return
}
t.Fail()
t.Logf("Error invoking transaction %s", err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeId: ccID}})
}
// testcase parameters for TestChaincodeInvokeChaincode
type tcicTc struct {
chaincodeType pb.ChaincodeSpec_Type
chaincodePath string
}
// Test the execution of a chaincode that invokes another chaincode.
func TestChaincodeInvokeChaincode(t *testing.T) {
testForSkip(t)
channel := util.GetTestChainID()
channel2 := channel + "2"
lis, err := initPeer(channel, channel2)
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis, channel, channel2)
testCases := []tcicTc{
{pb.ChaincodeSpec_GOLANG, chaincodeExample04GolangPath},
{pb.ChaincodeSpec_JAVA, chaincodeExample04JavaPath},
}
ctx := context.Background()
var nextBlockNumber1 uint64 = 1
var nextBlockNumber2 uint64 = 1
// deploy the chaincode that will be called by the second chaincode
chaincode1Name := generateChaincodeName(pb.ChaincodeSpec_GOLANG)
chaincode1Version := "0"
chaincode1Type := pb.ChaincodeSpec_GOLANG
chaincode1Path := chaincodeExample02GolangPath
initialA := 100
initialB := 200
chaincode1InitArgs := util.ToChaincodeArgs("init", "a", strconv.Itoa(initialA), "b", strconv.Itoa(initialB))
	chaincode1Creator := []byte("Alice")
// Deploy first chaincode
_, chaincodeCtx, err := deployChaincode(ctx, chaincode1Name, chaincode1Version, chaincode1Type, chaincode1Path, chaincode1InitArgs, chaincode1Creator, channel, nextBlockNumber1)
if err != nil {
stopChaincode(ctx, chaincodeCtx)
t.Fatalf("Error initializing chaincode %s: %s", chaincodeCtx.Name, err)
}
nextBlockNumber1++
time.Sleep(time.Second)
expectedA := initialA
expectedB := initialB
for _, tc := range testCases {
t.Run(tc.chaincodeType.String(), func(t *testing.T) {
if tc.chaincodeType == pb.ChaincodeSpec_JAVA && runtime.GOARCH != "amd64" {
t.Skip("No Java chaincode support yet on non-x86_64.")
}
expectedA = expectedA - 10
expectedB = expectedB + 10
nextBlockNumber1, nextBlockNumber2 = runChaincodeInvokeChaincode(t, channel, channel2, tc, chaincodeCtx, expectedA, expectedB, nextBlockNumber1, nextBlockNumber2)
})
}
closeListenerAndSleep(lis)
}
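// stopChaincode shuts down the running container for the given chaincode context.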
func stopChaincode(ctx context.Context, chaincodeCtx *ccprovider.CCContext) {
theChaincodeSupport.Stop(ctx, chaincodeCtx,
&pb.ChaincodeDeploymentSpec{
ChaincodeSpec: &pb.ChaincodeSpec{
ChaincodeId: &pb.ChaincodeID{
Name: chaincodeCtx.Name,
Version: chaincodeCtx.Version,
},
},
})
}
// Test the execution of a chaincode that invokes another chaincode with wrong parameters.
// Should receive an error from the called chaincode.
func TestChaincodeInvokeChaincodeErrorCase(t *testing.T) {
testForSkip(t)
chainID := util.GetTestChainID()
lis, err := initPeer(chainID)
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis, chainID)
var ctxt = context.Background()
// Deploy first chaincode
url1 := "github.com/hyperledger/udo/examples/chaincode/go/chaincode_example02"
cID1 := &pb.ChaincodeID{Name: "example02", Path: url1, Version: "0"}
f := "init"
args := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID1, Input: &pb.ChaincodeInput{Args: args}}
	sProp, prop := putils.MockSignedEndorserProposalOrPanic(util.GetTestChainID(), spec1, []byte("Alice"), nil)
cccid1 := ccprovider.NewCCContext(chainID, "example02", "0", "", false, sProp, prop)
var nextBlockNumber uint64 = 1
_, err = deploy(ctxt, cccid1, spec1, nextBlockNumber)
nextBlockNumber++
ccID1 := spec1.ChaincodeId.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", ccID1, err)
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
return
}
time.Sleep(time.Second)
// Deploy second chaincode
url2 := "github.com/hyperledger/udo/examples/chaincode/go/passthru"
cID2 := &pb.ChaincodeID{Name: "pthru", Path: url2, Version: "0"}
f = "init"
args = util.ToChaincodeArgs(f)
spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID2, Input: &pb.ChaincodeInput{Args: args}}
cccid2 := ccprovider.NewCCContext(chainID, "pthru", "0", "", false, sProp, prop)
_, err = deploy(ctxt, cccid2, spec2, nextBlockNumber)
nextBlockNumber++
ccID2 := spec2.ChaincodeId.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", ccID2, err)
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
time.Sleep(time.Second)
	// Invoke second chaincode, which will in turn invoke the first chaincode but pass bad params
f = ccID1
args = util.ToChaincodeArgs(f, "invoke", "a")
spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID2, Input: &pb.ChaincodeInput{Args: args}}
// Invoke chaincode
_, _, _, err = invoke(ctxt, chainID, spec2, nextBlockNumber, []byte("Alice"))
if err == nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID2, err)
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
if strings.Index(err.Error(), "Error invoking chaincode: Incorrect number of arguments. Expecting 3") < 0 {
t.Fail()
t.Logf("Unexpected error %s", err)
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
}
// Test the invocation of a transaction.
func TestQueries(t *testing.T) {
	// Allow the queries test to run alone so that an end-to-end test can be performed. It takes less than 5 seconds.
//testForSkip(t)
chainID := util.GetTestChainID()
lis, err := initPeer(chainID)
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis, chainID)
var ctxt = context.Background()
url := "github.com/hyperledger/udo/examples/chaincode/go/map"
cID := &pb.ChaincodeID{Name: "tmap", Path: url, Version: "0"}
f := "init"
args := util.ToChaincodeArgs(f)
spec := &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
cccid := ccprovider.NewCCContext(chainID, "tmap", "0", "", false, nil, nil)
var nextBlockNumber uint64 = 1
_, err = deploy(ctxt, cccid, spec, nextBlockNumber)
nextBlockNumber++
ccID := spec.ChaincodeId.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
var keys []interface{}
// Add 101 marbles for testing range queries and rich queries (for capable ledgers)
// The tests will test both range and rich queries and queries with query limits
for i := 1; i <= 101; i++ {
f = "put"
// 51 owned by tom, 50 by jerry
owner := "tom"
if i%2 == 0 {
owner = "jerry"
}
// one marble color is red, 100 are blue
color := "blue"
if i == 12 {
color = "red"
}
key := fmt.Sprintf("marble%03d", i)
argsString := fmt.Sprintf("{\"docType\":\"marble\",\"name\":\"%s\",\"color\":\"%s\",\"size\":35,\"owner\":\"%s\"}", key, color, owner)
args = util.ToChaincodeArgs(f, key, argsString)
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
_, _, _, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
}
//The following range query for "marble001" to "marble011" should return 10 marbles
f = "keys"
args = util.ToChaincodeArgs(f, "marble001", "marble011")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
_, _, retval, err := invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
err = json.Unmarshal(retval, &keys)
if len(keys) != 10 {
t.Fail()
t.Logf("Error detected with the range query, should have returned 10 but returned %v", len(keys))
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
//FAB-1163- The following range query should timeout and produce an error
//the peer should handle this gracefully and not die
//save the original timeout and set a new timeout of 1 sec
origTimeout := theChaincodeSupport.executetimeout
theChaincodeSupport.executetimeout = time.Duration(1) * time.Second
//chaincode to sleep for 2 secs with timeout 1
args = util.ToChaincodeArgs(f, "marble001", "marble002", "2000")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
_, _, retval, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
if err == nil {
t.Fail()
t.Logf("expected timeout error but succeeded")
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
//restore timeout
theChaincodeSupport.executetimeout = origTimeout
// querying for all marbles will return 101 marbles
// this query should return exactly 101 results (one call to Next())
//The following range query for "marble001" to "marble102" should return 101 marbles
f = "keys"
args = util.ToChaincodeArgs(f, "marble001", "marble102")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
_, _, retval, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
//unmarshal the results
err = json.Unmarshal(retval, &keys)
//check to see if there are 101 values
//default query limit of 10000 is used, this query is effectively unlimited
if len(keys) != 101 {
t.Fail()
t.Logf("Error detected with the range query, should have returned 101 but returned %v", len(keys))
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
	// querying for all simple keys. This query should return exactly 101 simple keys (one
	// call to Next()), no composite keys.
//The following open ended range query for "" to "" should return 101 marbles
f = "keys"
args = util.ToChaincodeArgs(f, "", "")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
_, _, retval, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
//unmarshal the results
err = json.Unmarshal(retval, &keys)
//check to see if there are 101 values
//default query limit of 10000 is used, this query is effectively unlimited
if len(keys) != 101 {
t.Fail()
t.Logf("Error detected with the range query, should have returned 101 but returned %v", len(keys))
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
// ExecuteQuery supported only for CouchDB and
// query limits apply for CouchDB range and rich queries only
if ledgerconfig.IsCouchDBEnabled() == true {
		// corner cases for shim batching. current shim batch size is 100
// this query should return exactly 100 results (no call to Next())
f = "query"
args = util.ToChaincodeArgs(f, "{\"selector\":{\"color\":\"blue\"}}")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
		// capture retval so the unmarshal below checks this query's results
		_, _, retval, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
//unmarshal the results
err = json.Unmarshal(retval, &keys)
//check to see if there are 100 values
if len(keys) != 100 {
t.Fail()
t.Logf("Error detected with the rich query, should have returned 100 but returned %v %s", len(keys), keys)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
//Reset the query limit to 5
viper.Set("ledger.state.queryLimit", 5)
//The following range query for "marble01" to "marble11" should return 5 marbles due to the queryLimit
f = "keys"
args = util.ToChaincodeArgs(f, "marble001", "marble011")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
_, _, retval, err := invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
//unmarshal the results
err = json.Unmarshal(retval, &keys)
//check to see if there are 5 values
if len(keys) != 5 {
t.Fail()
t.Logf("Error detected with the range query, should have returned 5 but returned %v", len(keys))
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
//Reset the query limit to 10000
viper.Set("ledger.state.queryLimit", 10000)
//The following rich query for should return 50 marbles
f = "query"
args = util.ToChaincodeArgs(f, "{\"selector\":{\"owner\":\"jerry\"}}")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
_, _, retval, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
//unmarshal the results
err = json.Unmarshal(retval, &keys)
//check to see if there are 50 values
//default query limit of 10000 is used, this query is effectively unlimited
if len(keys) != 50 {
t.Fail()
t.Logf("Error detected with the rich query, should have returned 50 but returned %v", len(keys))
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
//Reset the query limit to 5
viper.Set("ledger.state.queryLimit", 5)
//The following rich query should return 5 marbles due to the queryLimit
f = "query"
args = util.ToChaincodeArgs(f, "{\"selector\":{\"owner\":\"jerry\"}}")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
_, _, retval, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
//unmarshal the results
err = json.Unmarshal(retval, &keys)
//check to see if there are 5 values
if len(keys) != 5 {
t.Fail()
t.Logf("Error detected with the rich query, should have returned 5 but returned %v", len(keys))
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
}
// modifications for history query
f = "put"
args = util.ToChaincodeArgs(f, "marble012", "{\"docType\":\"marble\",\"name\":\"marble012\",\"color\":\"red\",\"size\":30,\"owner\":\"jerry\"}")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
_, _, _, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
f = "put"
args = util.ToChaincodeArgs(f, "marble012", "{\"docType\":\"marble\",\"name\":\"marble012\",\"color\":\"red\",\"size\":30,\"owner\":\"jerry\"}")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
_, _, _, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
	//The following history query for "marble012" should return 3 records
f = "history"
args = util.ToChaincodeArgs(f, "marble012")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
_, _, retval, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
var history []interface{}
err = json.Unmarshal(retval, &history)
if len(history) != 3 {
t.Fail()
t.Logf("Error detected with the history query, should have returned 3 but returned %v", len(history))
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
}
func TestGetEvent(t *testing.T) {
testForSkip(t)
testCases := []struct {
chaincodeType pb.ChaincodeSpec_Type
chaincodePath string
}{
{pb.ChaincodeSpec_GOLANG, chaincodeEventSenderGolangPath},
{pb.ChaincodeSpec_JAVA, chaincodeEventSenderJavaPath},
}
chainID := util.GetTestChainID()
var nextBlockNumber uint64
lis, err := initPeer(chainID)
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
nextBlockNumber++
defer finitPeer(lis, chainID)
for _, tc := range testCases {
t.Run(tc.chaincodeType.String(), func(t *testing.T) {
if tc.chaincodeType == pb.ChaincodeSpec_JAVA && runtime.GOARCH != "amd64" {
t.Skip("No Java chaincode support yet on non-x86_64.")
}
var ctxt = context.Background()
cID := &pb.ChaincodeID{Name: generateChaincodeName(tc.chaincodeType), Path: tc.chaincodePath, Version: "0"}
f := "init"
spec := &pb.ChaincodeSpec{Type: tc.chaincodeType, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: util.ToChaincodeArgs(f)}}
cccid := ccprovider.NewCCContext(chainID, cID.Name, cID.Version, "", false, nil, nil)
_, err = deploy(ctxt, cccid, spec, nextBlockNumber)
nextBlockNumber++
ccID := spec.ChaincodeId.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
time.Sleep(time.Second)
args := util.ToChaincodeArgs("invoke", "i", "am", "satoshi")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
var ccevt *pb.ChaincodeEvent
ccevt, _, _, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
nextBlockNumber++
if err != nil {
t.Logf("Error invoking chaincode %s(%s)", ccID, err)
t.Fail()
}
			if ccevt == nil {
				// Fatalf (rather than Fail) so we do not dereference a nil event below.
				t.Fatalf("Error ccevt is nil %s(%s)", ccID, err)
			}
if ccevt.ChaincodeId != ccID {
t.Logf("Error ccevt id(%s) != cid(%s)", ccevt.ChaincodeId, ccID)
t.Fail()
}
if strings.Index(string(ccevt.Payload), "i,am,satoshi") < 0 {
t.Logf("Error expected event not found (%s)", string(ccevt.Payload))
t.Fail()
}
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
})
}
}
// Test the execution of a chaincode that queries another chaincode
// example02 implements "query" as a function in Invoke. example05 calls example02
func TestChaincodeQueryChaincodeUsingInvoke(t *testing.T) {
testForSkip(t)
	//this is essentially the same as ChaincodeInvokeChaincode now that
//we don't distinguish between Invoke and Query (there's no separate "Query")
t.Skip()
chainID := util.GetTestChainID()
var peerLis net.Listener
var err error
if peerLis, err = initPeer(chainID); err != nil {
t.Fail()
t.Logf("Error registering user %s", err)
return
}
defer finitPeer(peerLis, chainID)
var ctxt = context.Background()
// Deploy first chaincode
url1 := "github.com/hyperledger/udo/examples/chaincode/go/chaincode_example02"
cID1 := &pb.ChaincodeID{Name: "example02", Path: url1, Version: "0"}
f := "init"
args := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID1, Input: &pb.ChaincodeInput{Args: args}}
	sProp, prop := putils.MockSignedEndorserProposalOrPanic(util.GetTestChainID(), spec1, []byte("Alice"), nil)
cccid1 := ccprovider.NewCCContext(chainID, "example02", "0", "", false, sProp, prop)
var nextBlockNumber uint64
_, err = deploy(ctxt, cccid1, spec1, nextBlockNumber)
nextBlockNumber++
ccID1 := spec1.ChaincodeId.Name
if err != nil {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", ccID1, err)
return
}
time.Sleep(time.Second)
// Deploy second chaincode
url2 := "github.com/hyperledger/udo/examples/chaincode/go/chaincode_example05"
cID2 := &pb.ChaincodeID{Name: "example05", Path: url2, Version: "0"}
f = "init"
args = util.ToChaincodeArgs(f, "sum", "0")
spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID2, Input: &pb.ChaincodeInput{Args: args}}
cccid2 := ccprovider.NewCCContext(chainID, "example05", "0", "", false, sProp, prop)
_, err = deploy(ctxt, cccid2, spec2, nextBlockNumber)
nextBlockNumber++
ccID2 := spec2.ChaincodeId.Name
if err != nil {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", ccID2, err)
return
}
time.Sleep(time.Second)
	// Invoke second chaincode, which will in turn query the first chaincode
f = "invoke"
args = util.ToChaincodeArgs(f, ccID1, "sum")
spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID2, Input: &pb.ChaincodeInput{Args: args}}
// Invoke chaincode
var retVal []byte
_, _, retVal, err = invoke(ctxt, chainID, spec2, nextBlockNumber, []byte("Alice"))
nextBlockNumber++
if err != nil {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID2, err)
return
}
// Check the return value
result, err := strconv.Atoi(string(retVal))
if err != nil || result != 300 {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
t.Fail()
t.Logf("Incorrect final state after transaction for <%s>: %s", ccID1, err)
return
}
	// Query second chaincode, which will in turn query the first chaincode
f = "query"
args = util.ToChaincodeArgs(f, ccID1, "sum")
spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID2, Input: &pb.ChaincodeInput{Args: args}}
// Invoke chaincode
_, _, retVal, err = invoke(ctxt, chainID, spec2, nextBlockNumber, []byte("Alice"))
if err != nil {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
t.Fail()
t.Logf("Error querying <%s>: %s", ccID2, err)
return
}
// Check the return value
result, err = strconv.Atoi(string(retVal))
if err != nil || result != 300 {
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
t.Fail()
t.Logf("Incorrect final value after query for <%s>: %s", ccID1, err)
return
}
theChaincodeSupport.Stop(ctxt, cccid1, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
theChaincodeSupport.Stop(ctxt, cccid2, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
}
// test that invoking a security-sensitive system chaincode fails
func TestChaincodeInvokesForbiddenSystemChaincode(t *testing.T) {
testForSkip(t)
chainID := util.GetTestChainID()
lis, err := initPeer(chainID)
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis, chainID)
var ctxt = context.Background()
var nextBlockNumber uint64 = 1
// Deploy second chaincode
url := "github.com/hyperledger/udo/examples/chaincode/go/passthru"
cID := &pb.ChaincodeID{Name: "pthru", Path: url, Version: "0"}
f := "init"
args := util.ToChaincodeArgs(f)
spec := &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
cccid := ccprovider.NewCCContext(chainID, "pthru", "0", "", false, nil, nil)
_, err = deploy(ctxt, cccid, spec, nextBlockNumber)
nextBlockNumber++
ccID := spec.ChaincodeId.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
time.Sleep(time.Second)
// send an invoke to pass thru to invoke "escc" system chaincode
// this should fail
args = util.ToChaincodeArgs("escc/"+chainID, "getid", chainID, "pthru")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
// Invoke chaincode
_, _, _, err = invoke(ctxt, chainID, spec, nextBlockNumber, nil)
if err == nil {
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
t.Logf("invoking <%s> should have failed", ccID)
t.Fail()
return
}
}
// Test the execution of a chaincode that invokes system chaincode
// uses the "pthru" chaincode to query "lscc" for the "pthru" chaincode
func TestChaincodeInvokesSystemChaincode(t *testing.T) {
testForSkip(t)
chainID := util.GetTestChainID()
lis, err := initPeer(chainID)
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis, chainID)
var ctxt = context.Background()
var nextBlockNumber uint64 = 1
// Deploy second chaincode
url := "github.com/hyperledger/udo/examples/chaincode/go/passthru"
cID := &pb.ChaincodeID{Name: "pthru", Path: url, Version: "0"}
f := "init"
args := util.ToChaincodeArgs(f)
spec := &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
cccid := ccprovider.NewCCContext(chainID, "pthru", "0", "", false, nil, nil)
_, err = deploy(ctxt, cccid, spec, nextBlockNumber)
nextBlockNumber++
ccID := spec.ChaincodeId.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
time.Sleep(time.Second)
//send an invoke to pass thru to query "lscc" system chaincode on chainID to get
//information about "pthru"
args = util.ToChaincodeArgs("lscc/"+chainID, "getid", chainID, "pthru")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeId: cID, Input: &pb.ChaincodeInput{Args: args}}
// Invoke chaincode
_, _, retval, err := invoke(ctxt, chainID, spec, nextBlockNumber, nil)
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", ccID, err)
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
if string(retval) != "pthru" {
t.Fail()
t.Logf("Expected to get back \"pthru\" from lscc but got back %s", string(retval))
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
theChaincodeSupport.Stop(ctxt, cccid, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
}
func TestChaincodeInitializeInitError(t *testing.T) {
testForSkip(t)
testCases := []struct {
name string
chaincodeType pb.ChaincodeSpec_Type
chaincodePath string
args []string
}{
{"NotSuccessResponse", pb.ChaincodeSpec_GOLANG, chaincodeExample02GolangPath, []string{"init", "not", "enough", "args"}},
{"NotSuccessResponse", pb.ChaincodeSpec_JAVA, chaincodeExample02JavaPath, []string{"init", "not", "enough", "args"}},
{"RuntimeException", pb.ChaincodeSpec_JAVA, chaincodeExample06JavaPath, []string{"runtimeException"}},
}
channel := util.GetTestChainID()
for _, tc := range testCases {
t.Run(tc.name+"_"+tc.chaincodeType.String(), func(t *testing.T) {
if tc.chaincodeType == pb.ChaincodeSpec_JAVA && runtime.GOARCH != "amd64" {
t.Skip("No Java chaincode support yet on non-x86_64.")
}
// initialize peer
if listener, err := initPeer(channel); err != nil {
t.Errorf("Error creating peer: %s", err)
} else {
defer finitPeer(listener, channel)
}
var nextBlockNumber uint64
// the chaincode to install and instantiate
chaincodeName := generateChaincodeName(tc.chaincodeType)
chaincodePath := tc.chaincodePath
chaincodeVersion := "1.0.0.0"
chaincodeType := tc.chaincodeType
chaincodeDeployArgs := util.ArrayToChaincodeArgs(tc.args)
// attempt to deploy chaincode
_, chaincodeCtx, err := deployChaincode(context.Background(), chaincodeName, chaincodeVersion, chaincodeType, chaincodePath, chaincodeDeployArgs, nil, channel, nextBlockNumber)
			// deploy should have failed
if err == nil {
stopChaincode(context.Background(), chaincodeCtx)
t.Fatal("Deployment should have failed.")
}
t.Log(err)
})
}
}
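// TestMain initializes the local MSP and the default signing identity used by
// every test in this package, loads the test configuration and then runs the
// suite.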
func TestMain(m *testing.M) {
var err error
msptesttools.LoadMSPSetupForTesting()
signer, err = mspmgmt.GetLocalMSP().GetDefaultSigningIdentity()
if err != nil {
fmt.Print("Could not initialize msp/signer")
os.Exit(-1)
return
}
setupTestConfig()
os.Exit(m.Run())
}
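// setupTestConfig reads the chaincodetest config file via viper, enables CORE_*
// environment overrides, sets up test logging and initializes the BCCSP
// factories.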
func setupTestConfig() {
flag.Parse()
// Now set the configuration file
viper.SetEnvPrefix("CORE")
viper.AutomaticEnv()
replacer := strings.NewReplacer(".", "_")
viper.SetEnvKeyReplacer(replacer)
viper.SetConfigName("chaincodetest") // name of config file (without extension)
viper.AddConfigPath("./") // path to look for the config file in
err := viper.ReadInConfig() // Find and read the config file
if err != nil { // Handle errors reading the config file
panic(fmt.Errorf("Fatal error config file: %s \n", err))
}
testutil.SetupTestLogging()
// Set the number of maxprocs
var numProcsDesired = viper.GetInt("peer.gomaxprocs")
chaincodeLogger.Debugf("setting Number of procs to %d, was %d\n", numProcsDesired, runtime.GOMAXPROCS(numProcsDesired))
// Init the BCCSP
err = factory.InitFactories(nil)
if err != nil {
panic(fmt.Errorf("Could not initialize BCCSP Factories [%s]", err))
}
}
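// deployChaincode builds a ChaincodeSpec for the given name/version/path, wraps
// it in a mock signed proposal and deploys it on the channel at the given block
// number, returning the deploy result together with the chaincode context.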
func deployChaincode(ctx context.Context, name string, version string, chaincodeType pb.ChaincodeSpec_Type, path string, args [][]byte, creator []byte, channel string, nextBlockNumber uint64) ([]byte, *ccprovider.CCContext, error) {
chaincodeSpec := &pb.ChaincodeSpec{
ChaincodeId: &pb.ChaincodeID{
Name: name,
Version: version,
Path: path,
},
Type: chaincodeType,
Input: &pb.ChaincodeInput{
Args: args,
},
}
signedProposal, proposal := putils.MockSignedEndorserProposal2OrPanic(channel, chaincodeSpec, signer)
chaincodeCtx := ccprovider.NewCCContext(channel, name, version, "", false, signedProposal, proposal)
result, err := deploy(ctx, chaincodeCtx, chaincodeSpec, nextBlockNumber)
if err != nil {
return nil, chaincodeCtx, fmt.Errorf("Error deploying <%s:%s>: %s", name, version, err)
}
return result, chaincodeCtx, nil
}
var signer msp.SigningIdentity
var rng *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano()))
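// generateChaincodeName returns a random chaincode name prefixed by the
// chaincode language (e.g. cc_go_123456) so tests do not collide on names.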
func generateChaincodeName(chaincodeType pb.ChaincodeSpec_Type) string {
prefix := "cc_"
switch chaincodeType {
case pb.ChaincodeSpec_GOLANG:
prefix = "cc_go_"
case pb.ChaincodeSpec_JAVA:
prefix = "cc_java_"
case pb.ChaincodeSpec_NODE:
prefix = "cc_js_"
}
return fmt.Sprintf("%s%06d", prefix, rng.Intn(999999))
}
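// CreatorPolicy is a test policy that accepts a signature set only if the
// creator identity matches one of the configured Creators.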
type CreatorPolicy struct {
Creators [][]byte
}
// Evaluate takes a set of SignedData and evaluates whether this set of signatures satisfies the policy
func (c *CreatorPolicy) Evaluate(signatureSet []*common.SignedData) error {
for _, value := range c.Creators {
if bytes.Compare(signatureSet[0].Identity, value) == 0 {
return nil
}
}
return fmt.Errorf("Creator not recognized [%s]", string(signatureSet[0].Identity))
}
type mockPolicyCheckerFactory struct{}
func (f *mockPolicyCheckerFactory) NewPolicyChecker() policy.PolicyChecker {
return policy.NewPolicyChecker(
peer.NewChannelPolicyManagerGetter(),
&mocks.MockIdentityDeserializer{[]byte("Admin"), []byte("msg1")},
&mocks.MockMSPPrincipalGetter{Principal: []byte("Admin")},
)
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
metricbeat/tests/system/test_couchbase.py
|
import os
import metricbeat
import unittest
from parameterized import parameterized
class Test(metricbeat.BaseTest):
COMPOSE_SERVICES = ['couchbase']
FIELDS = ['couchbase']
@parameterized.expand([
("bucket"),
("cluster"),
("node"),
])
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_couchbase(self, metricset):
"""
couchbase metricsets tests
"""
self.check_metricset("couchbase", metricset, self.get_hosts(), self.FIELDS)
def get_hosts(self):
return ["http://Administrator:password@" +
os.getenv('COUCHBASE_HOST', 'localhost') + ':' +
os.getenv('COUCHBASE_PORT', '8091')]
|
[] |
[] |
[
"COUCHBASE_HOST",
"COUCHBASE_PORT"
] |
[]
|
["COUCHBASE_HOST", "COUCHBASE_PORT"]
|
python
| 2 | 0 | |
elasticsearch/create_index_with_embedding.py
|
import logging
import os
import argparse
import json
import yaml
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from sentence_transformers import SentenceTransformer
import numpy as np
import base64
from es_vec_classifier.use_encoder import USEEncoderAPI, USEEncoder
np.random.seed(123456789) # makes random sampling from training data deterministic between runs
logging.basicConfig()
logger = logging.getLogger("es")
logger.setLevel(logging.INFO)
def create_new(config, embedder):
_create_index_request_body = {
"mappings": {
"properties": {
"question_idx": {
"type": "integer"
},
"question_text": {
"type": "text",
},
"agent": {
"type": "keyword",
},
"embedding_vector": {
"type": "binary",
"doc_values": True
}
}
}
}
es = Elasticsearch([config["es_url"]], scheme="http")
es.cluster.health(wait_for_status='yellow', request_timeout=60*20)
es.indices.delete(index=config["index_name"], ignore=[400, 404], request_timeout=5*60)
es.indices.create(index=config["index_name"], body=_create_index_request_body)
base_path = config.get("base_path", "")
idx = 0
all_data = []
truncate_idx = 0
for (agent, path) in config["agents"].items():
logger.info("Reading questions for agent {} from {}".format(agent, path))
truncate = config.get("truncate", -1)
if isinstance(truncate, list):
truncate = truncate[truncate_idx]
truncate_idx = (truncate_idx+1)%len(config.get("truncate", -1))
data, idx = _read_file(path, base_path, idx, agent, config["index_name"], truncate)
data = _embed_data(data, embedder, config)
logger.info("{} questions".format(len(data)))
all_data.extend(data)
success, failed = bulk(es, all_data, request_timeout=5*60, max_retries=5)
logger.info('Creating index "{}"... (Success={}, Failed={})'.format(config["index_name"], success, failed))
def add(config, embedder):
es = Elasticsearch([config["es_url"]], scheme="http")
base_path = config.get("base_path", "")
res = es.count(index=config["index_name"])
idx = res["count"]
all_data = []
for (agent, path) in config["agents"].items():
logger.info("Reading questions for agent {} from {}".format(agent, path))
data, idx = _read_file(path, base_path, idx, agent, config["index_name"], config.get("truncate", -1))
data = _embed_data(data, embedder, config)
logger.info("{} questions".format(len(data)))
all_data.extend(data)
success, failed = bulk(es, all_data, request_timeout=5*60, max_retries=5)
logger.info('Adding to index "{}"... (Success={}, Failed={})'.format(config["index_name"], success, failed))
def _embed_data(data, embedder, config):
arr = _embed_and_encode([d["_source"]["question_text"] for d in data], embedder, config)
for d, vec in zip(data, arr):
d["_source"]["embedding_vector"] = vec
return data
def _embed_and_encode(sentences, embedder, config):
def encode(vec):
dfloat32 = np.dtype(">f4") # big endian 32bit float
base64_str = base64.b64encode(np.array(vec).astype(dfloat32)).decode("utf-8")
return base64_str
logger.info("Embedding chunk")
arr = embedder.encode(sentences, batch_size=config.get("batchsize", 32))
arr = [encode(vec) for vec in arr]
return arr
def _read_file(paths, base_paths, idx, agent, index, truncate=-1):
if not isinstance(paths, list):
paths = [paths]
if not isinstance(base_paths, list):
base_paths = [base_paths]
data = []
sentences = []
for path in paths:
exists = False  # reset per path so a missing file raises FileNotFoundError below
for base_path in base_paths:
entire_path = os.path.join(base_path, path)
if os.path.isfile(entire_path):
exists = True
with open(entire_path, "r", encoding="utf-8") as f:
for l in f.readlines():
sentences.append(l.strip())
if not exists:
raise FileNotFoundError("{} was not found in any base path".format(path))
chosen_idxs = np.random.choice(len(sentences), truncate) if truncate > 0 else range(len(sentences))
for i in chosen_idxs:
data.append({
"_index": index,
"_source": {
'question_idx': idx,
'question_text': sentences[i],
'agent': agent
}
})
idx += 1
return data, idx
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("config", default="config.yaml")
parser.add_argument("--add", action='store_true')
parser.add_argument("--new", action='store_true')
args = parser.parse_args()
with open(args.config, encoding="utf-8") as f:
config = yaml.safe_load(f)
if "cache_dir" in config:
os.environ['TORCH_HOME'] = config["cache_dir"]
if config["sentence_transformer_model"] == "use-qa":
embedder = USEEncoderAPI()
elif "https" in config["sentence_transformer_model"]:
embedder = USEEncoder(config["sentence_transformer_model"])
else:
embedder = SentenceTransformer(config["sentence_transformer_model"])
if args.new:
create_new(config, embedder)
elif args.add:
add(config, embedder)
else:
logger.info("Add either --new or --add")
|
[] |
[] |
[
"TORCH_HOME"
] |
[]
|
["TORCH_HOME"]
|
python
| 1 | 0 | |
binaryTree/redBlackTree.py
|
"""redBlackTree.py
This module implements a Left-Leaning Red-Black Tree (LLRB).
Insertion & deletion take only O(h) time,
and the height h of an RBT with N nodes is no bigger than 2logN:
- if there are no RED nodes in the tree, h = logN
- if it has the maximum number of RED nodes, h <= 2logN
"""
import bst_utils
"""constant"""
RED = True
BLACK = False
class Node():
def __init__(self, key, value, color):
self.key = key
self.value = value
self.color = color
self.left = None
self.right = None
def __repr__(self):
color = 'RED'
if self.color == BLACK:
color = 'BLACK'
return 'Node({},{},{})'.format(self.key, repr(self.value), color)
class RedBlackTree():
def __init__(self):
self.root = None
def is_empty(self):
return self.root is None
def is_red(self, node):
if node is None:
return False
return node.color == RED
def search(self, key):
return self._search(self.root, key)
def _search(self, node, key):
searched, parent = self._search_node(node, key)
if searched is None:
return None
else:
return searched.value
def _search_node(self, node, key, parent=None):
if node is None:
return None, None
if key < node.key:
return self._search_node(node.left, key, parent=node)
elif key > node.key:
return self._search_node(node.right, key, parent=node)
else:
return node, parent
def search_less_near(self, key):
return self._search_less_near(self.root, key)
def _search_less_near(self, node, key, largest=None):
"""
search for the node that has smaller and nearest key (not equal)
"""
if node is None:
return largest
elif key < node.key:
return self._search_less_near(node.left, key, largest=largest)
elif key > node.key:
if largest is None or largest.key < node.key:
largest = node
return self._search_less_near(node.right, key, largest=largest)
else:
return self._search_less_near(node.left, key, largest=largest)
def search_greater_near(self, key):
return self._search_greater_near(self.root, key)
def _search_greater_near(self, node, key, smallest=None):
"""
search for the node that has greater and nearest key (not equal)
"""
if node is None:
return smallest
elif key < node.key:
if smallest is None or smallest.key > node.key:
smallest = node
return self._search_greater_near(node.left, key, smallest=smallest)
elif key > node.key:
return self._search_greater_near(node.right, key, smallest=smallest)
else:
return self._search_greater_near(node.right, key, smallest=smallest)
def rotate_left(self, node):
"""
move the right red link of a node to the left
initial status: node is BLACK, node.right is RED
this method rotates these two nodes counter-clockwise and makes the new left child RED
(the old right child takes over node's original color)
-> node.right moves to node's original position, and node becomes the left child of node.right
"""
t = node.right
node.right = t.left
t.left = node
t.color = node.color
node.color = RED
return t
def rotate_right(self, node):
"""
move the left red link of a node to the right
opposite case of rotate_left(node)
"""
t = node.left
node.left = t.right
t.right = node
t.color = node.color
node.color = RED
return t
def flip_colors(self, node):
"""
if two links' color are the same, change both to the other color,
and make the parent's color reversed also
"""
node.color = not node.color
node.left.color = not node.left.color
node.right.color = not node.right.color
def insert(self, key, value):
self.root = self._insert(self.root, key, value)
self.root.color = BLACK
def _insert(self, node, key, value):
"""
insert a new node(RED)
case 0: right child is RED, left child is BLACK -> rotate_left
case 1: left child is RED, left child's child is also RED -> rotate_right
case 2: both two children are RED -> flip_colors
"""
if node is None:
return Node(key, value, RED)
if key < node.key:
node.left = self._insert(node.left, key, value)
elif key > node.key:
node.right = self._insert(node.right, key, value)
else:
node.value = value
if (not self.is_red(node.left)) and self.is_red(node.right):
node = self.rotate_left(node)
if self.is_red(node.left) and self.is_red(node.left.left):
node = self.rotate_right(node)
if self.is_red(node.left) and self.is_red(node.right):
self.flip_colors(node)
return node
def move_red_left(self, node):
"""
make the red color node on the left side for deleting a node
case 0: node.left & node.left.left are all BLACK, node.right.left is also BLACK -> flip_colors
case 1: node.left & node.left.left are all BLACK, and node.right.left is RED -> move RED to the left
"""
self.flip_colors(node)
if self.is_red(node.right.left):
node.right = self.rotate_right(node.right)
node = self.rotate_left(node)
self.flip_colors(node)
return node
def move_red_right(self, node):
"""
make the red color node on the right side for deleting a node
"""
self.flip_colors(node)
if self.is_red(node.left.left):
node = self.rotate_right(node)
self.flip_colors(node)
return node
def minimum_node(self, node):
"""get the minimum key node from the subtree"""
if node.left is None:
return node
else:
return self.minimum_node(node.left)
def delete_min(self):
"""delete a node which has the min key"""
self.root = self._delete_min(self.root)
if self.root is not None:
self.root.color = BLACK
def _delete_min(self, node):
if node.left is None:
return None
if (not self.is_red(node.left)) and (not self.is_red(node.left.left)):
node = self.move_red_left(node)
node.left = self._delete_min(node.left)
return self.fix_up(node)
def fix_up(self, node):
"""fix the structure of RBT after deleting a node"""
if self.is_red(node.right):
node = self.rotate_left(node)
if self.is_red(node.left) and self.is_red(node.left.left):
node = self.rotate_right(node)
if self.is_red(node.left) and self.is_red(node.right):
self.flip_colors(node)
return node
def delete(self, key):
self.root = self._delete(self.root, key)
if self.root is not None:
self.root.color = BLACK
def _delete(self, node, key):
if key < node.key:
if node.left is None:
raise KeyError(key)
if not (self.is_red(node.left)) and (not self.is_red(node.left.left)):
node = self.move_red_left(node)
node.left = self._delete(node.left, key)
return self.fix_up(node)  # re-balance on the way up (mirrors the else-branch below)
else:
if self.is_red(node.left):
node = self.rotate_right(node)
if key == node.key and node.right is None:
return node.left
elif node.right is None:
raise KeyError(key)
elif (not self.is_red(node.right)) and (not self.is_red(node.right.left)):
node = self.move_red_right(node)
if key == node.key: # and node.right is not None:
# remove root
min_node = self.minimum_node(node.right)
node.key = min_node.key
node.value = min_node.value
node.right = self._delete_min(node.right)
else:
node.right = self._delete(node.right, key)
return self.fix_up(node)
if __name__ == '__main__':
"""test"""
rbt = RedBlackTree()
rbt.insert(12, 'A')
rbt.insert(9, 'B')
rbt.insert(15, 'C')
rbt.insert(16, 'D')
rbt.insert(18, 'E')
print(rbt._search_less_near(rbt.root, 12))
print('after insertion')
#bst_utils.pre_order(rbt.root)
bst_utils.print_tree(rbt.root)
print('delete min')
rbt.delete_min()
print('after deletion')
#bst_utils.pre_order(rbt.root)
bst_utils.print_tree(rbt.root)
print('delete 15')
rbt.delete(15)  # use the public API so the root reference is updated
print('after deletion')
#bst_utils.pre_order(rbt.root)
bst_utils.print_tree(rbt.root)
print('abc', rbt.root.left.right)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
src/grouping.py
|
import re
from pathlib import Path
from src import model
def groupImages(images: list[model.ImageInfo]) -> list[list[model.ImageInfo]]:
"""Aggregate images into groups of the same file name by index range"""
previousPath = None
previousIndex = None
results: list[list[model.ImageInfo]] = []
currentGroup: list[model.ImageInfo] = []
for image in images:
# Grab this image's directory and numeric index
p = Path(image.filePath)
path = p.parents[0]
index = int(re.findall(r"\d+", p.stem)[0])
# Is this a new group?
isNewPath = path != previousPath
isUnExpectedIndex = previousIndex is None or index != previousIndex + 1
# print(f"PATH: {p} - New? {isNewPath} Index: {isUnExpectedIndex}")
if isNewPath or isUnExpectedIndex:
# Create a new group and add it to our collection of groups
currentGroup = []
results.append(currentGroup)
previousPath = path
previousIndex = index
currentGroup.append(image)
return results
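# Example (hypothetical file names): IMG_001.jpg, IMG_002.jpg, IMG_005.jpg in one folder
# yields two groups, [IMG_001, IMG_002] and [IMG_005], since 005 breaks the consecutive index run.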
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
config/settings/common.py
|
# -*- coding: utf-8 -*-
"""
Django settings for MAD Web project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import json
import os
import datetime
import environ
from django.core.exceptions import ImproperlyConfigured
ROOT_DIR = environ.Path(__file__) - 3 # (texaslan/config/settings/common.py - 3 = texaslan/)
APPS_DIR = ROOT_DIR.path('texaslan')
# JSON-based config file
# ------------------------------------------------------------------------------
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(__location__, 'config.json')) as file:
config = json.loads(file.read())
def get_config(key, config=config):
try:
return config[key]
except KeyError:
error_message = "Set the {0} config variable".format(key)
raise ImproperlyConfigured(error_message)
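# Usage: DEBUG = get_config('DEBUG') (see below); a missing key raises ImproperlyConfigured.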
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'oauth2_provider', # OAuth Provider
'django_slack_oauth', # Slack
'rest_framework', # Django REST framework
)
# Apps specific for this project go here.
LOCAL_APPS = (
'texaslan',
# custom users app
'texaslan.users.apps.UsersConfig',
# Your stuff: custom apps go here
'texaslan.events.apps.EventsConfig',
'texaslan.go.apps.GoConfig',
'texaslan.notify.apps.NotifyConfig',
'texaslan.comments.apps.CommentsConfig',
'texaslan.applications.apps.ApplicationsConfig',
'texaslan.voting.apps.VotingConfig',
'texaslan.site_settings.apps.SiteSettingsConfig'
)
CLEANUP_APP = (
# has to go last in order to work
'django_cleanup',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS + CLEANUP_APP
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'oauth2_provider.middleware.OAuth2TokenMiddleware',
'texaslan.api.middleware.JWTAuthMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'texaslan.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = get_config('DEBUG')
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = ()
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_WEBMASTER = '[email protected]'
DEFAULT_FROM_EMAIL = 'LAN <[email protected]>'
EMAIL_SUBJECT_PREFIX = '[LAN] '
SERVER_EMAIL = DEFAULT_FROM_EMAIL
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Drew Romanyk""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured if DATABASE is missing from config.json
'default': get_config("DATABASE")
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
'texaslan.photos.context_processor.photos_url'
],
},
},
]
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'oauth2_provider.backends.OAuth2Backend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_SIGNUP_FORM_CLASS = 'texaslan.users.forms.UserSignupForm'
ACCOUNT_ADAPTER = 'texaslan.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'texaslan.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# django-compressor
# ------------------------------------------------------------------------------
# Sendgrid
# ------------------------------------------------------------------------------
SENDGRID_API_KEY = get_config("SENDGRID_API_KEY")
SENDGRID_MAILING_LIST_ID = get_config("SENDGRID_MAILING_LIST_ID")
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# Slack
SLACK_CLIENT_ID = get_config("SLACK_CLIENT_ID")
SLACK_CLIENT_SECRET = get_config("SLACK_CLIENT_SECRET")
SLACK_SCOPE = 'files:read,files:write:user,users:read'
SLACK_SUCCESS_REDIRECT_URL = '/users/~update/'
SLACK_PIPELINES = [
'texaslan.slack.pipelines.on_success.register_token',
]
# Photos
PHOTOS_DRIVE_FOLDER_URL = get_config("PHOTOS_DRIVE_FOLDER_URL")
# Django Rest JWT
JWT_AUTH = {
'JWT_RESPONSE_PAYLOAD_HANDLER': 'texaslan.utils.utils.jwt_response_payload_format',
'JWT_ALLOW_REFRESH': True,
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=90),
}
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
build.go
|
package pack
import (
"context"
"crypto/rand"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"github.com/Masterminds/semver"
"github.com/buildpacks/imgutil"
"github.com/buildpacks/imgutil/local"
"github.com/buildpacks/imgutil/remote"
"github.com/buildpacks/lifecycle/platform"
"github.com/docker/docker/api/types"
"github.com/docker/docker/volume/mounts"
"github.com/google/go-containerregistry/pkg/name"
"github.com/pkg/errors"
ignore "github.com/sabhiram/go-gitignore"
"github.com/buildpacks/pack/config"
"github.com/buildpacks/pack/internal/blob"
"github.com/buildpacks/pack/internal/build"
"github.com/buildpacks/pack/internal/builder"
"github.com/buildpacks/pack/internal/buildpack"
"github.com/buildpacks/pack/internal/buildpackage"
internalConfig "github.com/buildpacks/pack/internal/config"
"github.com/buildpacks/pack/internal/dist"
"github.com/buildpacks/pack/internal/image"
"github.com/buildpacks/pack/internal/layer"
pname "github.com/buildpacks/pack/internal/name"
"github.com/buildpacks/pack/internal/stack"
"github.com/buildpacks/pack/internal/stringset"
"github.com/buildpacks/pack/internal/style"
"github.com/buildpacks/pack/logging"
"github.com/buildpacks/pack/pkg/archive"
"github.com/buildpacks/pack/project"
)
const (
minLifecycleVersionSupportingCreator = "0.7.4"
prevLifecycleVersionSupportingImage = "0.6.1"
minLifecycleVersionSupportingImage = "0.7.5"
)
// LifecycleExecutor executes the lifecycle which satisfies the Cloud Native Buildpacks Lifecycle specification.
// Implementations of the Lifecycle must execute the following phases by calling the
// phase-specific lifecycle binary in order:
//
// Detection: /cnb/lifecycle/detector
// Analysis: /cnb/lifecycle/analyzer
// Cache Restoration: /cnb/lifecycle/restorer
// Build: /cnb/lifecycle/builder
// Export: /cnb/lifecycle/exporter
//
// or invoke the single creator binary:
//
// Creator: /cnb/lifecycle/creator
//
type LifecycleExecutor interface {
// Execute is responsible for invoking each of these binaries
// with the desired configuration.
Execute(ctx context.Context, opts build.LifecycleOptions) error
}
// BuildOptions defines configuration settings for a Build.
type BuildOptions struct {
// The base directory to use to resolve relative assets
RelativeBaseDir string
// required. Name of output image.
Image string
// required. Builder image name.
Builder string
// Name of the buildpack registry. Used to
// add buildpacks to a build.
Registry string
// AppPath is the path to application bits.
// If unset it defaults to current working directory.
AppPath string
// Specify the run image the Image will be
// built atop.
RunImage string
// Address of docker daemon exposed to build container
// e.g. tcp://example.com:1234, unix:///run/user/1000/podman/podman.sock
DockerHost string
// Used to determine a run-image mirror if Run Image is empty.
// Used in combination with Builder metadata to determine the 'best' mirror.
// 'best' is defined as:
// - if Publish is true, the best mirror matches registry we are publishing to.
// - if Publish is false, the best mirror matches a registry specified in Image.
// - otherwise if both of the above did not match, use mirror specified in
// the builder metadata
AdditionalMirrors map[string][]string
// User provided environment variables to the buildpacks.
// Buildpacks may both read and overwrite these values.
Env map[string]string
// Option only valid if Publish is true
// Create an additional image that contains cache=true layers and push it to the registry.
CacheImage string
// Option passed directly to the lifecycle.
// If true, publishes Image directly to a registry.
// Assumes Image contains a valid registry with credentials
// provided by the docker client.
Publish bool
// Clear the build cache from previous builds.
ClearCache bool
// TrustBuilder when true optimizes builds by running
// all lifecycle phases in a single container.
// This places registry credentials on the builder's build image.
// Only trust builders from reputable sources.
TrustBuilder bool
// List of buildpack images or archives to add to a builder.
// These buildpacks may overwrite those on the builder if they
// share both an ID and Version with a buildpack on the builder.
Buildpacks []string
// Additional image tags to push to, each will contain contents identical to Image
AdditionalTags []string
// Configure the proxy environment variables,
// These variables will only be set in the build image
// and will not be used if proxy env vars are already set.
ProxyConfig *ProxyConfig
// Configure network and volume mounts for the build containers.
ContainerConfig ContainerConfig
// Process type that will be used when setting container start command.
DefaultProcessType string
// Strategy for updating local images before a build.
PullPolicy config.PullPolicy
// ProjectDescriptorBaseDir is the base directory to find relative resources referenced by the ProjectDescriptor
ProjectDescriptorBaseDir string
// ProjectDescriptor describes the project and any configuration specific to the project
ProjectDescriptor project.Descriptor
// The lifecycle image that will be used for the analysis, restore and export phases
// when using an untrusted builder.
LifecycleImage string
// The location at which to mount the AppDir in the build image.
Workspace string
// User's group id used to build the image
GroupID int
// A previous image to set to a particular tag reference, digest reference, or (when performing a daemon build) image ID;
PreviousImage string
}
// ProxyConfig specifies proxy setting to be set as environment variables in a container.
type ProxyConfig struct {
HTTPProxy string // Used to set HTTP_PROXY env var.
HTTPSProxy string // Used to set HTTPS_PROXY env var.
NoProxy string // Used to set NO_PROXY env var.
}
// ContainerConfig is additional configuration of the docker container that all build steps
// occur within.
type ContainerConfig struct {
// Configure network settings of the build containers.
// The value of Network is handed directly to the docker client.
// For valid values of this field see:
// https://docs.docker.com/network/#network-drivers
Network string
// Volumes are accessible during both detect and build phases and
// should have the form: /path/in/host:/path/in/container.
// For more about volume mounts, and their permissions see:
// https://docs.docker.com/storage/volumes/
//
// It is strongly recommended you do not override any of the
// paths with volume mounts at the following locations:
// - /cnb
// - /layers
// - anything below /cnb/**
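//
// For example, "/home/user/cache:/cache:ro" mounts a host directory read-only
// (when the mode suffix is omitted it defaults to "ro", see processMode).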
Volumes []string
}
// Build configures settings for the build container(s) and lifecycle.
// It then invokes the lifecycle to build an app image.
// If any configuration is deemed invalid, or if any lifecycle phases fail,
// an error will be returned and no image produced.
func (c *Client) Build(ctx context.Context, opts BuildOptions) error {
imageRef, err := c.parseTagReference(opts.Image)
if err != nil {
return errors.Wrapf(err, "invalid image name '%s'", opts.Image)
}
appPath, err := c.processAppPath(opts.AppPath)
if err != nil {
return errors.Wrapf(err, "invalid app path '%s'", opts.AppPath)
}
proxyConfig := c.processProxyConfig(opts.ProxyConfig)
builderRef, err := c.processBuilderName(opts.Builder)
if err != nil {
return errors.Wrapf(err, "invalid builder '%s'", opts.Builder)
}
rawBuilderImage, err := c.imageFetcher.Fetch(ctx, builderRef.Name(), image.FetchOptions{Daemon: true, PullPolicy: opts.PullPolicy})
if err != nil {
return errors.Wrapf(err, "failed to fetch builder image '%s'", builderRef.Name())
}
bldr, err := c.getBuilder(rawBuilderImage)
if err != nil {
return errors.Wrapf(err, "invalid builder %s", style.Symbol(opts.Builder))
}
runImageName := c.resolveRunImage(opts.RunImage, imageRef.Context().RegistryStr(), builderRef.Context().RegistryStr(), bldr.Stack(), opts.AdditionalMirrors, opts.Publish)
runImage, err := c.validateRunImage(ctx, runImageName, opts.PullPolicy, opts.Publish, bldr.StackID)
if err != nil {
return errors.Wrapf(err, "invalid run-image '%s'", runImageName)
}
var runMixins []string
if _, err := dist.GetLabel(runImage, stack.MixinsLabel, &runMixins); err != nil {
return err
}
fetchedBPs, order, err := c.processBuildpacks(ctx, bldr.Image(), bldr.Buildpacks(), bldr.Order(), bldr.StackID, opts)
if err != nil {
return err
}
if err := c.validateMixins(fetchedBPs, bldr, runImageName, runMixins); err != nil {
return errors.Wrap(err, "validating stack mixins")
}
buildEnvs := map[string]string{}
for _, envVar := range opts.ProjectDescriptor.Build.Env {
buildEnvs[envVar.Name] = envVar.Value
}
for k, v := range opts.Env {
buildEnvs[k] = v
}
ephemeralBuilder, err := c.createEphemeralBuilder(rawBuilderImage, buildEnvs, order, fetchedBPs)
if err != nil {
return err
}
defer c.docker.ImageRemove(context.Background(), ephemeralBuilder.Name(), types.ImageRemoveOptions{Force: true})
builderPlatformAPIs := append(
ephemeralBuilder.LifecycleDescriptor().APIs.Platform.Deprecated,
ephemeralBuilder.LifecycleDescriptor().APIs.Platform.Supported...,
)
if !supportsPlatformAPI(builderPlatformAPIs) {
c.logger.Debugf("pack %s supports Platform API(s): %s", Version, strings.Join(build.SupportedPlatformAPIVersions.AsStrings(), ", "))
c.logger.Debugf("Builder %s supports Platform API(s): %s", style.Symbol(opts.Builder), strings.Join(builderPlatformAPIs.AsStrings(), ", "))
return errors.Errorf("Builder %s is incompatible with this version of pack", style.Symbol(opts.Builder))
}
imgOS, err := rawBuilderImage.OS()
if err != nil {
return errors.Wrapf(err, "getting builder OS")
}
processedVolumes, warnings, err := processVolumes(imgOS, opts.ContainerConfig.Volumes)
if err != nil {
return err
}
for _, warning := range warnings {
c.logger.Warn(warning)
}
fileFilter, err := getFileFilter(opts.ProjectDescriptor)
if err != nil {
return err
}
version := opts.ProjectDescriptor.Project.Version
sourceURL := opts.ProjectDescriptor.Project.SourceURL
runImageName, err = pname.TranslateRegistry(runImageName, c.registryMirrors, c.logger)
if err != nil {
return err
}
lifecycleOpts := build.LifecycleOptions{
AppPath: appPath,
Image: imageRef,
Builder: ephemeralBuilder,
LifecycleImage: ephemeralBuilder.Name(),
RunImage: runImageName,
ProjectMetadata: platform.ProjectMetadata{Source: &platform.ProjectSource{
Type: "project",
Version: map[string]interface{}{"declared": version},
Metadata: map[string]interface{}{"url": sourceURL},
}},
ProjectPath: "",
ClearCache: opts.ClearCache,
Publish: opts.Publish,
TrustBuilder: opts.TrustBuilder,
UseCreator: false,
DockerHost: opts.DockerHost,
CacheImage: opts.CacheImage,
HTTPProxy: proxyConfig.HTTPProxy,
HTTPSProxy: proxyConfig.HTTPSProxy,
NoProxy: proxyConfig.NoProxy,
Network: opts.ContainerConfig.Network,
AdditionalTags: opts.AdditionalTags,
Volumes: processedVolumes,
DefaultProcessType: opts.DefaultProcessType,
FileFilter: fileFilter,
Workspace: opts.Workspace,
GID: opts.GroupID,
PreviousImage: opts.PreviousImage,
}
lifecycleVersion := ephemeralBuilder.LifecycleDescriptor().Info.Version
// Technically the creator is supported as of platform API version 0.3 (lifecycle version 0.7.0+) but earlier versions
// have bugs that make using the creator problematic.
lifecycleSupportsCreator := !lifecycleVersion.LessThan(semver.MustParse(minLifecycleVersionSupportingCreator))
if lifecycleSupportsCreator && opts.TrustBuilder {
lifecycleOpts.UseCreator = true
// no need to fetch a lifecycle image, it won't be used
if err := c.lifecycleExecutor.Execute(ctx, lifecycleOpts); err != nil {
return errors.Wrap(err, "executing lifecycle")
}
return c.logImageNameAndSha(ctx, opts.Publish, imageRef)
}
if !opts.TrustBuilder {
if lifecycleImageSupported(imgOS, lifecycleVersion) {
lifecycleImageName := opts.LifecycleImage
if lifecycleImageName == "" {
lifecycleImageName = fmt.Sprintf("%s:%s", internalConfig.DefaultLifecycleImageRepo, lifecycleVersion.String())
}
imgArch, err := rawBuilderImage.Architecture()
if err != nil {
return errors.Wrapf(err, "getting builder architecture")
}
lifecycleImage, err := c.imageFetcher.Fetch(
ctx,
lifecycleImageName,
image.FetchOptions{Daemon: true, PullPolicy: opts.PullPolicy, Platform: fmt.Sprintf("%s/%s", imgOS, imgArch)},
)
if err != nil {
return errors.Wrap(err, "fetching lifecycle image")
}
lifecycleOpts.LifecycleImage = lifecycleImage.Name()
} else {
return errors.Errorf("Lifecycle %s does not have an associated lifecycle image. Builder must be trusted.", lifecycleVersion.String())
}
}
if err := c.lifecycleExecutor.Execute(ctx, lifecycleOpts); err != nil {
return errors.Wrap(err, "executing lifecycle. This may be the result of using an untrusted builder")
}
return c.logImageNameAndSha(ctx, opts.Publish, imageRef)
}
func getFileFilter(descriptor project.Descriptor) (func(string) bool, error) {
if len(descriptor.Build.Exclude) > 0 {
excludes := ignore.CompileIgnoreLines(descriptor.Build.Exclude...)
return func(fileName string) bool {
return !excludes.MatchesPath(fileName)
}, nil
}
if len(descriptor.Build.Include) > 0 {
includes := ignore.CompileIgnoreLines(descriptor.Build.Include...)
return includes.MatchesPath, nil
}
return nil, nil
}
func lifecycleImageSupported(builderOS string, lifecycleVersion *builder.Version) bool {
return lifecycleVersion.Equal(builder.VersionMustParse(prevLifecycleVersionSupportingImage)) ||
!lifecycleVersion.LessThan(semver.MustParse(minLifecycleVersionSupportingImage))
}
// supportsPlatformAPI determines whether pack can build using the builder based on the builder's supported Platform API versions.
func supportsPlatformAPI(builderPlatformAPIs builder.APISet) bool {
for _, packSupportedAPI := range build.SupportedPlatformAPIVersions {
for _, builderSupportedAPI := range builderPlatformAPIs {
supportsPlatform := packSupportedAPI.Compare(builderSupportedAPI) == 0
if supportsPlatform {
return true
}
}
}
return false
}
func (c *Client) processBuilderName(builderName string) (name.Reference, error) {
if builderName == "" {
return nil, errors.New("builder is a required parameter if the client has no default builder")
}
return name.ParseReference(builderName, name.WeakValidation)
}
func (c *Client) getBuilder(img imgutil.Image) (*builder.Builder, error) {
bldr, err := builder.FromImage(img)
if err != nil {
return nil, err
}
if bldr.Stack().RunImage.Image == "" {
return nil, errors.New("builder metadata is missing run-image")
}
lifecycleDescriptor := bldr.LifecycleDescriptor()
if lifecycleDescriptor.Info.Version == nil {
return nil, errors.New("lifecycle version must be specified in builder")
}
if len(lifecycleDescriptor.APIs.Buildpack.Supported) == 0 {
return nil, errors.New("supported Lifecycle Buildpack APIs not specified")
}
if len(lifecycleDescriptor.APIs.Platform.Supported) == 0 {
return nil, errors.New("supported Lifecycle Platform APIs not specified")
}
return bldr, nil
}
func (c *Client) validateRunImage(context context.Context, name string, pullPolicy config.PullPolicy, publish bool, expectedStack string) (imgutil.Image, error) {
if name == "" {
return nil, errors.New("run image must be specified")
}
img, err := c.imageFetcher.Fetch(context, name, image.FetchOptions{Daemon: !publish, PullPolicy: pullPolicy})
if err != nil {
return nil, err
}
stackID, err := img.Label("io.buildpacks.stack.id")
if err != nil {
return nil, err
}
if stackID != expectedStack {
return nil, fmt.Errorf("run-image stack id '%s' does not match builder stack '%s'", stackID, expectedStack)
}
return img, nil
}
func (c *Client) validateMixins(additionalBuildpacks []dist.Buildpack, bldr *builder.Builder, runImageName string, runMixins []string) error {
if err := stack.ValidateMixins(bldr.Image().Name(), bldr.Mixins(), runImageName, runMixins); err != nil {
return err
}
bps, err := allBuildpacks(bldr.Image(), additionalBuildpacks)
if err != nil {
return err
}
mixins := assembleAvailableMixins(bldr.Mixins(), runMixins)
for _, bp := range bps {
if err := bp.EnsureStackSupport(bldr.StackID, mixins, true); err != nil {
return err
}
}
return nil
}
// assembleAvailableMixins returns the set of mixins that are common between the two provided sets, plus build-only mixins and run-only mixins.
func assembleAvailableMixins(buildMixins, runMixins []string) []string {
// NOTE: We cannot simply union the two mixin sets, as this could introduce a mixin that is only present on one stack
// image but not the other. A buildpack that happens to require the mixin would fail to run properly, even though validation
// would pass.
//
// For example:
//
// Incorrect:
// Run image mixins: [A, B]
// Build image mixins: [A]
// Merged: [A, B]
// Buildpack requires: [A, B]
// Match? Yes
//
// Correct:
// Run image mixins: [A, B]
// Build image mixins: [A]
// Merged: [A]
// Buildpack requires: [A, B]
// Match? No
buildOnly := stack.FindStageMixins(buildMixins, "build")
runOnly := stack.FindStageMixins(runMixins, "run")
_, _, common := stringset.Compare(buildMixins, runMixins)
return append(common, append(buildOnly, runOnly...)...)
}
// allBuildpacks aggregates all buildpacks declared on the image with additional buildpacks passed in. They are sorted
// by ID then Version.
func allBuildpacks(builderImage imgutil.Image, additionalBuildpacks []dist.Buildpack) ([]dist.BuildpackDescriptor, error) {
var all []dist.BuildpackDescriptor
var bpLayers dist.BuildpackLayers
if _, err := dist.GetLabel(builderImage, dist.BuildpackLayersLabel, &bpLayers); err != nil {
return nil, err
}
for id, bps := range bpLayers {
for ver, bp := range bps {
desc := dist.BuildpackDescriptor{
Info: dist.BuildpackInfo{
ID: id,
Version: ver,
},
Stacks: bp.Stacks,
Order: bp.Order,
}
all = append(all, desc)
}
}
for _, bp := range additionalBuildpacks {
all = append(all, bp.Descriptor())
}
sort.Slice(all, func(i, j int) bool {
if all[i].Info.ID != all[j].Info.ID {
return all[i].Info.ID < all[j].Info.ID
}
return all[i].Info.Version < all[j].Info.Version
})
return all, nil
}
func (c *Client) processAppPath(appPath string) (string, error) {
var (
resolvedAppPath string
err error
)
if appPath == "" {
if appPath, err = os.Getwd(); err != nil {
return "", errors.Wrap(err, "get working dir")
}
}
if resolvedAppPath, err = filepath.EvalSymlinks(appPath); err != nil {
return "", errors.Wrap(err, "evaluate symlink")
}
if resolvedAppPath, err = filepath.Abs(resolvedAppPath); err != nil {
return "", errors.Wrap(err, "resolve absolute path")
}
fi, err := os.Stat(resolvedAppPath)
if err != nil {
return "", errors.Wrap(err, "stat file")
}
if !fi.IsDir() {
isZip, err := archive.IsZip(filepath.Clean(resolvedAppPath))
if err != nil {
return "", errors.Wrap(err, "check zip")
}
if !isZip {
return "", errors.New("app path must be a directory or zip")
}
}
return resolvedAppPath, nil
}
func (c *Client) processProxyConfig(config *ProxyConfig) ProxyConfig {
var (
httpProxy, httpsProxy, noProxy string
ok bool
)
if config != nil {
return *config
}
if httpProxy, ok = os.LookupEnv("HTTP_PROXY"); !ok {
httpProxy = os.Getenv("http_proxy")
}
if httpsProxy, ok = os.LookupEnv("HTTPS_PROXY"); !ok {
httpsProxy = os.Getenv("https_proxy")
}
if noProxy, ok = os.LookupEnv("NO_PROXY"); !ok {
noProxy = os.Getenv("no_proxy")
}
return ProxyConfig{
HTTPProxy: httpProxy,
HTTPSProxy: httpsProxy,
NoProxy: noProxy,
}
}
// processBuildpacks computes an order group based on the existing builder order and declared buildpacks. Additionally,
// it returns buildpacks that should be added to the builder.
//
// Visual examples:
//
// BUILDER ORDER
// ----------
// - group:
// - A
// - B
// - group:
// - A
//
// WITH DECLARED: "from=builder", X
// ----------
// - group:
// - A
// - B
// - X
// - group:
// - A
// - X
//
// WITH DECLARED: X, "from=builder", Y
// ----------
// - group:
// - X
// - A
// - B
// - Y
// - group:
// - X
// - A
// - Y
//
// WITH DECLARED: X
// ----------
// - group:
// - X
//
// WITH DECLARED: A
// ----------
// - group:
// - A
func (c *Client) processBuildpacks(ctx context.Context, builderImage imgutil.Image, builderBPs []dist.BuildpackInfo, builderOrder dist.Order, stackID string, opts BuildOptions) (fetchedBPs []dist.Buildpack, order dist.Order, err error) {
pullPolicy := opts.PullPolicy
publish := opts.Publish
registry := opts.Registry
relativeBaseDir := opts.RelativeBaseDir
declaredBPs := opts.Buildpacks
// declare buildpacks provided by project descriptor when no buildpacks are declared
if len(declaredBPs) == 0 && len(opts.ProjectDescriptor.Build.Buildpacks) != 0 {
relativeBaseDir = opts.ProjectDescriptorBaseDir
for _, bp := range opts.ProjectDescriptor.Build.Buildpacks {
switch {
case bp.ID != "" && bp.Script.Inline != "" && bp.Version == "" && bp.URI == "":
if bp.Script.API == "" {
return nil, nil, errors.New("Missing API version for inline buildpack")
}
pathToInlineBuildpack, err := createInlineBuildpack(bp, stackID)
if err != nil {
return nil, nil, errors.Wrap(err, "Could not create temporary inline buildpack")
}
declaredBPs = append(declaredBPs, pathToInlineBuildpack)
case bp.URI != "":
declaredBPs = append(declaredBPs, bp.URI)
case bp.ID != "" && bp.Version != "":
declaredBPs = append(declaredBPs, fmt.Sprintf("%s@%s", bp.ID, bp.Version))
default:
return nil, nil, errors.New("Invalid buildpack defined in project descriptor")
}
}
}
order = dist.Order{{Group: []dist.BuildpackRef{}}}
for _, bp := range declaredBPs {
locatorType, err := buildpack.GetLocatorType(bp, relativeBaseDir, builderBPs)
if err != nil {
return nil, nil, err
}
switch locatorType {
case buildpack.FromBuilderLocator:
switch {
case len(order) == 0 || len(order[0].Group) == 0:
order = builderOrder
case len(order) > 1:
// This should only ever be possible if they are using from=builder twice which we don't allow
return nil, nil, errors.New("buildpacks from builder can only be defined once")
default:
newOrder := dist.Order{}
groupToAdd := order[0].Group
for _, bOrderEntry := range builderOrder {
newEntry := dist.OrderEntry{Group: append(groupToAdd, bOrderEntry.Group...)}
newOrder = append(newOrder, newEntry)
}
order = newOrder
}
case buildpack.IDLocator:
id, version := buildpack.ParseIDLocator(bp)
order = appendBuildpackToOrder(order, dist.BuildpackInfo{
ID: id,
Version: version,
})
default:
imageOS, err := builderImage.OS()
if err != nil {
return fetchedBPs, order, errors.Wrapf(err, "getting OS from %s", style.Symbol(builderImage.Name()))
}
mainBP, depBPs, err := c.BuildpackDownloader.Download(ctx, bp, BuildpackDownloadOptions{
RegistryName: registry,
ImageOS: imageOS,
RelativeBaseDir: relativeBaseDir,
Daemon: !publish,
PullPolicy: pullPolicy,
})
if err != nil {
return fetchedBPs, order, errors.Wrap(err, "downloading buildpack")
}
fetchedBPs = append(append(fetchedBPs, mainBP), depBPs...)
order = appendBuildpackToOrder(order, mainBP.Descriptor().Info)
}
}
return fetchedBPs, order, nil
}
func appendBuildpackToOrder(order dist.Order, bpInfo dist.BuildpackInfo) (newOrder dist.Order) {
for _, orderEntry := range order {
newEntry := orderEntry
newEntry.Group = append(newEntry.Group, dist.BuildpackRef{
BuildpackInfo: bpInfo,
Optional: false,
})
newOrder = append(newOrder, newEntry)
}
return newOrder
}
// decomposeBuildpack decomposes a buildpack blob into the main buildpack (order buildpack) and its dependency buildpacks.
func decomposeBuildpack(blob blob.Blob, imageOS string) (mainBP dist.Buildpack, depBPs []dist.Buildpack, err error) {
isOCILayout, err := buildpackage.IsOCILayoutBlob(blob)
if err != nil {
return mainBP, depBPs, errors.Wrap(err, "inspecting buildpack blob")
}
if isOCILayout {
mainBP, depBPs, err = buildpackage.BuildpacksFromOCILayoutBlob(blob)
if err != nil {
return mainBP, depBPs, errors.Wrap(err, "extracting buildpacks")
}
} else {
layerWriterFactory, err := layer.NewWriterFactory(imageOS)
if err != nil {
return mainBP, depBPs, errors.Wrapf(err, "get tar writer factory for OS %s", style.Symbol(imageOS))
}
mainBP, err = dist.BuildpackFromRootBlob(blob, layerWriterFactory)
if err != nil {
return mainBP, depBPs, errors.Wrap(err, "reading buildpack")
}
}
return mainBP, depBPs, nil
}
func (c *Client) createEphemeralBuilder(rawBuilderImage imgutil.Image, env map[string]string, order dist.Order, buildpacks []dist.Buildpack) (*builder.Builder, error) {
origBuilderName := rawBuilderImage.Name()
bldr, err := builder.New(rawBuilderImage, fmt.Sprintf("pack.local/builder/%x:latest", randString(10)))
if err != nil {
return nil, errors.Wrapf(err, "invalid builder %s", style.Symbol(origBuilderName))
}
bldr.SetEnv(env)
for _, bp := range buildpacks {
bpInfo := bp.Descriptor().Info
c.logger.Debugf("Adding buildpack %s version %s to builder", style.Symbol(bpInfo.ID), style.Symbol(bpInfo.Version))
bldr.AddBuildpack(bp)
}
if len(order) > 0 && len(order[0].Group) > 0 {
c.logger.Debug("Setting custom order")
bldr.SetOrder(order)
}
if err := bldr.Save(c.logger, builder.CreatorMetadata{Version: Version}); err != nil {
return nil, err
}
return bldr, nil
}
// Returns a string with lowercase a-z, of length n
func randString(n int) string {
b := make([]byte, n)
_, err := rand.Read(b)
if err != nil {
panic(err)
}
for i := range b {
b[i] = 'a' + (b[i] % 26)
}
return string(b)
}
func processVolumes(imgOS string, volumes []string) (processed []string, warnings []string, err error) {
parserOS := mounts.OSLinux
if imgOS == "windows" {
parserOS = mounts.OSWindows
}
parser := mounts.NewParser(parserOS)
for _, v := range volumes {
volume, err := parser.ParseMountRaw(v, "")
if err != nil {
return nil, nil, errors.Wrapf(err, "platform volume %q has invalid format", v)
}
sensitiveDirs := []string{"/cnb", "/layers"}
if imgOS == "windows" {
sensitiveDirs = []string{`c:/cnb`, `c:\cnb`, `c:/layers`, `c:\layers`}
}
for _, p := range sensitiveDirs {
if strings.HasPrefix(strings.ToLower(volume.Spec.Target), p) {
warnings = append(warnings, fmt.Sprintf("Mounting to a sensitive directory %s", style.Symbol(volume.Spec.Target)))
}
}
processed = append(processed, fmt.Sprintf("%s:%s:%s", volume.Spec.Source, volume.Spec.Target, processMode(volume.Mode)))
}
return processed, warnings, nil
}
func processMode(mode string) string {
if mode == "" {
return "ro"
}
return mode
}
func (c *Client) logImageNameAndSha(ctx context.Context, publish bool, imageRef name.Reference) error {
// The image name and sha are printed in the lifecycle logs, and there is no need to print it again, unless output is suppressed.
if !logging.IsQuiet(c.logger) {
return nil
}
img, err := c.imageFetcher.Fetch(ctx, imageRef.Name(), image.FetchOptions{Daemon: !publish, PullPolicy: config.PullNever})
if err != nil {
return errors.Wrap(err, "fetching built image")
}
id, err := img.Identifier()
if err != nil {
return errors.Wrap(err, "reading image sha")
}
// Remove tag, if it exists, from the image name
imgName := strings.TrimSuffix(imageRef.String(), imageRef.Identifier())
imgNameAndSha := fmt.Sprintf("%s@%s\n", imgName, parseDigestFromImageID(id))
// Access the logger's Writer directly to bypass ReportSuccessfulQuietBuild mode
_, err = c.logger.Writer().Write([]byte(imgNameAndSha))
return err
}
func parseDigestFromImageID(id imgutil.Identifier) string {
var digest string
switch v := id.(type) {
case local.IDIdentifier:
digest = v.String()
case remote.DigestIdentifier:
digest = v.Digest.DigestStr()
}
digest = strings.TrimPrefix(digest, "sha256:")
return fmt.Sprintf("sha256:%s", digest)
}
func createInlineBuildpack(bp project.Buildpack, stackID string) (string, error) {
pathToInlineBuilpack, err := ioutil.TempDir("", "inline-cnb")
if err != nil {
return pathToInlineBuilpack, err
}
if err = createBuildpackTOML(pathToInlineBuilpack, bp.ID, "0.0.0", bp.Script.API, []dist.Stack{{ID: stackID}}, nil); err != nil {
return pathToInlineBuilpack, err
}
shell := bp.Script.Shell
if shell == "" {
shell = "/bin/sh"
}
binBuild := fmt.Sprintf(`#!%s
%s
`, shell, bp.Script.Inline)
binDetect := fmt.Sprintf(`#!%s
exit 0
`, shell)
if err = createBinScript(pathToInlineBuilpack, "build", binBuild, nil); err != nil {
return pathToInlineBuilpack, err
}
if err = createBinScript(pathToInlineBuilpack, "build.bat", bp.Script.Inline, nil); err != nil {
return pathToInlineBuilpack, err
}
if err = createBinScript(pathToInlineBuilpack, "detect", binDetect, nil); err != nil {
return pathToInlineBuilpack, err
}
if err = createBinScript(pathToInlineBuilpack, "detect.bat", bp.Script.Inline, nil); err != nil {
return pathToInlineBuilpack, err
}
return pathToInlineBuilpack, nil
}
|
[
"\"http_proxy\"",
"\"https_proxy\"",
"\"no_proxy\""
] |
[] |
[
"http_proxy",
"no_proxy",
"https_proxy"
] |
[]
|
["http_proxy", "no_proxy", "https_proxy"]
|
go
| 3 | 0 | |
util/util.go
|
package util
import (
"fmt"
"io"
"net/url"
"os"
"path"
"strings"
"sync"
"syscall"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/shortnames"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/signature"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/docker/distribution/registry/api/errcode"
"github.com/opencontainers/go-digest"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
minimumTruncatedIDLength = 3
// DefaultTransport is a prefix that we apply to an image name if we
// can't find one in the local Store, in order to generate a source
// reference for the image that we can then copy to the local Store.
DefaultTransport = "docker://"
)
var (
// RegistryDefaultPathPrefix contains a per-registry listing of default prefixes
// to prepend to image names that only contain a single path component.
RegistryDefaultPathPrefix = map[string]string{
"index.docker.io": "library",
"docker.io": "library",
}
)
// ResolveName checks if name is a valid image name, and if that name doesn't
// include a domain portion, returns a list of the names which it might
// correspond to in the set of configured registries, the transport used to
// pull the image, and a boolean which is true iff
// 1) the list of search registries was used, and 2) it was empty.
//
// The returned image names never include a transport: prefix, and if transport != "",
// (transport, image) should be a valid input to alltransports.ParseImageName.
// transport == "" indicates that image that already exists in a local storage,
// and the name is valid for store.Image() / storage.Transport.ParseStoreReference().
//
// NOTE: The "list of search registries is empty" check does not count blocked registries,
// and neither the implied "localhost" nor a possible firstRegistry are counted
func ResolveName(name string, firstRegistry string, sc *types.SystemContext, store storage.Store) ([]string, string, bool, error) {
if name == "" {
return nil, "", false, nil
}
// Maybe it's a truncated image ID. Don't prepend a registry name, then.
if len(name) >= minimumTruncatedIDLength {
if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) {
// It's a truncated version of the ID of an image that's present in local storage;
// we need only expand the ID.
return []string{img.ID}, "", false, nil
}
}
// If we're referring to an image by digest, it *must* be local and we
// should not have any fall through/back logic.
if strings.HasPrefix(name, "sha256:") {
d, err := digest.Parse(name)
if err != nil {
return nil, "", false, err
}
img, err := store.Image(d.Encoded())
if err != nil {
return nil, "", false, err
}
return []string{img.ID}, "", false, nil
}
// Transports are not supported for local image look ups.
srcRef, err := alltransports.ParseImageName(name)
if err == nil {
return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), false, nil
}
// Figure out the list of registries.
var registries []string
searchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(sc)
if err != nil {
logrus.Debugf("unable to read configured registries to complete %q: %v", name, err)
searchRegistries = nil
}
for _, registry := range searchRegistries {
reg, err := sysregistriesv2.FindRegistry(sc, registry)
if err != nil {
logrus.Debugf("unable to read registry configuration for %#v: %v", registry, err)
continue
}
if reg == nil || !reg.Blocked {
registries = append(registries, registry)
}
}
searchRegistriesAreEmpty := len(registries) == 0
var candidates []string
// Set the first registry if requested.
if firstRegistry != "" && firstRegistry != "localhost" {
middle := ""
if prefix, ok := RegistryDefaultPathPrefix[firstRegistry]; ok && !strings.ContainsRune(name, '/') {
middle = prefix
}
candidate := path.Join(firstRegistry, middle, name)
candidates = append(candidates, candidate)
}
// Local short-name resolution.
namedCandidates, err := shortnames.ResolveLocally(sc, name)
if err != nil {
return nil, "", false, err
}
for _, named := range namedCandidates {
candidates = append(candidates, named.String())
}
return candidates, DefaultTransport, searchRegistriesAreEmpty, nil
}
// StartsWithValidTransport validates that the name starts with a Buildah-supported transport,
// to avoid the corner case where an image name is the same as a transport name.
func StartsWithValidTransport(name string) bool {
return strings.HasPrefix(name, "dir:") || strings.HasPrefix(name, "docker://") || strings.HasPrefix(name, "docker-archive:") || strings.HasPrefix(name, "docker-daemon:") || strings.HasPrefix(name, "oci:") || strings.HasPrefix(name, "oci-archive:")
}
// ExpandNames takes unqualified names, parses them as image names, and returns
// the fully expanded result, including a tag. Names which don't include a registry
// name will be marked for the most-preferred registry (i.e., the first one in our
// configuration).
func ExpandNames(names []string, firstRegistry string, systemContext *types.SystemContext, store storage.Store) ([]string, error) {
expanded := make([]string, 0, len(names))
for _, n := range names {
var name reference.Named
nameList, _, _, err := ResolveName(n, firstRegistry, systemContext, store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", n)
}
if len(nameList) == 0 {
named, err := reference.ParseNormalizedNamed(n)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", n)
}
name = named
} else {
named, err := reference.ParseNormalizedNamed(nameList[0])
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", nameList[0])
}
name = named
}
name = reference.TagNameOnly(name)
expanded = append(expanded, name.String())
}
return expanded, nil
}
// FindImage locates the locally-stored image which corresponds to a given name.
func FindImage(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) {
var ref types.ImageReference
var img *storage.Image
var err error
names, _, _, err := ResolveName(image, firstRegistry, systemContext, store)
if err != nil {
return nil, nil, errors.Wrapf(err, "error parsing name %q", image)
}
for _, name := range names {
ref, err = is.Transport.ParseStoreReference(store, name)
if err != nil {
logrus.Debugf("error parsing reference to image %q: %v", name, err)
continue
}
img, err = is.Transport.GetStoreImage(store, ref)
if err != nil {
img2, err2 := store.Image(name)
if err2 != nil {
logrus.Debugf("error locating image %q: %v", name, err2)
continue
}
img = img2
}
break
}
if ref == nil || img == nil {
return nil, nil, errors.Wrapf(err, "error locating image with name %q (%v)", image, names)
}
return ref, img, nil
}
// ResolveNameToReferences tries to create a list of possible references
// (including their transports) from the provided image name.
func ResolveNameToReferences(
store storage.Store,
systemContext *types.SystemContext,
image string,
) (refs []types.ImageReference, err error) {
names, transport, _, err := ResolveName(image, "", systemContext, store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", image)
}
if transport != DefaultTransport {
transport += ":"
}
for _, name := range names {
ref, err := alltransports.ParseImageName(transport + name)
if err != nil {
logrus.Debugf("error parsing reference to image %q: %v", name, err)
continue
}
refs = append(refs, ref)
}
if len(refs) == 0 {
return nil, errors.Errorf("error locating images with names %v", names)
}
return refs, nil
}
// AddImageNames adds the specified names to the specified image.
func AddImageNames(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error {
names, err := ExpandNames(addNames, firstRegistry, systemContext, store)
if err != nil {
return err
}
err = store.SetNames(image.ID, append(image.Names, names...))
if err != nil {
return errors.Wrapf(err, "error adding names (%v) to image %q", names, image.ID)
}
return nil
}
// GetFailureCause checks the type of the error "err" and returns a new
// error message that reflects the reason for the failure.
// If the type of err is not a familiar one, the error "defaultError" is returned.
func GetFailureCause(err, defaultError error) error {
switch nErr := errors.Cause(err).(type) {
case errcode.Errors:
return err
case errcode.Error, *url.Error:
return nErr
default:
return defaultError
}
}
// WriteError writes `lastError` into `w` if it is not nil, and returns the next error `err`.
func WriteError(w io.Writer, err error, lastError error) error {
if lastError != nil {
fmt.Fprintln(w, lastError)
}
return err
}
// Runtime is the default command to use to run the container.
func Runtime() string {
runtime := os.Getenv("BUILDAH_RUNTIME")
if runtime != "" {
return runtime
}
// Need to switch default until runc supports cgroups v2
if unified, _ := IsCgroup2UnifiedMode(); unified {
return "crun"
}
conf, err := config.Default()
if err != nil {
logrus.Warnf("Error loading container config when searching for local runtime: %v", err)
return DefaultRuntime
}
return conf.Engine.OCIRuntime
}
// StringInSlice returns a boolean indicating whether the exact value s is
// present in the given slice.
func StringInSlice(s string, slice []string) bool {
for _, v := range slice {
if v == s {
return true
}
}
return false
}
// GetContainerIDs uses ID mappings to compute the container-level IDs that will
// correspond to a UID/GID pair on the host.
func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) {
uidMapped := true
for _, m := range uidmap {
uidMapped = false
if uid >= m.HostID && uid < m.HostID+m.Size {
uid = (uid - m.HostID) + m.ContainerID
uidMapped = true
break
}
}
if !uidMapped {
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
}
gidMapped := true
for _, m := range gidmap {
gidMapped = false
if gid >= m.HostID && gid < m.HostID+m.Size {
gid = (gid - m.HostID) + m.ContainerID
gidMapped = true
break
}
}
if !gidMapped {
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
}
return uid, gid, nil
}
// GetHostIDs uses ID mappings to compute the host-level IDs that will
// correspond to a UID/GID pair in the container.
func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) {
uidMapped := true
for _, m := range uidmap {
uidMapped = false
if uid >= m.ContainerID && uid < m.ContainerID+m.Size {
uid = (uid - m.ContainerID) + m.HostID
uidMapped = true
break
}
}
if !uidMapped {
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
}
gidMapped := true
for _, m := range gidmap {
gidMapped = false
if gid >= m.ContainerID && gid < m.ContainerID+m.Size {
gid = (gid - m.ContainerID) + m.HostID
gidMapped = true
break
}
}
if !gidMapped {
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
}
return uid, gid, nil
}
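// Example (illustrative): with a single mapping
// m := []specs.LinuxIDMapping{{ContainerID: 0, HostID: 100000, Size: 65536}},
//
//	GetHostIDs(m, m, 0, 0)                // -> (100000, 100000, nil)
//	GetContainerIDs(m, m, 100000, 100000) // -> (0, 0, nil)
//
// and both functions return an error for IDs that fall outside every mapping.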
// GetHostRootIDs uses ID mappings in spec to compute the host-level IDs that will
// correspond to UID/GID 0/0 in the container.
func GetHostRootIDs(spec *specs.Spec) (uint32, uint32, error) {
if spec == nil || spec.Linux == nil {
return 0, 0, nil
}
return GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, 0, 0)
}
// GetPolicyContext sets up, initializes and returns a new context for the specified policy
func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) {
policy, err := signature.DefaultPolicy(ctx)
if err != nil {
return nil, err
}
policyContext, err := signature.NewPolicyContext(policy)
if err != nil {
return nil, err
}
return policyContext, nil
}
// logIfNotErrno logs the error message unless err is either nil or one of the
// listed syscall.Errno values. It returns true if it logged an error.
func logIfNotErrno(err error, what string, ignores ...syscall.Errno) (logged bool) {
if err == nil {
return false
}
if errno, isErrno := err.(syscall.Errno); isErrno {
for _, ignore := range ignores {
if errno == ignore {
return false
}
}
}
logrus.Error(what)
return true
}
// LogIfNotRetryable logs "what" if err is set and is not an EINTR or EAGAIN
// syscall.Errno. Returns "true" if we can continue.
func LogIfNotRetryable(err error, what string) (retry bool) {
return !logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN)
}
// LogIfUnexpectedWhileDraining logs "what" if err is set and is not an EINTR
// or EAGAIN or EIO syscall.Errno.
func LogIfUnexpectedWhileDraining(err error, what string) {
logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN, syscall.EIO)
}
// TruncateString trims the given string to the provided maximum number of
// characters and shortens it with `...`.
func TruncateString(str string, to int) string {
newStr := str
if len(str) > to {
const tr = "..."
if to > len(tr) {
to -= len(tr)
}
newStr = str[0:to] + tr
}
return newStr
}
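// Examples (illustrative):
//
//	TruncateString("registry.example.com/team/image", 10) // -> "registr..."
//	TruncateString("abcdef", 2)                            // -> "ab..." (the ellipsis is not counted when to <= len("..."))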
var (
isUnifiedOnce sync.Once
isUnified bool
isUnifiedErr error
)
// fileExistsAndNotADir checks whether a file exists
// and is not a directory.
func fileExistsAndNotADir(path string) bool {
file, err := os.Stat(path)
if file == nil || err != nil || os.IsNotExist(err) {
return false
}
return !file.IsDir()
}
// FindLocalRuntime finds the local runtime of the
// system by searching through the config file for
// possible locations.
func FindLocalRuntime(runtime string) string {
var localRuntime string
conf, err := config.Default()
if err != nil {
logrus.Debugf("Error loading container config when searching for local runtime.")
return localRuntime
}
for _, val := range conf.Engine.OCIRuntimes[runtime] {
if fileExistsAndNotADir(val) {
localRuntime = val
break
}
}
return localRuntime
}
// MergeEnv merges two lists of environment variables, avoiding duplicates.
func MergeEnv(defaults, overrides []string) []string {
s := make([]string, 0, len(defaults)+len(overrides))
index := make(map[string]int)
for _, envSpec := range append(defaults, overrides...) {
envVar := strings.SplitN(envSpec, "=", 2)
if i, ok := index[envVar[0]]; ok {
s[i] = envSpec
continue
}
s = append(s, envSpec)
index[envVar[0]] = len(s) - 1
}
return s
}
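// Example (illustrative): later values win by variable name, and first-seen
// ordering is preserved:
//
//	MergeEnv([]string{"PATH=/usr/bin", "LANG=C"}, []string{"LANG=en_US.UTF-8"})
//	// -> []string{"PATH=/usr/bin", "LANG=en_US.UTF-8"}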
|
[
"\"BUILDAH_RUNTIME\""
] |
[] |
[
"BUILDAH_RUNTIME"
] |
[]
|
["BUILDAH_RUNTIME"]
|
go
| 1 | 0 | |
rrhh/rrhh/wsgi.py
|
"""
WSGI config for rrhh project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rrhh.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
misc/python/materialize/cli/mzcompose.py
|
# Copyright Materialize, Inc. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
#
# mzcompose.py — runs Docker Compose with Materialize customizations.
from pathlib import Path
from typing import IO, List, Tuple, Text, Optional, Sequence
from typing_extensions import NoReturn
import argparse
import json
import os
import sys
import webbrowser
from materialize import errors
from materialize import mzbuild
from materialize import mzcompose
from materialize import spawn
from materialize import ui
announce = ui.speaker("==> ")
say = ui.speaker("")
MIN_COMPOSE_VERSION = (1, 24, 0)
def main(argv: List[str]) -> int:
# Lightly parse the arguments so we know what to do.
args, unknown_args = ArgumentParser().parse_known_args(argv)
if args.file:
raise errors.MzConfigurationError("-f/--file option not supported")
elif args.project_directory:
raise errors.MzConfigurationError("--project-directory option not supported")
ui.Verbosity.init_from_env(args.mz_quiet)
# Load repository.
root = Path(os.environ["MZ_ROOT"])
repo = mzbuild.Repository(root)
# Handle special mzcompose commands that apply to the repo.
if args.command == "gen-shortcuts":
return gen_shortcuts(repo)
elif args.command == "lint":
return lint(repo)
elif args.command == "list-compositions":
for name in repo.compositions:
print(name)
return 0
# Load composition.
try:
composition = mzcompose.Composition(repo, args.mz_find or Path.cwd().name)
except errors.UnknownComposition:
if args.mz_find:
print(f"unknown composition {args.mz_find!r}", file=sys.stderr)
print("hint: available compositions:", file=sys.stderr)
for name in repo.compositions:
print(f" {name}", file=sys.stderr)
else:
print("error: directory does not contain mzcompose.yml", file=sys.stderr)
print(
"hint: enter one of the following directories and run ./mzcompose:",
file=sys.stderr,
)
for path in repo.compositions.values():
print(f" {path.relative_to(Path.cwd())}", file=sys.stderr)
return 1
# Handle special mzcompose commands that apply to the composition.
if args.command == "list-workflows":
for name in composition.workflows:
print(name)
return 0
# From here on out we're definitely invoking Docker Compose, so make sure
# it's new enough.
output = spawn.capture(
["docker-compose", "version", "--short"], unicode=True
).strip()
version = tuple(int(i) for i in output.split("."))
if version < MIN_COMPOSE_VERSION:
msg = f"Unsupported docker-compose version: {version}, min required: {MIN_COMPOSE_VERSION}"
raise errors.MzConfigurationError(msg)
announce("Collecting mzbuild dependencies")
deps = repo.resolve_dependencies(composition.images)
for d in deps:
say(d.spec())
# Check if the command is going to create or start containers, and if so
# build the dependencies. This can be slow, so we don't want to do it if we
# can help it (e.g., for `down` or `ps`).
if args.command in ["create", "run", "start", "up"]:
deps.acquire()
# Check if this is a run command that names a workflow. If so, run the
# workflow instead of Docker Compose.
if args.command == "run":
workflow = composition.workflows.get(args.first_command_arg, None)
if workflow is not None:
if args.remainder:
raise errors.MzRuntimeError(
f"cannot specify extra arguments ({' '.join(args.remainder)}) "
"when specifying a workflow (rather than a container)"
)
workflow.run()
return 0
# Check if we are being asked to list ports
elif args.command == "list-ports":
for port in composition.find_host_ports(args.first_command_arg):
print(port)
return 0
# Check if we are being asked to open a web connection to this service
elif args.command == "web":
ports = composition.find_host_ports(args.first_command_arg)
if len(ports) == 1:
webbrowser.open(f"http://localhost:{ports[0]}")
elif not ports:
raise errors.MzRuntimeError(
f"No running services matched {args.first_command_arg}"
)
else:
raise errors.MzRuntimeError(
f"Too many ports matched {args.first_command_arg}, found: {ports}"
)
# Hand over control to Docker Compose.
announce("Delegating to Docker Compose")
proc = composition.run(
[
*unknown_args,
*([args.command] if args.command is not None else []),
*([args.first_command_arg] if args.first_command_arg is not None else []),
*args.remainder,
],
check=False,
)
return proc.returncode
def gen_shortcuts(repo: mzbuild.Repository) -> int:
template = """#!/usr/bin/env bash
# Copyright Materialize, Inc. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
#
# mzcompose — runs Docker Compose with Materialize customizations.
exec "$(dirname "$0")/{}/bin/mzcompose" "$@"
"""
for path in repo.compositions.values():
mzcompose_path = path / "mzcompose"
with open(mzcompose_path, "w") as f:
f.write(template.format(os.path.relpath(repo.root, path)))
mzbuild.chmod_x(mzcompose_path)
return 0
def lint(repo: mzbuild.Repository) -> int:
errors = []
for name in repo.compositions:
errors += mzcompose.Composition.lint(repo, name)
for error in sorted(errors):
print(error)
return 1 if errors else 0
# We subclass `argparse.ArgumentParser` so that we can override its default
# behavior of exiting on error. We want Docker Compose to be responsible for
# generating option-parsing errors.
class ArgumentParser(argparse.ArgumentParser):
def __init__(self) -> None:
super().__init__(add_help=False)
self.add_argument("--mz-quiet", action="store_true", default=None)
self.add_argument("--mz-find")
self.add_argument("-f", "--file")
self.add_argument("--project-directory")
self.add_argument("command", nargs="?")
self.add_argument("first_command_arg", nargs="?")
self.add_argument("remainder", nargs=argparse.REMAINDER)
def parse_known_args(
self,
args: Optional[Sequence[Text]] = None,
namespace: Optional[argparse.Namespace] = None,
) -> Tuple[argparse.Namespace, List[str]]:
ns = argparse.Namespace()
try:
return super().parse_known_args(args, namespace=ns)
except ValueError:
return (ns, [])
def error(self, message: str) -> NoReturn:
raise ValueError(message)
if __name__ == "__main__":
with errors.error_handler(lambda *args: print(*args, file=sys.stderr)):
sys.exit(main(sys.argv[1:]))
|
[] |
[] |
[
"MZ_ROOT"
] |
[]
|
["MZ_ROOT"]
|
python
| 1 | 0 | |
config.py
|
import os
import configparser
FB_AUTHORIZATION_BASE_URL = "https://www.facebook.com/dialog/oauth"
FB_TOKEN_URL = "https://graph.facebook.com/oauth/access_token"
## TODO get from config.ini
config = configparser.ConfigParser()
configfilename = os.environ.get("VMACONFIG")
config.read(configfilename)
BASE_URL = config['parent']['baseurl']
print(os.environ.get("VMACONFIG") )
def get_config_obj(inifile):
# TODO generate config object from inifile
pass
class BaseConfig:
"""Base configuration"""
TESTING = False
SECRET_KEY = os.environ.get('SECRET_KEY', 'mysecretkey')
SQLALCHEMY_TRACK_MODIFICATIONS = False
PARENT_BASE_URL = os.environ.get("PARENT_BASE_URL")
class DevelopmentConfig(BaseConfig):
"""Development configuration"""
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
FB_CLIENT_ID = os.environ.get('FB_CLIENT_ID')
FB_CLIENT_SECRET = os.environ.get('FB_CLIENT_SECRET')
DEVELOPMENT = True
DEBUG = True
class TestingConfig(BaseConfig):
"""Testing configuration"""
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_TEST_URL')
class StagingConfig(BaseConfig):
"""Staging configuration"""
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
class ProductionConfig(BaseConfig):
"""Production configuration"""
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
|
[] |
[] |
[
"FB_CLIENT_ID",
"DATABASE_URL",
"PARENT_BASE_URL",
"SECRET_KEY",
"VMACONFIG",
"DATABASE_TEST_URL",
"FB_CLIENT_SECRET"
] |
[]
|
["FB_CLIENT_ID", "DATABASE_URL", "PARENT_BASE_URL", "SECRET_KEY", "VMACONFIG", "DATABASE_TEST_URL", "FB_CLIENT_SECRET"]
|
python
| 7 | 0 | |
cmd/kaniko-docker/main.go
|
package main
import (
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/joho/godotenv"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
kaniko "github.com/drone/drone-kaniko"
"github.com/drone/drone-kaniko/pkg/artifact"
)
const (
// Docker file path
dockerPath string = "/kaniko/.docker"
dockerConfigPath string = "/kaniko/.docker/config.json"
v1RegistryURL string = "https://index.docker.io/v1/" // Default registry
v2RegistryURL string = "https://index.docker.io/v2/" // v2 registry is not supported
v2HubRegistryURL string = "https://registry.hub.docker.com/v2/"
defaultDigestFile string = "/kaniko/digest-file"
)
var (
version = "unknown"
)
func main() {
// Load env-file if it exists first
if env := os.Getenv("PLUGIN_ENV_FILE"); env != "" {
if err := godotenv.Load(env); err != nil {
logrus.Fatal(err)
}
}
app := cli.NewApp()
app.Name = "kaniko docker plugin"
app.Usage = "kaniko docker plugin"
app.Action = run
app.Version = version
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "dockerfile",
Usage: "build dockerfile",
Value: "Dockerfile",
EnvVar: "PLUGIN_DOCKERFILE",
},
cli.StringFlag{
Name: "context",
Usage: "build context",
Value: ".",
EnvVar: "PLUGIN_CONTEXT",
},
cli.StringFlag{
Name: "drone-commit-ref",
Usage: "git commit ref passed by Drone",
EnvVar: "DRONE_COMMIT_REF",
},
cli.StringFlag{
Name: "drone-repo-branch",
Usage: "git repository default branch passed by Drone",
EnvVar: "DRONE_REPO_BRANCH",
},
cli.StringSliceFlag{
Name: "tags",
Usage: "build tags",
Value: &cli.StringSlice{"latest"},
EnvVar: "PLUGIN_TAGS",
FilePath: ".tags",
},
cli.BoolFlag{
Name: "expand-tag",
Usage: "enable for semver tagging",
EnvVar: "PLUGIN_EXPAND_TAG",
},
cli.BoolFlag{
Name: "auto-tag",
Usage: "enable auto generation of build tags",
EnvVar: "PLUGIN_AUTO_TAG",
},
cli.StringFlag{
Name: "auto-tag-suffix",
Usage: "the suffix of auto build tags",
EnvVar: "PLUGIN_AUTO_TAG_SUFFIX",
},
cli.StringSliceFlag{
Name: "args",
Usage: "build args",
EnvVar: "PLUGIN_BUILD_ARGS",
},
cli.StringFlag{
Name: "target",
Usage: "build target",
EnvVar: "PLUGIN_TARGET",
},
cli.StringFlag{
Name: "repo",
Usage: "docker repository",
EnvVar: "PLUGIN_REPO",
},
cli.StringSliceFlag{
Name: "custom-labels",
Usage: "additional k=v labels",
EnvVar: "PLUGIN_CUSTOM_LABELS",
},
cli.StringFlag{
Name: "registry",
Usage: "docker registry",
Value: v1RegistryURL,
EnvVar: "PLUGIN_REGISTRY",
},
cli.StringSliceFlag{
Name: "registry-mirrors",
Usage: "docker registry mirrors",
EnvVar: "PLUGIN_REGISTRY_MIRRORS",
},
cli.StringFlag{
Name: "username",
Usage: "docker username",
EnvVar: "PLUGIN_USERNAME",
},
cli.StringFlag{
Name: "password",
Usage: "docker password",
EnvVar: "PLUGIN_PASSWORD",
},
cli.BoolFlag{
Name: "skip-tls-verify",
Usage: "Skip registry tls verify",
EnvVar: "PLUGIN_SKIP_TLS_VERIFY",
},
cli.StringFlag{
Name: "snapshot-mode",
Usage: "Specify one of full, redo or time as snapshot mode",
EnvVar: "PLUGIN_SNAPSHOT_MODE",
},
cli.BoolFlag{
Name: "enable-cache",
Usage: "Set this flag to opt into caching with kaniko",
EnvVar: "PLUGIN_ENABLE_CACHE",
},
cli.StringFlag{
Name: "cache-repo",
Usage: "Remote repository that will be used to store cached layers. enable-cache needs to be set to use this flag",
EnvVar: "PLUGIN_CACHE_REPO",
},
cli.IntFlag{
Name: "cache-ttl",
Usage: "Cache timeout in hours. Defaults to two weeks.",
EnvVar: "PLUGIN_CACHE_TTL",
},
cli.StringFlag{
Name: "artifact-file",
Usage: "Artifact file location that will be generated by the plugin. This file will include information of docker images that are uploaded by the plugin.",
EnvVar: "PLUGIN_ARTIFACT_FILE",
},
cli.BoolFlag{
Name: "no-push",
Usage: "Set this flag if you only want to build the image, without pushing to a registry",
EnvVar: "PLUGIN_NO_PUSH",
},
cli.StringFlag{
Name: "verbosity",
Usage: "Set this flag with value as oneof <panic|fatal|error|warn|info|debug|trace> to set the logging level for kaniko. Defaults to info.",
EnvVar: "PLUGIN_VERBOSITY",
},
cli.StringFlag{
Name: "platform",
Usage: "Allows to build with another default platform than the host, similarly to docker build --platform",
EnvVar: "PLUGIN_PLATFORM",
},
}
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}
func run(c *cli.Context) error {
username := c.String("username")
noPush := c.Bool("no-push")
// only setup auth when pushing or credentials are defined
if !noPush || username != "" {
if err := createDockerCfgFile(username, c.String("password"), c.String("registry")); err != nil {
return err
}
}
plugin := kaniko.Plugin{
Build: kaniko.Build{
DroneCommitRef: c.String("drone-commit-ref"),
DroneRepoBranch: c.String("drone-repo-branch"),
Dockerfile: c.String("dockerfile"),
Context: c.String("context"),
Tags: c.StringSlice("tags"),
AutoTag: c.Bool("auto-tag"),
AutoTagSuffix: c.String("auto-tag-suffix"),
ExpandTag: c.Bool("expand-tag"),
Args: c.StringSlice("args"),
Target: c.String("target"),
Repo: buildRepo(c.String("registry"), c.String("repo")),
Mirrors: c.StringSlice("registry-mirrors"),
Labels: c.StringSlice("custom-labels"),
SkipTlsVerify: c.Bool("skip-tls-verify"),
SnapshotMode: c.String("snapshot-mode"),
EnableCache: c.Bool("enable-cache"),
CacheRepo: buildRepo(c.String("registry"), c.String("cache-repo")),
CacheTTL: c.Int("cache-ttl"),
DigestFile: defaultDigestFile,
NoPush: noPush,
Verbosity: c.String("verbosity"),
Platform: c.String("platform"),
},
Artifact: kaniko.Artifact{
Tags: c.StringSlice("tags"),
Repo: buildRepo(c.String("registry"), c.String("repo")),
Registry: c.String("registry"),
ArtifactFile: c.String("artifact-file"),
RegistryType: artifact.Docker,
},
}
return plugin.Exec()
}
// Create the docker config file for authentication
func createDockerCfgFile(username, password, registry string) error {
if username == "" {
return fmt.Errorf("Username must be specified")
}
if password == "" {
return fmt.Errorf("Password must be specified")
}
if registry == "" {
return fmt.Errorf("Registry must be specified")
}
if registry == v2RegistryURL || registry == v2HubRegistryURL {
fmt.Println("Docker v2 registry is not supported in kaniko. Refer issue: https://github.com/GoogleContainerTools/kaniko/issues/1209")
fmt.Printf("Using v1 registry instead: %s\n", v1RegistryURL)
registry = v1RegistryURL
}
err := os.MkdirAll(dockerPath, 0600)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to create %s directory", dockerPath))
}
authBytes := []byte(fmt.Sprintf("%s:%s", username, password))
encodedString := base64.StdEncoding.EncodeToString(authBytes)
jsonBytes := []byte(fmt.Sprintf(`{"auths": {"%s": {"auth": "%s"}}}`, registry, encodedString))
err = ioutil.WriteFile(dockerConfigPath, jsonBytes, 0644)
if err != nil {
return errors.Wrap(err, "failed to create docker config file")
}
return nil
}
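// For reference, with illustrative credentials ("user"/"pass") the file written
// above would look like:
//
//	{"auths": {"https://index.docker.io/v1/": {"auth": "dXNlcjpwYXNz"}}}
//
// where "dXNlcjpwYXNz" is base64("user:pass").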
func buildRepo(registry, repo string) string {
if registry == "" {
// No custom registry, just return the repo name
return repo
}
// Trim off trailing slash to prevent double slash when combining with repo
registry = strings.TrimSuffix(registry, "/")
if strings.HasPrefix(repo, registry+"/") {
// Repo already includes the registry prefix
// For backward compatibility, we won't add the prefix again.
return repo
}
// Prefix the repo with the registry
return registry + "/" + repo
}
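// Examples (illustrative registry/repo values):
//
//	buildRepo("", "myorg/app")                      // -> "myorg/app"
//	buildRepo("registry.example.com/", "myorg/app") // -> "registry.example.com/myorg/app"
//	buildRepo("registry.example.com", "registry.example.com/myorg/app") // -> unchanged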
|
[
"\"PLUGIN_ENV_FILE\""
] |
[] |
[
"PLUGIN_ENV_FILE"
] |
[]
|
["PLUGIN_ENV_FILE"]
|
go
| 1 | 0 | |
scripts/data_availability.py
|
"""
Check which images are available.
Script that looks at the ADNIMERGE file and checks how many of the
subjects have either an MRI or a PET scan downloaded. It also checks for
the existence of the same file in the BIDS directory. The output of this script is
a .csv file with the availability information of all subjects mentioned in ADNIMERGE.
"""
import pandas as pd
import os
import shutil
import glob
import bids.layout
import bids.tests
# Path to ADNIMERGE and directories
ADNIMERGE_file = "/homedtic/gmarti/DATA/ADNIMRIStandard1.5/ADNIMERGE.csv"
BIDS_DIR = "ADNI_BIDS/"
ADNI_DIR = "ADNI/"
# Load ADNIMERGE
df_adnimerge = pd.read_csv(ADNIMERGE_file)
# Get only PTID, month and study columns
df_metadata = df_adnimerge[["PTID", "VISCODE", "COLPROT", "IMAGEUID"]].copy()
# Get BIDS directory info with layouts
layout = bids.layout.BIDSLayout(BIDS_DIR)
assert len(layout.get_subjects()) > 0, "No subjects in directory!"
# Create empty lists with the availability
# These will contain either "Yes" or "No" depending on availability
MRI_ADNI = []
PET_ADNI = []
MRI_BIDS = []
PET_BIDS = []
# for each line:
for row in df_adnimerge.itertuples():
PET_BIDS.append('No')
PET_ADNI.append('No')
subj = row.PTID
    # Image.id is the third field (corresponding to index 2) of the row.
try:
imageid = str(int(row.IMAGEUID))
    except (ValueError, TypeError):  # IMAGEUID may be missing or NaN
MRI_ADNI.append("No")
MRI_BIDS.append("No")
continue
f = glob.glob(ADNI_DIR + subj + "/*/*/*/*I" + imageid + ".nii")
# if found, add information to columns
if f:
MRI_ADNI.append("Yes")
# If not, add missing data
else:
MRI_ADNI.append("No")
# Test for BIDS
# Get session name
session = ''
if row.VISCODE == 'bl':
session = 'M00'
else:
session = 'M' + row.VISCODE[1:]
patient_id = 'ADNI' + subj[0:3] + 'S' + subj[6:]
imgs = layout.get(subject=patient_id, modality='anat', session=session)
# If exists
if imgs:
MRI_BIDS.append("Yes")
else:
MRI_BIDS.append("No")
df_metadata.loc[:, "MRI_ADNI"] = MRI_ADNI
df_metadata.loc[:, "PET_ADNI"] = PET_ADNI
df_metadata.loc[:, "MRI_BIDS"] = MRI_BIDS
df_metadata.loc[:, "PET_BIDS"] = PET_BIDS
df_metadata.to_csv("summary_files.csv")
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
work/conda_smithy_testing/make_prs.py
|
#!/user/bin/env python
import os
import sys
import github
import subprocess
import yaml
META = """\
{% set name = "cf-autotick-bot-test-package" %}
{% set version = "0.9" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
url: https://github.com/regro/cf-autotick-bot-test-package/archive/v{{ version }}.tar.gz
sha256: 74d5197d4ca8afb34b72a36fc8763cfaeb06bdbc3f6d63e55099fe5e64326048
build:
number: {{ build }}
string: "{{ cislug }}_py{{ py }}h{{ PKG_HASH }}_{{ build }}"
requirements:
host:
- python
- pip
run:
- python
test:
commands:
- echo "works!"
about:
home: https://github.com/regro/cf-scripts
license: BSD-3-Clause
license_family: BSD
license_file: LICENSE
summary: testing feedstock for the regro-cf-autotick-bot
extra:
recipe-maintainers:
- beckermr
- conda-forge/bot
""" # noqa
assert sys.argv[3][0] == "v"
assert isinstance(int(sys.argv[3][1:]), int)
BUILD_SLUG = "{% set build = " + str(int(sys.argv[3][1:]) + 14) + " %}\n"
CI_SLUG = '{% set cislug = "' + sys.argv[1] + sys.argv[2] + '" %}\n'
TST = sys.argv[3]
BRANCH = TST + "-" + sys.argv[1] + "-" + sys.argv[2]
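# Illustrative example (hypothetical arguments): running
#   python make_prs.py azure azure v3
# yields BUILD_SLUG '{% set build = 17 %}\n', CI_SLUG '{% set cislug = "azureazure" %}\n'
# and BRANCH 'v3-azure-azure'.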
print("\n\n=========================================")
print("making the head branch")
subprocess.run(
["git", "checkout", "%s-%s-%s" % (sys.argv[1], sys.argv[2], sys.argv[3])],
check=True,
)
subprocess.run(
["git", "pull", "upstream", "%s-%s-%s" % (sys.argv[1], sys.argv[2], sys.argv[3])],
check=True,
)
subprocess.run(
["git", "checkout", "-b", BRANCH],
check=True,
)
print("\n\n=========================================")
print("editing the recipe")
with open("recipe/meta.yaml", "w") as fp:
fp.write(CI_SLUG)
fp.write(BUILD_SLUG)
fp.write(META)
with open("conda-forge.yml", "r") as fp:
cfg = yaml.safe_load(fp)
cfg["provider"]["linux"] = sys.argv[1]
cfg["provider"]["osx"] = sys.argv[2]
cfg["provider"]["win"] = "azure"
cfg["provider"]["linux_aarch64"] = "drone"
cfg["provider"]["linux_ppc64le"] = "travis"
cfg["conda_forge_output_validation"] = True
with open("conda-forge.yml", "w") as fp:
yaml.dump(cfg, fp)
with open("recipe/conda_build_config.yaml", "w") as fp:
fp.write("""\
python:
- 3.6.* *_cpython
""")
subprocess.run(
["git", "add", "conda-forge.yml",
"recipe/meta.yaml", "recipe/conda_build_config.yaml"],
check=True,
)
print("\n\n=========================================")
print("rerendering")
subprocess.run(
["conda", "smithy", "rerender", "-c", "auto"],
check=True
)
print("\n\n=========================================")
print("pushing to the fork")
subprocess.run(
["git", "push", "--set-upstream", "origin", BRANCH]
)
print("\n\n=========================================")
print("making the PR")
gh = github.Github(os.environ["GITHUB_TOKEN"])
repo = gh.get_repo("conda-forge/cf-autotick-bot-test-package-feedstock")
pr = repo.create_pull(
title="TST test conda-smithy " + BRANCH,
body=(
"This PR is an autogenerated test for conda-smithy. Merge once it "
"passes and then check the CI on the upstream branch has worked."),
head="beckermr:" + BRANCH,
base="%s-%s-%s" % (sys.argv[1], sys.argv[2], sys.argv[3]),
maintainer_can_modify=True,
)
pr.add_to_labels("automerge")
|
[] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
python
| 1 | 0 | |
providers/ibm/vpc_cluster.go
|
// Copyright 2019 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ibm
import (
"fmt"
"os"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
bluemix "github.com/IBM-Cloud/bluemix-go"
"github.com/IBM-Cloud/bluemix-go/api/container/containerv2"
"github.com/IBM-Cloud/bluemix-go/session"
)
type VPCClusterGenerator struct {
IBMService
}
func (g VPCClusterGenerator) loadcluster(clustersID, clusterName string) terraformutils.Resource {
	resources := terraformutils.NewSimpleResource(
clustersID,
clusterName,
"ibm_container_vpc_cluster",
"ibm",
[]string{})
return resources
}
func (g VPCClusterGenerator) loadWorkerPools(clustersID, poolID string, dependsOn []string) terraformutils.Resource {
	resources := terraformutils.NewResource(
fmt.Sprintf("%s/%s", clustersID, poolID),
poolID,
"ibm_container_vpc_worker_pool",
"ibm",
map[string]string{},
[]string{},
map[string]interface{}{
"depends_on": dependsOn,
})
return resources
}
func (g *VPCClusterGenerator) InitResources() error {
bmxConfig := &bluemix.Config{
BluemixAPIKey: os.Getenv("IC_API_KEY"),
}
sess, err := session.New(bmxConfig)
if err != nil {
return err
}
client, err := containerv2.New(sess)
if err != nil {
return err
}
clusters, err := client.Clusters().List(containerv2.ClusterTargetHeader{})
if err != nil {
return err
}
for _, cs := range clusters {
g.Resources = append(g.Resources, g.loadcluster(cs.ID, cs.Name))
workerPools, err := client.WorkerPools().ListWorkerPools(cs.ID, containerv2.ClusterTargetHeader{})
if err != nil {
return err
}
for _, pool := range workerPools {
if pool.PoolName != "default" {
var dependsOn []string
dependsOn = append(dependsOn,
"ibm_container_vpc_cluster."+terraformutils.TfSanitize(cs.Name))
g.Resources = append(g.Resources, g.loadWorkerPools(cs.ID, pool.ID, dependsOn))
}
}
}
return nil
}
|
[
"\"IC_API_KEY\""
] |
[] |
[
"IC_API_KEY"
] |
[]
|
["IC_API_KEY"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"encoding/json"
"io/ioutil"
"log"
"os"
"github.com/mitchellh/cli"
"github.com/pragkent/aliyun-disk/command"
_ "github.com/pragkent/aliyun-disk/logs"
"github.com/pragkent/aliyun-disk/volume"
)
func main() {
args := os.Args[1:]
meta := newMeta()
cli := &cli.CLI{
Args: args,
Commands: Commands(meta),
Version: Version,
HelpFunc: cli.BasicHelpFunc(Name),
HelpWriter: os.Stdout,
}
exitStatus, err := cli.Run()
if err != nil {
meta.Ui.Error(err.Error())
}
os.Exit(exitStatus)
}
func newMeta() *command.Meta {
ui := &cli.BasicUi{
Writer: os.Stdout,
ErrorWriter: os.Stderr,
Reader: os.Stdin,
}
driver := volume.NewDriver(getDriverConfig())
return &command.Meta{
Ui: ui,
Driver: driver,
}
}
func getDriverConfig() *volume.DriverConfig {
cfg := loadDriverConfig()
if cfg != nil {
return cfg
}
return &volume.DriverConfig{
Region: os.Getenv("ALIYUN_REGION"),
AccessKey: os.Getenv("ALIYUN_ACCESS_KEY"),
SecretKey: os.Getenv("ALIYUN_SECRET_KEY"),
Cluster: os.Getenv("ALIYUN_CLUSTER"),
}
}
func loadDriverConfig() *volume.DriverConfig {
const path = "/etc/kubernetes/cloud/cloud.json"
raw, err := ioutil.ReadFile(path)
if err != nil {
log.Printf("ioutil.ReadFile error: %v", err)
return nil
}
var cfg volume.DriverConfig
if err = json.Unmarshal(raw, &cfg); err != nil {
log.Printf("json.Unmarshal error: %v", err)
return nil
}
return &cfg
}
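// Illustrative shape of /etc/kubernetes/cloud/cloud.json (assumption: the JSON
// field names follow volume.DriverConfig's tags, which are not shown here):
//
//	{
//	  "region": "cn-hangzhou",
//	  "accessKey": "<access key>",
//	  "secretKey": "<secret key>",
//	  "cluster": "<cluster id>"
//	}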
|
[
"\"ALIYUN_REGION\"",
"\"ALIYUN_ACCESS_KEY\"",
"\"ALIYUN_SECRET_KEY\"",
"\"ALIYUN_CLUSTER\""
] |
[] |
[
"ALIYUN_CLUSTER",
"ALIYUN_ACCESS_KEY",
"ALIYUN_SECRET_KEY",
"ALIYUN_REGION"
] |
[]
|
["ALIYUN_CLUSTER", "ALIYUN_ACCESS_KEY", "ALIYUN_SECRET_KEY", "ALIYUN_REGION"]
|
go
| 4 | 0 | |
pomodoro_timer/main.py
|
from tkinter import *
import math
# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
HEADING_FONT_NAME = "Segoe UI"
WORK_MIN = 25
SHORT_BREAK_MIN = 5
LONG_BREAK_MIN = 20
reps = 0
timer = None
# ---------------------------- TIMER RESET ------------------------------- #
def reset_timer():
global timer
global reps
reps = 0
title_label.config(text="Timer", fg=GREEN)
checkmarks_label.config(text="")
canvas.itemconfig(timer_text, text="00:00")
window.after_cancel(timer)
# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
global reps
reps += 1
work_sec = WORK_MIN * 60
short_break_sec = SHORT_BREAK_MIN * 60
long_break_sec = LONG_BREAK_MIN * 60
if reps % 8 == 0:
title_label.config(text="Break", fg=RED)
checkmarks_label.config(text=checkmarks_label['text'] + '✔')
count_down(long_break_sec)
elif reps % 2 == 0:
title_label.config(text="Break", fg=PINK)
checkmarks_label.config(text=checkmarks_label['text'] + '✔')
count_down(short_break_sec)
else:
title_label.config(text="Work", fg=GREEN)
count_down(work_sec)
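# The reps counter drives the cycle: odd sessions are work blocks, even sessions
# are short breaks, and every 8th session (after four work blocks) is a long break.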
# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
def count_down(count):
global timer
count_min = math.floor(count / 60)
count_sec = count % 60
if count_sec < 10:
count_sec = f"0{count_sec}"
canvas.itemconfig(timer_text, text=f"{count_min}:{count_sec}")
if count > 0:
timer = window.after(1000, count_down, count - 1)
else:
start_timer()
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Arihant's Pomodoro Timer")
window.config(padx=100, pady=50, bg=YELLOW)
title_label = Label(text="Timer", font=(HEADING_FONT_NAME, 40, "bold"), bg=YELLOW, fg=GREEN)
title_label.grid(column=1, row=0)
canvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
tomato_img = PhotoImage(file="tomato.png")
canvas.create_image(100, 112, image=tomato_img)
timer_text = canvas.create_text(103, 130, text="00:00", fill="white", font=(FONT_NAME, 35, "bold"))
canvas.grid(column=1, row=1)
Button(text="Start", command=start_timer, highlightthickness=0).grid(column=0, row=2)
Button(text="Reset", command=reset_timer, highlightthickness=0).grid(column=2, row=2)
checkmarks_label = Label(bg=YELLOW, fg=GREEN, font=(FONT_NAME, 25, "bold"))
checkmarks_label.grid(column=1, row=3)
window.mainloop()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |