Dataset schema (column name, type, and value statistics):

  filename          string, lengths 4 to 198
  content           string, lengths 25 to 939k
  environment       sequence
  variablearg       sequence
  constarg          sequence
  variableargjson   string, 1 distinct value
  constargjson      string, lengths 2 to 3.9k
  lang              string, 3 distinct values
  constargcount     float64, range 0 to 129
  variableargcount  float64, range 0 to 0
  sentence          string, 1 distinct value
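To make the flattened rows below easier to follow, here is the first row re-expressed as a Python dict keyed by the column names above. This is purely illustrative: the content value is truncated, and the sentence field is omitted because its single value does not appear in this dump.

```python
# First row of the dump, re-keyed by the schema above (illustrative only).
example_record = {
    "filename": "connection.go",
    "content": "package gremlin import (...)",   # full Go source omitted here for brevity
    "environment": ['"GREMLIN_SERVERS"'],        # the quoted literal as it appears in the source
    "variablearg": [],
    "constarg": ["GREMLIN_SERVERS"],
    "variableargjson": "[]",
    "constargjson": '["GREMLIN_SERVERS"]',
    "lang": "go",
    "constargcount": 1.0,
    "variableargcount": 0.0,
}
```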
filename: connection.go
package gremlin import ( "encoding/base64" "encoding/json" "errors" "net/url" "os" "strings" "github.com/gorilla/websocket" "github.com/jessicacglenn/pool" "fmt" ) // Clients include the necessary info to connect to the server and the underlying socket type Client struct { Remote *url.URL pool pool.Pool Auth []OptAuth factory *EndpointFactory } var ( MalformedClusterStringErr = errors.New("connection string is not in expected format. An example of the expected format is 'ws://server1:8182, ws://server2:8182'") UserNotSetErr = errors.New("variable GREMLIN_USER is not set") PassNotSetErr = errors.New("variable GREMLIN_PASS is not set") UnknownErr = errors.New("an unknown error occurred") NoEndpointsError = errors.New("no valid endpoints provided") NoServersSetError = errors.New("no servers set, configure servers to connect to using the GREMLIN_SERVERS environment variable") ) // perhaps if we change the urlStr to interface{} we can have either a slice or a string passed through and // we will be able to use this for both the func NewClient(urlStr string, options ...OptAuth) (*Client, error) { fact, err := NewEndpointFactory(urlStr) if err != nil { return nil, err } c := &Client{Auth: options, factory: fact} p, err := pool.NewChannelPool(1, 30, fact.connectSocket) if err != nil { return nil, err } c.pool = p return c, nil } // if the server is not reachable then we should mark it as unavailable // and then try again a little later. func (c *Client) Close() { c.pool.Close() } // Client executes the provided request func (c *Client) ExecQuery(query string) ([]byte, error) { req := Query(query) return c.Exec(req) } func (c *Client) Exec(req *Request) ([]byte, error) { con, err := c.pool.Get() if err != nil { return nil, err } return c.executeForConn(req, con) } func (c *Client) executeForConn(req *Request, con *pool.PoolConn) ([]byte, error) { requestMessage, err := GraphSONSerializer(req) if err != nil { return nil, err } // todo : remove me fmt.Printf("Sending Message:\n%s\n", requestMessage) if err := con.WriteMessage(websocket.BinaryMessage, requestMessage); err != nil { // todo : remove me print("error", err) return nil, err } b, err := c.readResponse(con) // todo : remove me fmt.Printf("Receiving Response:\n%s\n", b) // update the endpoint to mark success/error, this allows us to back off endpoints that are continuing to fail if err != nil { // todo : remove me fmt.Printf("ERROR in Response:\n%s\n", err.Error()) c.factory.failedEndpoint(con) } else { c.factory.successfulEndpoint(con) } // if the request was successful, return to the pool, otherwise close and remove from the pool. err = con.Close() return b, err } // this doesn't seem to be useful outside of the Exec function (in this context) func (c *Client) readResponse(con *pool.PoolConn) (data []byte, err error) { // Data buffer var message []byte var dataItems []json.RawMessage inBatchMode := false // Receive data for { if _, message, err = con.ReadMessage(); err != nil { return } var res *Response if err = json.Unmarshal(message, &res); err != nil { return } var items []json.RawMessage switch res.Status.Code { case StatusNoContent: return case StatusAuthenticate: return c.authenticate(con, res.RequestId) case StatusPartialContent: inBatchMode = true if err = json.Unmarshal(res.Result.Data, &items); err != nil { return } dataItems = append(dataItems, items...) case StatusSuccess: if inBatchMode { if err = json.Unmarshal(res.Result.Data, &items); err != nil { return } dataItems = append(dataItems, items...) 
data, err = json.Marshal(dataItems) } else { data = res.Result.Data } return default: if errmsg, exists := ConnectionErrors[res.Status.Code]; exists { err = errmsg } else { err = UnknownErr } return } } return } // AuthInfo includes all info related with SASL authentication with the Gremlin server // ChallengeId is the requestID in the 407 status (AUTHENTICATE) response given by the server. // We have to send an authentication request with that same RequestID in order to solve the challenge. type AuthInfo struct { ChallengeId string User string Pass string } type OptAuth func(*AuthInfo) error // Constructor for different authentication possibilities func NewAuthInfo(options ...OptAuth) (*AuthInfo, error) { auth := &AuthInfo{} for _, op := range options { err := op(auth) if err != nil { return nil, err } } return auth, nil } // Sets authentication info from environment variables GREMLIN_USER and GREMLIN_PASS func OptAuthEnv() OptAuth { return func(auth *AuthInfo) error { user, ok := os.LookupEnv("GREMLIN_USER") if !ok { return UserNotSetErr } pass, ok := os.LookupEnv("GREMLIN_PASS") if !ok { return PassNotSetErr } auth.User = user auth.Pass = pass return nil } } // Sets authentication information from username and password func OptAuthUserPass(user, pass string) OptAuth { return func(auth *AuthInfo) error { auth.User = user auth.Pass = pass return nil } } func (c *Client) authenticate(con *pool.PoolConn, requestId string) ([]byte, error) { auth, err := NewAuthInfo(c.Auth...) if err != nil { return nil, err } var sasl []byte sasl = append(sasl, 0) sasl = append(sasl, []byte(auth.User)...) sasl = append(sasl, 0) sasl = append(sasl, []byte(auth.Pass)...) saslEnc := base64.StdEncoding.EncodeToString(sasl) args := &RequestArgs{Sasl: saslEnc} authReq := &Request{ RequestId: requestId, Processor: "trasversal", Op: "authentication", Args: args, } return c.executeForConn(authReq, con) } // LEGACY var defaultClient *Client func NewCluster(s ...string) (err error) { var connString string // If no arguments use environment variable if len(s) == 0 { connString = strings.TrimSpace(os.Getenv("GREMLIN_SERVERS")) } else { connString = strings.Join(s, ",") } if connString == "" { err = NoServersSetError return } defaultClient, err = NewClient(connString) return }
[ "\"GREMLIN_SERVERS\"" ]
[]
[ "GREMLIN_SERVERS" ]
[]
["GREMLIN_SERVERS"]
go
1
0
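The row above records the one environment variable that connection.go reads with a constant name, GREMLIN_SERVERS (read via os.Getenv in NewCluster). Below is a minimal sketch of how such constant argument names could be pulled out of Go source; the helper name and regular expression are assumptions for illustration, not the pipeline that actually produced this dataset.

```python
import re

# Hypothetical helper: collect string literals passed to os.Getenv in Go source.
# Assumption for illustration only; the real extraction behind the constarg
# column is not described in this dump.
GO_GETENV_LITERAL = re.compile(r'os\.Getenv\(\s*"([^"]+)"\s*\)')

def extract_const_env_args(go_source: str) -> list[str]:
    """Return the distinct env-var names read with a constant string argument."""
    return sorted(set(GO_GETENV_LITERAL.findall(go_source)))
```

Applied to connection.go this yields ["GREMLIN_SERVERS"], matching constarg and constargcount above; the GREMLIN_USER and GREMLIN_PASS lookups go through os.LookupEnv and are not captured by this particular sketch, consistent with their absence from the row.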
filename: openmcp-metric-collector/member/src/scrap/scrap.go
package scrap import ( "fmt" "openmcp/openmcp/openmcp-metric-collector/member/src/clock" "openmcp/openmcp/openmcp-metric-collector/member/src/decode" "openmcp/openmcp/openmcp-metric-collector/member/src/kubeletClient" "openmcp/openmcp/openmcp-metric-collector/member/src/storage" "os" corev1 "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/rest" ) func Scrap(config *rest.Config, kubelet_client *kubeletClient.KubeletClient, nodes []corev1.Node) (*storage.Collection, error) { fmt.Println("Func Scrap Called") responseChannel := make(chan *storage.MetricsBatch, len(nodes)) errChannel := make(chan error, len(nodes)) defer close(responseChannel) defer close(errChannel) startTime := clock.MyClock.Now() for _, node := range nodes { go func(node corev1.Node) { //defer wait.Done() metrics, err := CollectNode(config, kubelet_client, node) if err != nil { err = fmt.Errorf("unable to fully scrape metrics from node %s: %v", node.Name, err) } responseChannel <- metrics errChannel <- err }(node) } var errs []error res := &storage.Collection{} nodeNum := 0 podNum := 0 for range nodes { err := <-errChannel srcBatch := <-responseChannel if err != nil { errs = append(errs, err) // NB: partial node results are still worth saving, so // don't skip storing results if we got an error } if srcBatch == nil { continue } res.Metricsbatchs = append(res.Metricsbatchs, *srcBatch) nodeNum += 1 podNum += len(srcBatch.Pods) } res.ClusterName = os.Getenv("CLUSTER_NAME") //config.Username fmt.Println("ScrapeMetrics: time: ", clock.MyClock.Since(startTime), "nodes: ", nodeNum, "pods: ", podNum) return res, utilerrors.NewAggregate(errs) } func CollectNode(config *rest.Config, kubelet_client *kubeletClient.KubeletClient, node corev1.Node) (*storage.MetricsBatch, error) { fmt.Println("Func CollectNode Called") fmt.Println("Collect Node Start goroutine : '", node.Name, "'") host := node.Status.Addresses[0].Address token := config.BearerToken summary, err := kubelet_client.GetSummary(host, token) fmt.Println("summary : ", summary) if err != nil { return nil, fmt.Errorf("unable to fetch metrics from Kubelet %s (%s): %v", node.Name, node.Status.Addresses[0].Address, err) } return decode.DecodeBatch(summary) }
[ "\"CLUSTER_NAME\"" ]
[]
[ "CLUSTER_NAME" ]
[]
["CLUSTER_NAME"]
go
1
0
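Comparing constarg and constargjson in this row (and the previous one) suggests that constargjson is simply the JSON-serialized form of constarg; that is an inference from the values shown here, not documented behaviour.

```python
import json

# Inferred relationship between the two columns (assumption based on the values above).
assert json.dumps(["CLUSTER_NAME"]) == '["CLUSTER_NAME"]'
```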
filename: tensorflow_probability/python/internal/hypothesis_testlib.py
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Hypothesis strategies for TFP.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import os import traceback # Dependency imports import hypothesis as hp from hypothesis.extra import numpy as hpnp import hypothesis.strategies as hps import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python.distributions.deprecated_linalg import matrix_diag_transform from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.util.deferred_tensor import DeferredTensor def randomize_hypothesis(): # Use --test_env=TFP_RANDOMIZE_HYPOTHESIS=1 to get random coverage. return bool(int(os.environ.get('TFP_RANDOMIZE_HYPOTHESIS', 0))) def hypothesis_max_examples(default=None): # Use --test_env=TFP_HYPOTHESIS_MAX_EXAMPLES=1000 to get fuller coverage. return int(os.environ.get('TFP_HYPOTHESIS_MAX_EXAMPLES', default or 20)) def tfp_hp_settings(default_max_examples=None, **kwargs): """Default TFP-specific Hypothesis settings.""" # Rationales for deviating from Hypothesis default settings # - Derandomize by default because flaky tests are horrible # - Turn off example database because # - It makes tests flaky on our cluster even if derandomized at the current # internal Hypothesis version (3.65) # - In the future, derandomization will imply ignoring the database setting # anyway # - Having one can't make example runs any faster # - No deadline because our test functions are too slow # - No too_slow health check for the same reason # - Fewer examples by default for the same reason # - Always print `@reproduce_failure` blobs because one never doesn't want # them in the logs kwds = dict( derandomize=not randomize_hypothesis(), database=None, deadline=None, suppress_health_check=[hp.HealthCheck.too_slow], max_examples=hypothesis_max_examples(default=default_max_examples), print_blob=hp.PrintSettings.ALWAYS) kwds.update(kwargs) return hp.settings(**kwds) VAR_USAGES = {} def usage_counting_identity(var): key = (id(var), var.name) VAR_USAGES[key] = VAR_USAGES.get(key, []) + [traceback.format_stack(limit=25)] return tf.identity(var) def defer_and_count_usage(var): return DeferredTensor(var, usage_counting_identity) @contextlib.contextmanager def assert_no_excessive_var_usage(name, max_permissible=2): """Fails if a tagged DeferredTensor is convert_to_tensor'd too much. To set this up, wrap some Variables in `defer_and_count_usage`. Then, if any of them is accessed more than `max_permissible` times in the wrapped block, this will signal an informative error. Args: name: Python `str` naming this var usage counter. max_permissible: Python `int` giving the maximum OK number of times each tagged DeferredTensor may be read. 
Yields: Nothing (it's a context manager). """ VAR_USAGES.clear() yield # TODO(jvdillon): Reduce max_permissible to 1? var_nusages = {var_id_and_name: len(usages) for var_id_and_name, usages in VAR_USAGES.items()} if any(len(usages) > max_permissible for usages in VAR_USAGES.values()): for (_, var_name), usages in VAR_USAGES.items(): if len(usages) > max_permissible: print('While executing {}, saw {} Tensor conversions of {}:'.format( name, len(usages), var_name)) for i, usage in enumerate(usages): print('Conversion {} of {}:\n{}'.format(i + 1, len(usages), ''.join(usage))) raise AssertionError( 'More than {} tensor conversions detected for {}: {}'.format( max_permissible, name, var_nusages)) class Support(object): """Classification of sample spaces and bijector domains and codomains.""" SCALAR_UNCONSTRAINED = 'SCALAR_UNCONSTRAINED' SCALAR_NON_NEGATIVE = 'SCALAR_NON_NEGATIVE' SCALAR_NON_ZERO = 'SCALAR_NON_ZERO' SCALAR_POSITIVE = 'SCALAR_POSITIVE' SCALAR_GT_NEG1 = 'SCALAR_GT_NEG1' SCALAR_IN_NEG1_1 = 'SCALAR_IN_NEG1_1' SCALAR_IN_0_1 = 'SCALAR_IN_0_1' VECTOR_UNCONSTRAINED = 'VECTOR_UNCONSTRAINED' VECTOR_SIZE_TRIANGULAR = 'VECTOR_SIZE_TRIANGULAR' VECTOR_WITH_L1_NORM_1_SIZE_GT1 = 'VECTOR_WITH_L1_NORM_1_SIZE_GT1' VECTOR_STRICTLY_INCREASING = 'VECTOR_STRICTLY_INCREASING' MATRIX_UNCONSTRAINED = 'MATRIX_UNCONSTRAINED' MATRIX_LOWER_TRIL = 'MATRIX_LOWER_TRIL' MATRIX_LOWER_TRIL_POSITIVE_DEFINITE = 'MATRIX_LOWER_TRIL_POSITIVE_DEFINITE' MATRIX_POSITIVE_DEFINITE = 'MATRIX_POSITIVE_DEFINITE' CORRELATION_CHOLESKY = 'CORRELATION_CHOLESKY' OTHER = 'OTHER' ALL_SUPPORTS = None def all_supports(): global ALL_SUPPORTS cls = Support ALL_SUPPORTS = [attr for attr in dir(cls) if not callable(getattr(cls, attr)) and not attr.startswith('__')] all_supports() del all_supports def _scalar_constrainer(support): """Helper for `constrainer` for scalar supports.""" def nonzero(x): return tf.where(tf.equal(x, 0), 1e-6, x) constrainers = { Support.SCALAR_IN_0_1: tf.math.sigmoid, Support.SCALAR_GT_NEG1: softplus_plus_eps(-1 + 1e-6), Support.SCALAR_NON_ZERO: nonzero, Support.SCALAR_IN_NEG1_1: lambda x: tf.math.tanh(x) * (1 - 1e-6), Support.SCALAR_NON_NEGATIVE: tf.math.softplus, Support.SCALAR_POSITIVE: softplus_plus_eps(), Support.SCALAR_UNCONSTRAINED: tf.identity, } if support not in constrainers: raise NotImplementedError(support) return constrainers[support] def _vector_constrainer(support): """Helper for `constrainer` for vector supports.""" def l1norm(x): x = tf.concat([x, tf.ones_like(x[..., :1]) * 1e-6], axis=-1) x = x / tf.linalg.norm(x, ord=1, axis=-1, keepdims=True) return x constrainers = { Support.VECTOR_UNCONSTRAINED: identity_fn, Support.VECTOR_STRICTLY_INCREASING: lambda x: tf.cumsum(tf.abs(x) + 1e-3, axis=-1), Support.VECTOR_WITH_L1_NORM_1_SIZE_GT1: l1norm, Support.VECTOR_SIZE_TRIANGULAR: identity_fn, } if support not in constrainers: raise NotImplementedError(support) return constrainers[support] def _matrix_constrainer(support): """Helper for `constrainer` for matrix supports.""" constrainers = { Support.MATRIX_UNCONSTRAINED: identity_fn, Support.MATRIX_POSITIVE_DEFINITE: positive_definite, Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE: lower_tril_positive_definite, Support.MATRIX_LOWER_TRIL: lower_tril, } if support not in constrainers: raise NotImplementedError(support) return constrainers[support] def constrainer(support): """Determines a constraining transformation into the given support.""" if support.startswith('SCALAR_'): return _scalar_constrainer(support) if support.startswith('VECTOR_'): return 
_vector_constrainer(support) if support.startswith('MATRIX_'): return _matrix_constrainer(support) raise NotImplementedError(support) def min_rank_for_support(support): """Reports the minimum rank of a Tensor in the given support.""" if support.startswith('SCALAR_'): return 0 if support.startswith('VECTOR_'): return 1 if support.startswith('MATRIX_'): return 2 raise NotImplementedError(support) def constrained_tensors(constraint_fn, shape, dtype=np.float32): """Strategy for drawing a constrained Tensor. Args: constraint_fn: Function mapping the unconstrained space to the desired constrained space. shape: Shape of the desired Tensors as a Python list. dtype: Dtype for constrained Tensors. Returns: tensors: A strategy for drawing constrained Tensors of the given shape. """ # TODO(bjp): Allow a wider range of floats. # float32s = hps.floats( # np.finfo(np.float32).min / 2, np.finfo(np.float32).max / 2, # allow_nan=False, allow_infinity=False) floats = hps.floats(-200, 200, allow_nan=False, allow_infinity=False) def mapper(x): x = constraint_fn(tf.convert_to_tensor(x, dtype_hint=dtype)) if dtype_util.is_floating(x.dtype) and tf.executing_eagerly(): # We'll skip this check in graph mode; too expensive. if not np.all(np.isfinite(x.numpy())): raise AssertionError('{} generated non-finite param value: {}'.format( constraint_fn, x.numpy())) return x return hpnp.arrays(dtype=dtype, shape=shape, elements=floats).map(mapper) # pylint: disable=no-value-for-parameter @hps.composite def tensors_in_support(draw, support, batch_shape=None, event_dim=None): """Strategy for drawing Tensors in the given support. Supports have a notion of event shape, which is the trailing dimensions in which the support region may not be axis-aligned (e.g., the event ndims of `VECTOR_STRICTLY_INCREASING` is 1). This strategy produces Tensors with at least the support's event rank, and also an optional batch shape. Args: draw: Hypothesis strategy sampler supplied by `@hps.composite`. support: The `Support` in which the Tensor should live. batch_shape: Optional shape. The returned Tensors will have this batch shape. Hypothesis will pick one if omitted. event_dim: Optional Python int giving the size of each event dimension. This is shared across all event dimensions, permitting square event matrices, etc. If omitted, Hypothesis will choose one. Returns: tensors: A strategy for drawing such Tensors. """ if event_dim is None: event_dim = draw(hps.integers(min_value=2, max_value=6)) if batch_shape is None: batch_shape = tensorshape_util.as_list(draw(shapes())) shape = batch_shape + [event_dim] * min_rank_for_support(support) constraint_fn = constrainer(support) return draw(constrained_tensors(constraint_fn, shape)) @hps.composite def shapes(draw, min_ndims=0, max_ndims=3, min_lastdimsize=1, max_side=None): """Strategy for drawing TensorShapes with some control over rank/dim sizes. Args: draw: Hypothesis strategy sampler supplied by `@hps.composite`. min_ndims: Python `int` giving the minimum rank. max_ndims: Python `int` giving the maximum rank. min_lastdimsize: Python `int`. The trailing dimension will always be at least this large. Ignored if the rank turns out to be 0. max_side: Python `int` giving the maximum size of each dimension Returns: shapes: A strategy for drawing fully-specified TensorShapes obeying these constraints. 
""" rank = draw(hps.integers(min_value=min_ndims, max_value=max_ndims)) shape = tf.TensorShape(None).with_rank(rank) if rank > 0: def resize_lastdim(x): return x[:-1] + (max(x[-1], min_lastdimsize),) if max_side is None: # Apparently we can't pass an explicit None to the Hypothesis strategy? shps = hpnp.array_shapes(min_dims=rank, max_dims=rank) else: shps = hpnp.array_shapes(min_dims=rank, max_dims=rank, max_side=max_side) shape = draw(shps.map(resize_lastdim).map(tf.TensorShape)) return shape def identity_fn(x): return x @hps.composite def broadcasting_params(draw, batch_shape, params_event_ndims, event_dim=None, enable_vars=False, constraint_fn_for=lambda param: identity_fn, mutex_params=(), dtype=np.float32): """Streategy for drawing parameters which jointly have the given batch shape. Specifically, the batch shapes of the returned parameters will broadcast to the requested batch shape. The dtypes of the returned parameters are determined by their respective constraint functions. Args: draw: Hypothesis strategy sampler supplied by `@hps.composite`. batch_shape: A `TensorShape`. The returned parameters' batch shapes will broadcast to this. params_event_ndims: Python `dict` mapping the name of each parameter to a Python `int` giving the event ndims for that parameter. event_dim: Optional Python int giving the size of each parameter's event dimensions (except where overridden by any applicable constraint functions). This is shared across all parameters, permitting square event matrices, compatible location and scale Tensors, etc. If omitted, Hypothesis will choose one. enable_vars: TODO(bjp): Make this `True` all the time and put variable initialization in slicing_test. If `False`, the returned parameters are all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor` `tfp.util.TransformedVariable`}. constraint_fn_for: Python callable mapping parameter name to constraint function. The latter is itself a Python callable which converts an unconstrained Tensor (currently with float32 values from -200 to +200) into one that meets the parameter's validity constraints. mutex_params: Python iterable of Python sets. Each set gives a clique of mutually exclusive parameters (e.g., the 'probs' and 'logits' of a Categorical). At most one parameter from each set will appear in the result. dtype: Dtype for generated parameters. Returns: params: A Hypothesis strategy for drawing Python `dict`s mapping parameter name to a `tf.Tensor`, `tf.Variable`, `tfp.util.DeferredTensor`, or `tfp.util.TransformedVariable`. The batch shapes of the returned parameters broadcast together to the supplied `batch_shape`. Only parameters whose names appear as keys in `params_event_ndims` will appear (but possibly not all of them, depending on `mutex_params`). 
""" if event_dim is None: event_dim = draw(hps.integers(min_value=2, max_value=6)) params_event_ndims = params_event_ndims or {} remaining_params = set(params_event_ndims.keys()) params_to_use = [] while remaining_params: param = draw(hps.sampled_from(sorted(remaining_params))) params_to_use.append(param) remaining_params.remove(param) for mutex_set in mutex_params: if param in mutex_set: remaining_params -= mutex_set param_batch_shapes = draw( broadcasting_named_shapes(batch_shape, params_to_use)) params_kwargs = dict() for param in params_to_use: param_batch_shape = param_batch_shapes[param] param_event_rank = params_event_ndims[param] param_shape = (tensorshape_util.as_list(param_batch_shape) + [event_dim] * param_event_rank) # Reduce our risk of exceeding TF kernel broadcast limits. hp.assume(len(param_shape) < 6) # TODO(axch): Can I replace `params_event_ndims` and `constraint_fn_for` # with a map from params to `Suppport`s, and use `tensors_in_support` here # instead of this explicit `constrained_tensors` function? param_strategy = constrained_tensors( constraint_fn_for(param), param_shape, dtype=dtype) params_kwargs[param] = draw(maybe_variable( param_strategy, enable_vars=enable_vars, dtype=dtype, name=param)) return params_kwargs @hps.composite def maybe_variable(draw, strategy, enable_vars=False, dtype=None, name=None): """Strategy for drawing objects that should sometimes be tf.Variables. Args: draw: Hypothesis strategy sampler supplied by `@hps.composite`. strategy: Hypothesis strategy for drawing suitable values enable_vars: TODO(bjp): Make this `True` all the time and put variable initialization in slicing_test. If `False`, the returned parameters are never {`tf.Variable`, `tfp.util.DeferredTensor` `tfp.util.TransformedVariable`}. dtype: Dtype for generated parameters. name: Name for the produced `Tensor`s and `Variable`s, if any. Returns: strategy: A Hypothesis strategy for drawing a value, `tf.Variable`, `tfp.util.DeferredTensor`, or `tfp.util.TransformedVariable`. The `DeferredTensor`s are sometimes instrumented to count how many times they are concretized. """ result = tf.convert_to_tensor(draw(strategy), dtype_hint=dtype, name=name) if enable_vars and draw(hps.booleans()): result = tf.Variable(result, name=name) if name is None: alt_name = None else: alt_name = '{}_alt_value'.format(name) alt_value = tf.convert_to_tensor( draw(strategy), dtype_hint=dtype, name=alt_name) # This field provides an acceptable alternate value, to enable tests that # mutate the Variable (once). setattr(result, '_tfp_alt_value', alt_value) if draw(hps.booleans()): result = defer_and_count_usage(result) return result @hps.composite def broadcasting_named_shapes(draw, batch_shape, param_names): """Strategy for drawing a set of batch shapes that broadcast to `batch_shape`. For each parameter we need to choose its batch rank, and whether or not each axis i is 1 or batch_shape[i]. This function chooses a set of shapes that have possibly mismatched ranks, and possibly broadcasting axes, with the promise that the broadcast of the set of all shapes matches `batch_shape`. Args: draw: Hypothesis strategy sampler supplied by `@hps.composite`. batch_shape: `tf.TensorShape`, the target (fully-defined) batch shape. param_names: Iterable of `str`, the parameters whose batch shapes need determination. Returns: param_batch_shapes: A strategy for drawing `dict`s of `str->tf.TensorShape` where the set of shapes broadcast to `batch_shape`. The shapes are fully defined. 
""" n = len(param_names) return dict( zip(draw(hps.permutations(param_names)), draw(broadcasting_shapes(batch_shape, n)))) def _compute_rank_and_fullsize_reqd(draw, target_shape, current_shape, is_last): """Returns a param rank and a list of bools for full-size-required by axis. Args: draw: Hypothesis data sampler. target_shape: `tf.TensorShape`, the target broadcasted shape. current_shape: `tf.TensorShape`, the broadcasted shape of the shapes selected thus far. This is ignored for non-last shapes. is_last: bool indicator of whether this is the last shape (in which case, we must achieve the target shape). Returns: next_rank: Sampled rank for the next shape. force_fullsize_dim: `next_rank`-sized list of bool indicating whether the corresponding axis of the shape must be full-sized (True) or is allowed to be 1 (i.e., broadcast) (False). """ target_rank = target_shape.ndims if is_last: # We must force full size dim on any mismatched axes, and proper rank. full_rank_current = tf.broadcast_static_shape( current_shape, tf.TensorShape([1] * target_rank)) # Identify axes in which the target shape is not yet matched. axis_is_mismatched = [ full_rank_current[i] != target_shape[i] for i in range(target_rank) ] min_rank = target_rank if current_shape.ndims == target_rank: # Current rank might be already correct, but we could have a case like # batch_shape=[4,3,2] and current_batch_shape=[4,1,2], in which case # we must have at least 2 axes on this param's batch shape. min_rank -= (axis_is_mismatched + [True]).index(True) next_rank = draw(hps.integers(min_value=min_rank, max_value=target_rank)) # Get the last param_batch_rank (possibly 0!) items. force_fullsize_dim = axis_is_mismatched[target_rank - next_rank:] else: # There are remaining params to be drawn, so we will be able to force full # size axes on subsequent params. next_rank = draw(hps.integers(min_value=0, max_value=target_rank)) force_fullsize_dim = [False] * next_rank return next_rank, force_fullsize_dim def broadcast_compatible_shape(shape): """Strategy for drawing shapes broadcast-compatible with `shape`.""" # broadcasting_shapes draws a sequence of shapes, so that the last "completes" # the broadcast to fill out batch_shape. Here we just draw two and take the # first (incomplete) one. return broadcasting_shapes(shape, 2).map(lambda shapes: shapes[0]) @hps.composite def broadcasting_shapes(draw, target_shape, n): """Strategy for drawing a set of `n` shapes that broadcast to `target_shape`. For each shape we need to choose its rank, and whether or not each axis i is 1 or target_shape[i]. This function chooses a set of `n` shapes that have possibly mismatched ranks, and possibly broadcasting axes, with the promise that the broadcast of the set of all shapes matches `target_shape`. Args: draw: Hypothesis strategy sampler supplied by `@hps.composite`. target_shape: The target (fully-defined) batch shape. n: Python `int`, the number of shapes to draw. Returns: shapes: A strategy for drawing sequences of `tf.TensorShape` such that the set of shapes in each sequence broadcast to `target_shape`. The shapes are fully defined. """ target_shape = tf.TensorShape(target_shape) target_rank = target_shape.ndims result = [] current_shape = tf.TensorShape([]) for is_last in [False] * (n - 1) + [True]: next_rank, force_fullsize_dim = _compute_rank_and_fullsize_reqd( draw, target_shape, current_shape, is_last=is_last) # Get the last next_rank (possibly 0!) dimensions. 
next_shape = target_shape[target_rank - next_rank:].as_list() for i, force_fullsize in enumerate(force_fullsize_dim): if not force_fullsize and draw(hps.booleans()): # Choose to make this param broadcast against some other param. next_shape[i] = 1 next_shape = tf.TensorShape(next_shape) current_shape = tf.broadcast_static_shape(current_shape, next_shape) result.append(next_shape) return result # Utility functions for constraining parameters and/or domain/codomain members. def softplus_plus_eps(eps=1e-6): return lambda x: tf.nn.softplus(x) + eps def symmetric(x): return (x + tf.linalg.matrix_transpose(x)) / 2 def positive_definite(x): shp = tensorshape_util.as_list(x.shape) psd = ( tf.matmul(x, x, transpose_b=True) + .1 * tf.linalg.eye(shp[-1], batch_shape=shp[:-2])) return symmetric(psd) def lower_tril_positive_definite(x): return tf.linalg.band_part( matrix_diag_transform(x, softplus_plus_eps()), -1, 0) def lower_tril(x): return tf.linalg.band_part(x, -1, 0)
environment: []
variablearg: []
constarg: [ "TFP_HYPOTHESIS_MAX_EXAMPLES", "TFP_RANDOMIZE_HYPOTHESIS" ]
variableargjson: []
constargjson: ["TFP_HYPOTHESIS_MAX_EXAMPLES", "TFP_RANDOMIZE_HYPOTHESIS"]
lang: python
constargcount: 2
variableargcount: 0
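In this Python row both lookups use literal names (os.environ.get('TFP_RANDOMIZE_HYPOTHESIS', 0) and os.environ.get('TFP_HYPOTHESIS_MAX_EXAMPLES', default or 20)), which is consistent with constargcount being 2 while variablearg stays empty. The sketch below illustrates the constant-name versus variable-name distinction the columns appear to draw; the helper name and the use of Python's ast module are assumptions for illustration only.

```python
import ast

def classify_env_lookups(py_source: str) -> tuple[list[str], int]:
    """Split os.environ.get(...) calls into constant names and variable-name lookups.

    Hypothetical helper: literal keys (e.g. 'TFP_RANDOMIZE_HYPOTHESIS') are
    returned by name, while lookups whose key is a variable or expression are
    only counted. Illustration only, not the dataset's actual extraction code.
    """
    const_names, variable_count = [], 0
    for node in ast.walk(ast.parse(py_source)):
        if (isinstance(node, ast.Call)
                and isinstance(node.func, ast.Attribute)
                and node.func.attr == "get"
                and isinstance(node.func.value, ast.Attribute)
                and node.func.value.attr == "environ"):
            key = node.args[0] if node.args else None
            if isinstance(key, ast.Constant) and isinstance(key.value, str):
                const_names.append(key.value)
            else:
                variable_count += 1
    return sorted(set(const_names)), variable_count
```

Run over hypothesis_testlib.py above, this returns (['TFP_HYPOTHESIS_MAX_EXAMPLES', 'TFP_RANDOMIZE_HYPOTHESIS'], 0), matching the counts in this row.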
filename: proto-build/gui/UcsSdk-0.5/src/UcsSdk/WatchUcsGui.py
#!/usr/bin/python import os, sys, platform, time, glob import re import xml.dom import xml.dom.minidom from os.path import dirname from Constants import * from UcsHandle import _AffirmativeList from UcsBase import * # variable declaration displayXml=False outFileFlag=False outFilePath=None outFile=None andCount = 0 orCount = 0 notCount = 0 multiLineMethod = ["<configConfMo","<lsInstantiateNNamedTemplate","<statsClearInterval","<lsClone","<lsTemplatise","<lsInstantiateTemplate","<lsInstantiateNTemplate"] singleLineMethod = ["</configConfMo","</lsInstantiateNNamedTemplate","</statsClearInterval","</lsClone","</lsTemplatise","</lsInstantiateTemplate","</lsInstantiateNTemplate","/>"] # class declaration class ClassStatus: none = 0 created = 1 modified = 2 removed = 4 deleted = 8 ### Function Definition #-------------------------------------- START - OF - GENERIC - FUCNTION ----------------------------------- ## Get the ClassID for a given DN #==================================================== def GetClassIdForDn(dn): rns = dn.split('/') rnCount = len(rns) classId = None parentClassId =None for rn in rns: classId = GetClassIdForRn(rn, parentClassId) if classId == None: break parentClassId = classId return classId #====================== End Of Function <GetClassIdForDn> ================ ### Get the ClassID for a given RN #==================================================== def GetClassIdForRn(rn, prevClassId=None): if not prevClassId: prevClassId = "TopRoot" metaClassId = UcsUtils.FindClassIdInMoMetaIgnoreCase(prevClassId) if (metaClassId == None): WriteUcsWarning('[Error]: GetManagedObject: classId [%s] is not valid' %(prevClassId)) return None moMeta = UcsUtils.GetUcsPropertyMeta(metaClassId, "Meta") if moMeta == None: WriteUcsWarning('[Error]: GetManagedObject: moMeta for classId [%s] is not valid' %(prevClassId)) return for childClassId in moMeta.childFieldNames: # 1. If Rn does not contain [, then there is no naming property. Check directly. # 2. If Rn contains [, # 2.1) Check if the Rn contains a "-" and ChildClassId.Rn does not, then proceed to next childClassId. # substitute \[[^\]]*\] with .* and do a regex match # 2.2) If Rn does not contain a "-", then probably the childClassId has only the naming property # without any suffix or perfix. Check for "[*]". Need to see, if there is a possibility of # more than one [*] directly under a ClassId #childClassId = "orgorg" childClassId = UcsUtils.FindClassIdInMoMetaIgnoreCase(childClassId) childMoMeta = UcsUtils.GetUcsPropertyMeta(childClassId, "Meta") if childMoMeta == None: continue # 2. 
Check if there is a naming property match = re.search(r"(\[[^\]]+\])",childMoMeta.rn) if not match: if childMoMeta.rn == rn: return childMoMeta.name continue # 2.1 Check if it has a prefix or suffix match = re.search(r"(([^\]]\[)|(\][^\[]))",childMoMeta.rn) if match: modmetaRn = re.sub(r"\[([^\]]+)\]",r"(?P<\1>.*?)",childMoMeta.rn) if re.match(r"\|",modmetaRn): modmetaRn = re.sub(r"\|",r"\|",modmetaRn) pattern = "^" + modmetaRn + "$" if re.match(pattern,rn): return childMoMeta.name else: continue for childClassId in moMeta.childFieldNames: childMoMeta = UcsUtils.GetUcsPropertyMeta(childClassId, "Meta") if childMoMeta == None: continue match = re.match("^(\[[^\]]+\])+$",childMoMeta.rn) if match: return childMoMeta.name return None #====================== End Of Function <GetClassIdForRn> ================ ### Modify the Property Name #==================================================== def GetPropName(prop): newProp = re.sub('_+','_',re.sub('^_','',re.sub('[/\-: +]','_',re.sub('([a-z0-9])([A-Z])','\g<1>_\g<2>',prop)))).upper() return newProp #====================== End Of Function <GetPropName> ================ ### makes first letter of string capital #==================================================== def FirstCapital(string): string = string[:1].upper() + string[1:] return string #====================== End Of Function <FirstCapital> ================ ### check if node is root node #================================================ def IsRootNode(dom,tagName): rootNodeTagName = dom.documentElement.tagName if rootNodeTagName == tagName: return True else: return False #===================== End Of Function <IsRootNode> =================== ### get pairnodes in a list #================================================ def GetPairNodes(rootNode): methodElement = rootNode inConfigsElementList = methodElement.getElementsByTagName("inConfigs") inConfigsElement = inConfigsElementList[0] pairElementsList = inConfigsElement.getElementsByTagName("pair") return pairElementsList #====================== End Of Function <GetPairNodes> ================= ### use if parent only has single childnode #================================================ def GetOnlyElementChildNode(node): childList = [childNode for childNode in node.childNodes if childNode.nodeType == childNode.ELEMENT_NODE ] return childList[0] #====================== End Of Function <GetOnlyElementChildNode> ============= ### use if parent has more than one child #================================================ def GetElementChildNodes(node): childList = [childNode for childNode in node.childNodes if childNode.nodeType == childNode.ELEMENT_NODE ] return childList #====================== End Of Function <GetElementChildNodes> =============== ### used to dump xml on screen #================================================ def DumpXmlOnScreen(doc): global outFilePath, outFileFlag xmlNew = doc.toprettyxml(indent=" ") xmlNew = re.sub(r"^.*?xml version.*\n","",xmlNew) xmlNew = re.sub(r"\n[\s]*\n","\n",xmlNew) xmlNew = re.sub(r"^(.*)",r"#\1",xmlNew,flags=re.MULTILINE) if outFileFlag: outFile = open(outFilePath, 'a') print >>outFile, "\n##### Start-Of-XML #####\n%s\n##### End-Of-XML #####" %(xmlNew) outFile.close() else: print "\n##### Start-Of-XML #####\n" + xmlNew + "\n##### End-Of-XML #####\n" #===================== End Of Function <DumpXmlOnScreen> ================ ### create a string of dictionary propertyMap #================================================ def CreatePythonPropertyMap(propertyMap): s = "{" for key,value in propertyMap.iteritems(): 
s = s + key + ":" + value + ", " if s != "{": s = s[:-2] # removes last 2 char return (s + "}") #============================= End Of Function <CreatePythonPropertyMap> =================== ### Returns True if any of the list value present in line #================================================ def CheckIfAnyListValueInString (listx,line): flag = False for value in listx: if value in line: flag = True break return flag #============================= End Of Function <CheckIfAnyListValueInString> =================== #-------------------------------------- END - OF - GENERIC - FUCNTION ----------------------------------- #-------------------------------- START - OF - METHOD SPECIFIC - FUNCTION -------------------------------- ### Used in function GenerateConfigConfCmdlets #================================================ def GetConfigConfCmdlet(node,isPairNode): propertyMap = {} cmdlet = "" includeDnInPropMap = False if node is None: return None classNode = None key = "" if (isPairNode): key = node.getAttribute(NamingPropertyId.KEY) classNode = GetOnlyElementChildNode(node) if classNode is None: return None else: key = node.getAttribute(NamingPropertyId.DN) classNode = node className = classNode.localName cmdletMeta = None dn = "" if classNode.hasAttribute(NamingPropertyId.DN): dn = classNode.getAttribute(NamingPropertyId.DN) moTag = "" if classNode.hasChildNodes() and len(GetElementChildNodes(classNode)) > 0 : moTag = "mo" cmdlet = FormPythonCmdlet(classNode, key, moTag) if classNode.hasChildNodes() and len(GetElementChildNodes(classNode)) > 0 : callCount = 1 cmdlet = "handle.StartTransaction()" + "\n" + cmdlet for childNode in GetElementChildNodes(classNode): subCmdlet = GetConfigConfSubCmdlet(childNode, dn, moTag, callCount) callCount += 1 if subCmdlet is not None: cmdlet += "\n" + subCmdlet else: callCount -= 1 cmdlet += "\n" + "handle.CompleteTransaction()" return cmdlet #================================= End Of Function <GetConfigConfCmdlet> ============================== ### Used in function GenerateConfigConfCmdlets #========================================================================================================= def GetConfigConfSubCmdlet(classNode, parentDn, parentMoTag, parentCallCount, useGenericVersion = False): cmdlet = "" className = classNode.localName cmdletMeta = None dn ="" useParentMo = False ##if the parent mo should be used at this level propertyMap = {} if classNode.hasAttribute(NamingPropertyId.DN): dn = classNode.getAttribute(NamingPropertyId.DN) elif classNode.hasAttribute(NamingPropertyId.RN): dn = dn = parentDn + "/" + classNode.getAttribute(NamingPropertyId.RN) count = 1 tag = parentMoTag + "_" + str(parentCallCount) cmdlet = FormPythonSubCmdlet(classNode, dn, tag, parentMoTag) ## Recursively cater to subnodes for childNode in GetElementChildNodes(classNode): subCmdlet = GetConfigConfSubCmdlet(childNode, dn, tag, count) count += 1 if subCmdlet is not None: cmdlet += "\n" + subCmdlet else: count -= 1 return cmdlet #====================================== End Of Function <GetConfigConfSubCmdlet> ========================================= ### Used in function GenerateConfigConfCmdlets #===================================================================== def FormPythonCmdlet(classNode, key, tag): cmdlet = "" classStatus = ClassStatus.none propertyMap = {} if (classNode.hasAttribute(NamingPropertyId.STATUS) and classNode.getAttribute(NamingPropertyId.STATUS) is not None): cs = [] cs = classNode.getAttribute(NamingPropertyId.STATUS).split(',') cs = [ x.strip() 
for x in cs ] if Status.CREATED in cs: classStatus |= ClassStatus.created if Status.MODIFIED in cs: classStatus |= ClassStatus.modified if Status.DELETED in cs: classStatus |= ClassStatus.deleted if Status.REMOVED in cs: classStatus |= ClassStatus.removed else: classStatus = ClassStatus.created | ClassStatus.modified # support to handle unknown MOs if UcsUtils.FindClassIdInMoMetaIgnoreCase(classNode.localName) == None: gmoFlag = True else: gmoFlag = False parentDn = dirname(key) if not gmoFlag: peerClassId = FirstCapital(classNode.localName) peerClassIdStr = peerClassId + ".ClassId()" dnStr = '.DN' else: peerClassId = "" peerClassIdStr = '"'+(classNode.localName)+'"' dnStr = '"dn"' if GetClassIdForDn(parentDn) == None: parentClassId = "" parentClassIdStr = "None" parentDnStr = '"dn"' else: parentClassId = GetClassIdForDn(parentDn) parentClassIdStr = parentClassId + ".ClassId()" parentDnStr = '.DN' ## create property map for attributes for attr, val in classNode.attributes.items(): name = attr value = '"' + val + '"' if ( name != NamingPropertyId.DN and name != NamingPropertyId.RN and name != NamingPropertyId.STATUS ): if name.lower() == "Filter".lower(): paramNameToUse = "FilterValue" else: paramNameToUse = name if paramNameToUse is not None: if not gmoFlag and UcsUtils.GetUcsPropertyMeta(peerClassId, FirstCapital(paramNameToUse)) is not None: paramNameToUse = peerClassId + '.' + GetPropName(paramNameToUse) else: paramNameToUse = '"'+ paramNameToUse + '"' propertyMap[paramNameToUse] = value tagElement = "" if tag: tagElement = tag + " = " ## make cmdlet if (classStatus & ClassStatus.deleted == ClassStatus.deleted) or (classStatus & ClassStatus.removed == ClassStatus.removed): cmdlet = "obj = handle.GetManagedObject(None, %s, {%s%s:\"%s\"})\n%shandle.RemoveManagedObject(obj)" %(peerClassIdStr, peerClassId, dnStr, key, tagElement) #06Dec elif (classStatus & ClassStatus.created == ClassStatus.created): if not gmoFlag: propertyMap[FirstCapital(classNode.localName) + '.DN'] = '"' + key +'"' else: propertyMap['"dn"'] = '"' + key +'"' #parentDn = dirname(key) if (classStatus & ClassStatus.modified == ClassStatus.modified): cmdlet = "obj = handle.GetManagedObject(None, %s, {%s%s:\"%s\"})\n%shandle.AddManagedObject(obj, %s, %s, True)" %(parentClassIdStr, parentClassId, parentDnStr, parentDn, tagElement, peerClassIdStr, CreatePythonPropertyMap(propertyMap)) #06Dec else: cmdlet = "obj = handle.GetManagedObject(None, %s, {%s%s:\"%s\"})\n%shandle.AddManagedObject(obj, %s, %s)" %(parentClassIdStr, parentClassId, parentDnStr, parentDn, tagElement, peerClassIdStr, CreatePythonPropertyMap(propertyMap)) #06Dec elif (classStatus & ClassStatus.modified == ClassStatus.modified): cmdlet = "obj = handle.GetManagedObject(None, %s, {%s%s:\"%s\"})\n%shandle.SetManagedObject(obj, %s, %s)" %(peerClassIdStr, peerClassId, dnStr, key, tagElement, peerClassIdStr, CreatePythonPropertyMap(propertyMap)) #06Dec else: print "Throw Exception XML request status (%s) is invalid." 
%(classNode.getAttribute(NamingPropertyId.STATUS)) return cmdlet #======================================= End Of Function <FormPythonCmdlet> ==================================== ### Used in function GenerateConfigConfCmdlets #===================================================================== def FormPythonSubCmdlet(classNode, key, moTag, parentMoTag): cmdlet = "" classStatus = ClassStatus.none propertyMap = {} if (classNode.hasAttribute(NamingPropertyId.STATUS) and classNode.getAttribute(NamingPropertyId.STATUS) is not None): cs = [] cs = classNode.getAttribute(NamingPropertyId.STATUS).split(',') cs = [ x.strip() for x in cs ] if Status.CREATED in cs: classStatus |= ClassStatus.created if Status.MODIFIED in cs: classStatus |= ClassStatus.modified if Status.DELETED in cs: classStatus |= ClassStatus.deleted if Status.REMOVED in cs: classStatus |= ClassStatus.removed else: classStatus = ClassStatus.created | ClassStatus.modified # support for unknown MO #06Dec if UcsUtils.FindClassIdInMoMetaIgnoreCase(classNode.localName) == None: gmoFlag = True else: gmoFlag = False if not gmoFlag: peerClassId = FirstCapital(classNode.localName) peerClassIdStr = peerClassId + ".ClassId()" dnStr = '.DN' else: peerClassId = "" peerClassIdStr = '"'+(classNode.localName)+'"' dnStr = '"dn"' ## create property map for attributes for attr, val in classNode.attributes.items(): name = attr value = '"' + val + '"' if ( name != NamingPropertyId.DN and name != NamingPropertyId.RN and name != NamingPropertyId.STATUS ): if name.lower() == "Filter".lower(): paramNameToUse = "FilterValue" else: paramNameToUse = name if paramNameToUse is not None: if not gmoFlag and UcsUtils.GetUcsPropertyMeta(peerClassId, FirstCapital(paramNameToUse)) is not None: paramNameToUse = peerClassId + '.' + GetPropName(paramNameToUse) else: paramNameToUse = '"'+ paramNameToUse + '"' propertyMap[paramNameToUse] = value ## make cmdlet if (classStatus & ClassStatus.deleted == ClassStatus.deleted) or (classStatus & ClassStatus.removed == ClassStatus.removed): cmdlet = "obj = handle.GetManagedObject(None, %s, {%s%s:\"%s\"})\n%s = handle.RemoveManagedObject(obj)" %( peerClassIdStr, peerClassId, dnStr, key, moTag) #06Dec elif (classStatus & ClassStatus.created == ClassStatus.created): if not gmoFlag: propertyMap[FirstCapital(classNode.localName) + '.DN'] = '"' + key +'"' else: propertyMap['"dn"'] = '"' + key +'"' parentDn = dirname(key) if (classStatus & ClassStatus.modified == ClassStatus.modified): cmdlet = "%s = handle.AddManagedObject(%s, %s, %s, True)" %(moTag, parentMoTag, peerClassIdStr, CreatePythonPropertyMap(propertyMap)) #27Nov #06Dec else: cmdlet = "%s = handle.AddManagedObject(%s, %s, %s)" %(moTag, parentMoTag, peerClassIdStr, CreatePythonPropertyMap(propertyMap)) #27Nov #06Dec elif (classStatus & ClassStatus.Modified == ClassStatus.Modified): cmdlet = "obj = handle.GetManagedObject(None, %s, {%s%s:\"%s\"})\n%s = handle.SetManagedObject(obj, %s, %s" %( peerClassIdStr, peerClassId, dnStr, key, moTag, FirstCapital(classNode.localName), CreatePythonPropertyMap(propertyMap)) #27Nov#06Dec else: print "Throw Exception XML request status (%s) is invalid." %(classNode.getAttribute(NamingPropertyId.STATUS)) return cmdlet #================================== End Of Function <FormPythonSubCmdlet> =============================== ### Takes xmlstring, and generate script for configConfMos and configConfMos methods. 
#===================================================================================== def GenerateConfigConfCmdlets(xmlString): doc = xml.dom.minidom.parseString(xmlString) topNode = doc.documentElement if topNode is None: return cmdlet = "" # Added Later On if len(topNode.getElementsByTagName("inConfigs")) <> 0: pairNodes = GetElementChildNodes(topNode.getElementsByTagName("inConfigs")[0]) else: pairNodes = None if pairNodes is None or len(pairNodes) < 1 : node = topNode.getElementsByTagName("inConfig")[0] if node is None: return node = GetOnlyElementChildNode(node) if node is None: return cmdlet = GetConfigConfCmdlet(node, False) elif len(pairNodes) > 1: tempCmdlet = "" tempDn = "" tempMo = "" count = 0 dictMos = {} cmdlet = "handle.StartTransaction()" + "\n" for node in pairNodes: tempDn = node.getAttribute(NamingPropertyId.KEY) tempMo = "mo" if count > 0: tempMo += str(count) if tempDn not in dictMos: dictMos[tempDn] = tempMo ##check if parent is already there in dictionary childDn = os.path.basename(tempDn) parentDn = os.path.dirname(tempDn) if parentDn in dictMos: node.setAttribute(NamingPropertyId.KEY, childDn) tempCmdlet = GetConfigConfCmdlet(node, True) cmdlet += tempMo + " = " + dictMos[parentDn] + " | " + tempCmdlet + "\n" else: tempCmdlet = GetConfigConfCmdlet(node, True) cmdlet += tempCmdlet + "\n" count += 1 cmdlet += "handle.CompleteTransaction()" else: cmdlet = GetConfigConfCmdlet(pairNodes[0], True) if cmdlet is not None: return cmdlet #04dec #========================================= End Of Function <GenerateConfigConfCmdlets> =================================== ### Takes xmlstring, and generate script for configResolveDn, configResolveDns, configResolveClass and configResolveClasses methods. #=========================================================================================================================== def GenerateConfigResolveCmdlet(xmlString, method): ## create the document object doc = xml.dom.minidom.parseString(xmlString) cmdlet= "" if IsRootNode(doc,method): rootNodeElement = doc.documentElement else: return ##<configResolveDn> if method == "configResolveDn": topNode = doc.documentElement if topNode is None: return dn = topNode.getAttribute(NamingPropertyId.DN) inHierarchical = "" if topNode.hasAttribute("inHierarchical"): inHierarchical = topNode.getAttribute("inHierarchical") if inHierarchical.lower() == "true": inHierarchicalValue = "YesOrNo.TRUE" else: inHierarchicalValue = "YesOrNo.FALSE" cmdlet = "handle.ConfigResolveDn(\"%s\", %s)" %(dn, inHierarchicalValue) ##<configResolveDns> elif method == "configResolveDns": topNode = doc.documentElement if topNode is None: return inHierarchical = "" if topNode.hasAttribute("inHierarchical"): inHierarchical = topNode.getAttribute("inHierarchical") dnNodes = GetElementChildNodes(topNode.getElementsByTagName("inDns")[0]) if dnNodes is None: return cmdlet = "dnSet = DnSet()" + "\n" tempDn = "" for node in dnNodes: tempDn = node.getAttribute(NamingPropertyId.VALUE) cmdlet += "dn = Dn()\ndn.setattr(\"Value\",\"%s\")\ndnSet.AddChild(dn)\n" %(tempDn) if inHierarchical.lower() == "true": inHierarchicalValue = "YesOrNo.TRUE" else: inHierarchicalValue = "YesOrNo.FALSE" cmdlet += "handle.ConfigResolveDns(dnSet, %s)" %(inHierarchicalValue) ##<configResolveClass> elif method == "configResolveClass": andCount = 0 orCount = 0 notCount = 0 topNode = doc.documentElement filterNode = GetOnlyElementChildNode(topNode) print filterNode if topNode is None or filterNode is None: return inHierarchical = "" if 
topNode.hasAttribute("inHierarchical") is not None: inHierarchical = topNode.getAttribute("inHierarchical") classId = "" if topNode.hasAttribute("classId") is not None: classId = topNode.getAttribute("classId") if inHierarchical.lower() == "true": inHierarchicalValue = "YesOrNo.TRUE" else: inHierarchicalValue = "YesOrNo.FALSE" cmdlet = "inFilter = FilterFilter()\n" + CreatePythonFilterCode(filterNode, "inFilter") + "handle.ConfigResolveClass(\"%s\", inFilter, %s)" %(classId, inHierarchicalValue) ##<configResolveClasses> elif method == "configResolveClasses": topNode = doc.documentElement if topNode is None: return inHierarchical = "" if topNode.hasAttribute("inHierarchical") is not None: inHierarchical = topNode.getAttribute("inHierarchical") classIdNodes = GetElementChildNodes(topNode.getElementsByTagName("inIds")[0]) cmdlet = "idSet = ClassIdSet()" + "\n" tempClassId = "" for node in classIdNodes: tempClassId = node.getAttribute(NamingPropertyId.VALUE) cmdlet += "clsId = ClassId()\nclsId.setattr(\"Value\",\"%s\")\nidSet.AddChild(clsId)\n" %(tempClassId) if inHierarchical.lower() == "true": inHierarchicalValue = "YesOrNo.TRUE" else: inHierarchicalValue = "YesOrNo.FALSE" cmdlet += "handle.ConfigResolveClasses(idSet, %s)" %(inHierarchicalValue) return cmdlet #04dec #===================================== End Of Function <GenerateConfigResolveCmdlet> =================================== ### provide filter support #======================================================================================== def CreatePythonFilterCode(parentNode, parentFilterName): cmdlet = "" filterName = "" tempName = "" for node in GetElementChildNodes(parentNode): if node.localName == "and": tempName = "andFilter" + str(andCount) andCount += 1 cmdlet = tempName + + " = AndFilter()\n" + CreatePythonFilterCode(node, tempName) + parentFilterName + ".AddChild(" + tempName +")\n" continue if node.localName == "or": tempName = "orFilter" + str(orCount) orCount += 1 cmdlet = tempName + + " = OrFilter()\n" + CreatePythonFilterCode(node, tempName) + parentFilterName + ".AddChild(" + tempName +")\n" continue if node.localName == "not": tempName = "notFilter" + str(notCount) notCount += 1 cmdlet = tempName + + " = NotFilter()\n" + CreatePythonFilterCode(node, tempName) + parentFilterName + ".AddChild(" + tempName +")\n" continue if node.localName == "eq": filterName = "eqFilter" if node.localName == "ne": filterName = "neFilter" if node.localName == "gt": filterName = "gtFilter" if node.localName == "lt": filterName = "ltFilter" if node.localName == "le": filterName = "leFilter" if node.localName == "wcard": filterName = "wcardFilter" if node.localName == "anybit": filterName = "anybitFilter" if node.localName == "allbits": filterName = "allbitsFilter" if node.localName == "bw": filterName = "bwFilter" cmdlet += filterName + " = " + FirstCapital(filterName) + "()\n" for name, value in node.attributes.items(): cmdlet += "%s.%s = \"%s\"\n" %(filterName, FirstCapital(name), value) cmdlet += parentFilterName + ".AddChild(" + filterName + ")\n" return cmdlet #========================================= End Of Function <CreatePythonFilterCode> ==================================== ### Function to handle method <lsClone> and <lsInstantiateTemplate> #======================================================================================== def GenerateSingleCloneCmdlets(xmlString, isTemplate): ### create the document object doc = xml.dom.minidom.parseString(xmlString) node = None topNode = doc.documentElement if topNode is None: return 
if isTemplate: if topNode.localName == "lsInstantiateTemplate": node = topNode else: if topNode.localName == "lsClone": node = topNode dn = "" if node.hasAttribute(NamingPropertyId.DN): dn = node.getAttribute(NamingPropertyId.DN) else: print "Attribute 'Dn' not available" ## writewarning in dotnet return spNewName = "" if node.hasAttribute("inServerName"): spNewName = node.getAttribute("inServerName") else: print "Attribute 'inServerName' not available" ## writewarning in dotnet return destOrg ="" if node.hasAttribute("inTargetOrg"): destOrg = node.getAttribute("inTargetOrg") spName = re.sub(r"^(org-[\-\.:_a-zA-Z0-9]{1,16}/)*org-[\-\.:_a-zA-Z0-9]{1,16}/ls-","",dn) sourceOrg = "" matchObject = re.match(r"^(org-[\-\.:_a-zA-Z0-9]{1,16}/)*org-[\-\.:_a-zA-Z0-9]{1,16}",dn) if matchObject is not None: sourceOrg = matchObject.group(0) else: print "Attribute 'Dn' is corrupt" return cmdlet = "" inHierarchical = "" if node.hasAttribute("inHierarchical") is not None: inHierarchical = node.getAttribute("inHierarchical") if inHierarchical.lower() == "true": inHierarchicalValue = "YesOrNo.TRUE" else: inHierarchicalValue = "YesOrNo.FALSE" if isTemplate: cmdlet = "handle.lsInstantiateTemplate(\"%s\", \"%s\", \"%s\", %s)" %(dn, spNewName, destOrg, inHierarchicalValue) else: cmdlet = "handle.LsClone(\"%s\", \"%s\", \"%s\", %s)" %(dn, spNewName, destOrg, inHierarchicalValue) return cmdlet #04dec #=================================== End Of Function <GenerateSingleCloneCmdlets> ===================================== ### Function to handle method <lsTemplatise> #======================================================================================== def GenerateLsTemplatiseCmdlets(xmlString): doc = xml.dom.minidom.parseString(xmlString) node = None topNode = doc.documentElement if topNode is None: return if topNode.localName == "lsTemplatise": node = topNode else: print "Check if Method is <lsTemplatise>" return dn = "" if node.hasAttribute(NamingPropertyId.DN): dn = node.getAttribute(NamingPropertyId.DN) else: print "Attribute 'Dn' not available" ## writewarning in dotnet return spNewName = "" if node.hasAttribute("inTemplateName"): spNewName = node.getAttribute("inTemplateName") else: print "Attribute 'inTemplateName' not available" ## writewarning in dotnet return templateType = "" if node.hasAttribute("inTemplateType"): templateType = node.getAttribute("inTemplateType") else: print "Attribute 'inTemplateType' not available" ## writewarning in dotnet return destOrg = "" if node.hasAttribute("inTargetOrg"): destOrg = node.getAttribute("inTargetOrg") spName = re.sub(r"^(org-[\-\.:_a-zA-Z0-9]{1,16}/)*org-[\-\.:_a-zA-Z0-9]{1,16}/ls-","",dn) sourceOrg = "" matchObject = re.match(r"^(org-[\-\.:_a-zA-Z0-9]{1,16}/)*org-[\-\.:_a-zA-Z0-9]{1,16}",dn) if matchObject is not None: sourceOrg = matchObject.group(0) else: print "Attribute 'Dn' is corrupt" return cmdlet = "" inHierarchical = "" if node.hasAttribute("inHierarchical") is not None: inHierarchical = node.getAttribute("inHierarchical") if inHierarchical.lower() == "true": inHierarchicalValue = "YesOrNo.TRUE" else: inHierarchicalValue = "YesOrNo.FALSE" if destOrg is not None: cmdlet = "handle.LsTemplatise(\"%s\", \"%s\", \"%s\", \"%s\", %s)" %(dn, destOrg, spNewName, templateType, inHierarchicalValue) else: cmdlet = "handle.LsTemplatise(\"%s\", \"org-root\", \"%s\", \"%s\", %s)" %(dn, spNewName, templateType, inHierarchicalValue) return cmdlet #04dec #======================================= End Of Function <GenerateLsTemplatiseCmdlets> 
================================= ### Function to handle method <lsInstantiateNTemplate> and <lsInstantiateNNamedTemplate> #======================================================================================== def GenerateMultipleCloneCmdlets(xmlString, isPrefixBased): doc = xml.dom.minidom.parseString(xmlString) node = None topNode = doc.documentElement #print topNode.localName if topNode is None: return if isPrefixBased: if topNode.localName == "lsInstantiateNTemplate": node = topNode else: if topNode.localName == "lsInstantiateNNamedTemplate": node = topNode dn = "" if node.hasAttribute(NamingPropertyId.DN): dn = node.getAttribute(NamingPropertyId.DN) else: print "Attribute 'Dn' not available" ## writewarning in dotnet return destOrg ="" if node.hasAttribute("inTargetOrg"): destOrg = node.getAttribute("inTargetOrg") spName = re.sub(r"^(org-[\-\.:_a-zA-Z0-9]{1,16}/)*org-[\-\.:_a-zA-Z0-9]{1,16}/ls-","",dn) sourceOrg = "" matchObject = re.match(r"^(org-[\-\.:_a-zA-Z0-9]{1,16}/)*org-[\-\.:_a-zA-Z0-9]{1,16}",dn) if matchObject is not None: sourceOrg = matchObject.group(0) else: print "Attribute 'Dn' is corrupt" return cmdlet = "" inHierarchical = "" if node.hasAttribute("inHierarchical") is not None: inHierarchical = node.getAttribute("inHierarchical") if isPrefixBased: prefix = "" if node.hasAttribute("inServerNamePrefixOrEmpty") is not None: prefix = node.getAttribute("inServerNamePrefixOrEmpty") else: print "Attribute 'inServerNamePrefixOrEmpty' not available" return count = 0 if node.hasAttribute("inNumberOf") is not None: count = node.getAttribute("inNumberOf") else: print "Attribute 'inNumberOf' not available" return if inHierarchical.lower() == "true": inHierarchicalValue = "YesOrNo.TRUE" else: inHierarchicalValue = "YesOrNo.FALSE" cmdlet = "handle.LsInstantiateNTemplate(\"%s\", %s, \"%s\", \"%s\", %s)" %(dn, count, spName, destOrg, inHierarchicalValue) else: dnNodes = GetElementChildNodes(node.getElementsByTagName("inNameSet")[0]) if dnNodes is None or len(dnNodes)<1: print "Xml is corrupt. New names not available" return newNames = "@(" newNameExists = False tempDn = "" cmdlet = "dnSet = DnSet()" + "\n" for dnNode in dnNodes: if dnNode.hasAttribute("value"): newNameExists = True tempDn = dnNode.getAttribute("value") newNames += "\"" + tempDn + "\"," cmdlet += "dn = Dn()\ndn.setattr(\"Value\",\"%s\")\ndnSet.AddChild(dn)\n" %(tempDn) else: print "Xml is corrupt. New names not available" return if not newNameExists: print "Xml is corrupt. 
New names not available" return newNames = newNames.rstrip(',') newNames += ")" if inHierarchical.lower() == "true": inHierarchicalValue = "YesOrNo.TRUE" else: inHierarchicalValue = "YesOrNo.FALSE" cmdlet += "handle.LsInstantiateNNamedTemplate(\"%s\", dnSet, \"%s\", %s)" %(dn, destOrg, inHierarchicalValue) return cmdlet #04dec #====================================== End Of Function <GenerateMultipleCloneCmdlets> ====================================== ### Function to handle method <statsClearInterval> #======================================================================================== def GenerateClearIntervalCmdlet(xmlString): doc = xml.dom.minidom.parseString(xmlString) node = None topNode = doc.documentElement if topNode is None: return if topNode.localName == "statsClearInterval": node = topNode else: print "Check if Method is <statsClearInterval>" return cmdlet = "" dnNodes = GetElementChildNodes(node.getElementsByTagName("inDns")[0]) if dnNodes is None or len(dnNodes) < 0: return cmdlet = "dnSet = DnSet()" + "\n" tempDn = "" for dnNode in dnNodes: tempDn = dnNode.getAttribute(NamingPropertyId.VALUE) cmdlet += "dn = Dn()\ndn.setattr(\"Value\",\"%s\")\ndnSet.AddChild(dn)\n" %(tempDn) cmdlet += "handle.ConfigResolveDns(dnSet)" return cmdlet #04dec #==================================== End Of Function <GenerateClearIntervalCmdlet> ======================================= #-------------------------------- END - OF - METHOD SPECIFIC - FUNCTION -------------------------------- ### check which function to call for a specific method #====================================================== def GenerateCmdlets(xmlString): cmdlet = "" global displayXml, outFilePath if displayXml: doc = xml.dom.minidom.parseString(xmlString) DumpXmlOnScreen(doc) category = "" matchFound = re.match(r"^[\s]*<[\s]*([\S]+)", xmlString) if matchFound: methodName = matchFound.group(1) category = methodName else: return if category == "configConfMos" or category == "configConfMo": cmdlet = GenerateConfigConfCmdlets(xmlString) elif category in ["configResolveDn","configResolveDns","configResolveClass","configResolveClasses"]: cmdlet = GenerateConfigResolveCmdlet(xmlString, category) elif category == "lsClone": cmdlet = GenerateSingleCloneCmdlets(xmlString, False) elif category == "lsInstantiateTemplate": cmdlet = GenerateSingleCloneCmdlets(xmlString, True) elif category == "lsTemplatise": cmdlet = GenerateLsTemplatiseCmdlets(xmlString) elif category == "lsInstantiateNTemplate": cmdlet = GenerateMultipleCloneCmdlets(xmlString, True) elif category == "lsInstantiateNNamedTemplate": cmdlet = GenerateMultipleCloneCmdlets(xmlString, False) elif category == "statsClearInterval": cmdlet = GenerateClearIntervalCmdlet(xmlString) ## 04dec: support for redirecting script output to respective file if outFileFlag: outFile = open(outFilePath, 'a') print >> outFile, '##### Start-Of-PythonScript #####' print >> outFile, cmdlet print >> outFile, '##### End-Of-PythonScript #####' outFile.close() else: print "##### Start-Of-PythonScript #####" print cmdlet print "##### End-Of-PythonScript #####" return #===================================== End Of Function <GenerateCmdlets> ================================== ###This Extracts xmlstring from the file and calls the GenerateCmdlets() on this xmlstring. 
#====================================================== def ExtractXML(fileStream,line): readFlag = False requestString = "" while line <> "": if readFlag and not re.search(r"^\s*$",line): requestString += line + "\n" if CheckIfAnyListValueInString(multiLineMethod, line): requestString += line + "\n" readFlag = True if readFlag and CheckIfAnyListValueInString(singleLineMethod, line): readFlag = False GenerateCmdlets(requestString) requestString = "" break line = fileStream.readline() #===================================== End Of Function <ExtractXML> ================================== ###Depending on guiLog flag, calls the ExtractXML() internally. #====================================================== def FindXmlRequestsInFile_test(fileStream, guiLog): #print "Inside FindXmlRequestsInFile_test" line = fileStream.readline() while line <> "": if not guiLog: ExtractXML(fileStream,line) elif "[------------- Sending Request to Server ------------" in line: line = fileStream.readline() ExtractXML(fileStream, line) line = fileStream.readline() #===================================== End Of Function <FindXmlRequestsInFile_test> ================================== ###checks if path or literalPath present for the respective parameter set and if exists then call FindXmlRequestsInFile_test() #====================================================== def IfPathOrLiteralPath(path,literalPath, guiLog): if path: if literalPath: print "Parameter <path> takes precedence over <literalPath>" filePath = path elif literalPath: filePath = literalPath else: print "Please provide path or literalPath" return fileStream = open(filePath, 'r') FindXmlRequestsInFile_test(fileStream, guiLog) fileStream.close() #===================================== End Of Function <IfPathOrLiteralPath> ================================== ### By default this will generate python script for the action in UCSM GUI. ### xml=True & request="xmlstring" : Generate Python Script for XML Request. ### xml=True & path/LiteralPath : Generate Python script from the file containing XML Request. ### guiLog=True & path/LiteralPath : Generate Python script from the UCSM GUI logfile. ### displayXML=True will also dispaly corresponding XML Request. 
#====================================================== def ConvertToPython(xml=False,request=None,guiLog=False,path=None,literalPath=None,dumpXml=False,dumpToFile=False,dumpFilePath=None): print "### Please review the generated cmdlets before deployment.\n" global displayXml, outFileFlag, outFilePath, outFile displayXml=dumpXml outFileFlag=dumpToFile outFilePath=dumpFilePath if outFileFlag in _AffirmativeList: if outFilePath: print "### Script Output is in file < " + outFilePath + " >" outFile = open(outFilePath, 'w') outFile.close() #outFile = open(r"c:\work.txt", 'w+') else: print "Please profide dumpFilePath" return if xml in _AffirmativeList: if guiLog in _AffirmativeList: print "parameter <xml> takes precedence over <guiLog>" if request: GenerateCmdlets(request) elif path or literalPath: IfPathOrLiteralPath(path,literalPath,False) else: print "Please provide request" return elif guiLog in _AffirmativeList: if path or literalPath: IfPathOrLiteralPath(path,literalPath,True) else: print "Please provide path or literalPath" else: from sys import platform as _platform if _platform.lower() == "linux" or _platform.lower() == "linux2": # linux logFilePath = GetUCSDefaultLogpathLinux() elif _platform.lower() == "darwin": # OS X logFilePath = GetUCSDefaultLogpathOSX() elif _platform.lower() == "win32" or _platform.lower() == "win64": # Windows... logFilePath = GetUCSDefaultLogpathWindows() elif "cygwin" in _platform.lower(): # Cygwin logFilePath = GetUCSDefaultLogpathCygwin() else: print "[Error]: Unsupported OS:",_platform logFilePath = None return ## Get the latest logfile #logFilePath = r"C:\Users\ragupta4\AppData\LocalLow\Sun\Java\Deployment\log\.ucsm" #files = [ file for file in glob.glob(logFilePath + "\\" + "*") if os.path.isfile(file)] os.chdir(logFilePath) files = [ file for file in glob.glob("centrale_*.log") if os.path.isfile(file)] files.sort(key=lambda x: os.path.getmtime(x), reverse=True) lastUpdatedFile = files[0] fileStream = open(lastUpdatedFile, 'r') ## read the file till the end cnt = 0 for line in fileStream: cnt += 1 ## Wait indefinitely until receive new line of set and then again wait while True: line = fileStream.readline() if line: FindXmlRequestsInFile_test(fileStream, True) time.sleep(2) fileStream.close() #if outFilePath: #outFile.close() print "### End of Convert-To-Python ###" #===================================== End Of Function <ConvertToPython> ================================== def GetUCSDefaultLogpathWindows (): if 'APPDATA' in os.environ.keys(): logFilePath = os.getenv('APPDATA') else: print os.name raise 'Not windows OS' if sys.getwindowsversion()[0] == 6: ## in case OS is Win 2008 or above logFilePath = dirname(logFilePath) + "\LocalLow" logFilePath += r"\Sun\Java\Deployment\log\.ucsm" + "\\" return logFilePath def GetUCSDefaultLogpathLinux (): logFilePath = os.getenv('HOME') logFilePath += r"/.java/deployment/log/.ucsm/" return logFilePath def GetUCSDefaultLogpathOSX (): logFilePath = os.getenv('HOME') logFilePath += r"/Library/Caches/Java/log/.ucsm" return logFilePath ## SVN CheckIn3 Added Support for CYGWIN def GetUCSDefaultLogpathCygwin(): #from subprocess import Popen, PIPE #p = Popen('cat /proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows\ NT/CurrentVersion/CurrentVersion', shell=True,stdout=PIPE, stderr=PIPE) #out, err = p.communicate() #OSMajorVersion = out.split('.')[0] logFilePath = os.getenv('APPDATA') logFilePath = logFilePath.replace("\\","/") #TODO: #if OSMajorVersion == 6: logFilePath = dirname(logFilePath) + "/LocalLow" 
logFilePath += r"/Sun/Java/Deployment/log/.ucsm/" return logFilePath
[]
[]
[ "APPDATA", "HOME" ]
[]
["APPDATA", "HOME"]
python
2
0
deprecated/pyorbit_emcee.py
from __future__ import print_function from pyorbit.classes.common import np from pyorbit.classes.model_container_emcee import ModelContainerEmcee from pyorbit.classes.input_parser import yaml_parser, pars_input from pyorbit.classes.io_subroutines import pyde_save_to_pickle,\ pyde_load_from_cpickle,\ emcee_save_to_cpickle, emcee_load_from_cpickle, emcee_flatchain,\ emcee_create_dummy_file, starting_point_load_from_cpickle import pyorbit.classes.results_analysis as results_analysis import os import sys __all__ = ["pyorbit_emcee", "yaml_parser"] def pyorbit_emcee(config_in, input_datasets=None, return_output=None): try: import emcee except: print("ERROR: emcee not installed, this will not work") quit() os.environ["OMP_NUM_THREADS"] = "1" optimize_dir_output = './' + config_in['output'] + '/optimize/' pyde_dir_output = './' + config_in['output'] + '/pyde/' emcee_dir_output = './' + config_in['output'] + '/emcee/' reloaded_optimize = False reloaded_pyde = False reloaded_emcee_multirun = False reloaded_emcee = False try: mc, population, starting_point, theta_dict = pyde_load_from_cpickle( pyde_dir_output, prefix='') reloaded_pyde = True except: pass try: mc, starting_point, population, _, _, sampler_chain, _, _, theta_dict, _ = \ emcee_load_from_cpickle(emcee_dir_output, prefix='MR') reloaded_emcee_multirun = True except: pass try: mc, starting_point, population, _, _, sampler_chain, sampler_lnprobability, _, theta_dict, _ = \ emcee_load_from_cpickle(emcee_dir_output) reloaded_emcee = True except: pass try: starting_point, previous_boundaries, theta_dict = starting_point_load_from_cpickle( optimize_dir_output) reloaded_optimize = True except: pass print() print('reloaded_optimize: ', reloaded_pyde) print('reloaded_pyde: ', reloaded_pyde) print('reloaded_emcee_multirun: ', reloaded_emcee_multirun) print('reloaded_emcee: ', reloaded_emcee) if reloaded_emcee: """ There's no need to do anything""" flatchain = emcee_flatchain( sampler_chain, mc.emcee_parameters['nburn'], mc.emcee_parameters['thin']) mc.model_setup() mc.initialize_logchi2() results_analysis.print_integrated_ACF( sampler_chain, theta_dict, mc.emcee_parameters['thin']) results_analysis.results_resumen(mc, flatchain) if return_output: return mc, sampler_chain, sampler_lnprobability else: return reloaded_mc = reloaded_pyde or reloaded_emcee_multirun if reloaded_mc: previous_boundaries = mc.bounds mc = ModelContainerEmcee() pars_input(config_in, mc, input_datasets) if mc.pyde_parameters['shutdown_jitter'] or mc.emcee_parameters['shutdown_jitter']: for dataset_name, dataset in mc.dataset_dict.items(): dataset.shutdown_jitter() # keep track of which version has been used to perform emcee computations mc.emcee_parameters['version'] = emcee.__version__[0] mc.model_setup() mc.create_variables_bounds() mc.initialize_logchi2() results_analysis.results_resumen(mc, None, skip_theta=True) mc.pyde_dir_output = pyde_dir_output mc.emcee_dir_output = emcee_dir_output mc.emcee_parameters['nwalkers'] = mc.ndim * \ mc.emcee_parameters['npop_mult'] if mc.emcee_parameters['nwalkers'] % 2 == 1: mc.emcee_parameters['nwalkers'] += 1 if not os.path.exists(mc.emcee_dir_output): os.makedirs(mc.emcee_dir_output) print() print('emcee version: ', emcee.__version__) if mc.emcee_parameters['version'] == '2': print('WARNING: upgrading to version 3 is strongly advised') print() print('Include priors: ', mc.include_priors) print() print('Reference Time Tref: ', mc.Tref) print() print('Dimensions = ', mc.ndim) print('Nwalkers = ', mc.emcee_parameters['nwalkers']) if not 
getattr(mc, 'use_threading_pool', False): mc.use_threading_pool = False print() print('Using threading pool:', mc.use_threading_pool) print() print('*************************************************************') print() if reloaded_mc: theta_dict_legacy = theta_dict.copy() population_legacy = population.copy() theta_dict = results_analysis.get_theta_dictionary(mc) population = np.zeros( [mc.emcee_parameters['nwalkers'], mc.ndim], dtype=np.double) for theta_name, theta_i in theta_dict.items(): population[:, theta_i] = population_legacy[:, theta_dict_legacy[theta_name]] mc.bounds[theta_i] = previous_boundaries[theta_dict_legacy[theta_name]] starting_point = np.median(population, axis=0) # print(starting_point) # print(population) print('Using previous population as starting point. ') sys.stdout.flush() print() else: if mc.starting_point_flag or reloaded_optimize: if reloaded_optimize: print('Using the output from a previous optimize run as starting point') theta_dict_legacy = theta_dict.copy() starting_point_legacy = starting_point.copy() theta_dict = results_analysis.get_theta_dictionary(mc) for theta_name, theta_i in theta_dict.items(): starting_point[theta_i] = starting_point_legacy[theta_dict_legacy[theta_name]] else: print('Using user-defined starting point from YAML file') mc.create_starting_point() starting_point = mc.starting_point population = np.zeros( [mc.emcee_parameters['nwalkers'], mc.ndim], dtype=np.double) for ii in range(0, mc.emcee_parameters['nwalkers']): population[ii, :] = np.random.normal(starting_point, 0.0000001) print( 'to create a synthetic population extremely close to the starting values.') sys.stdout.flush() else: try: from pyde.de import DiffEvol except ImportError: print('ERROR! PyDE is not installed, run first with optimize instead of emcee') quit() if not os.path.exists(mc.pyde_dir_output): os.makedirs(mc.pyde_dir_output) print('PyDE running') sys.stdout.flush() de = DiffEvol( mc, mc.bounds, mc.emcee_parameters['nwalkers'], maximize=True) de.optimize(int(mc.pyde_parameters['ngen'])) population = de.population starting_point = np.median(population, axis=0) theta_dict = results_analysis.get_theta_dictionary(mc) """ bounds redefinition and fix for PyDE anomalous results """ if mc.recenter_bounds_flag: pyde_save_to_pickle( mc, population, starting_point, theta_dict, prefix='orig') mc.recenter_bounds(starting_point) population = mc.fix_population(starting_point, population) starting_point = np.median(population, axis=0) print('Boundaries redefined after PyDE output') pyde_save_to_pickle(mc, population, starting_point, theta_dict) print('PyDE completed') sys.stdout.flush() results_analysis.results_resumen( mc, starting_point, compute_lnprob=True, is_starting_point=True) if mc.use_threading_pool: if mc.emcee_parameters['version'] == '2': threads_pool = emcee.interruptible_pool.InterruptiblePool( mc.emcee_parameters['nwalkers']) else: from multiprocessing.pool import Pool as InterruptiblePool threads_pool = InterruptiblePool(mc.emcee_parameters['nwalkers']) if mc.emcee_parameters['multirun'] and not reloaded_emcee_multirun: for ii in range(0, mc.emcee_parameters['multirun_iter']): print('emcee exploratory run #', ii, ' of ', mc.emcee_parameters['multirun_iter']) # sampler = emcee.EnsembleSampler(mc.emcee_parameters['nwalkers'], mc.ndim, mc, # threads=mc.emcee_parameters['nwalkers']) if mc.use_threading_pool: sampler = emcee.EnsembleSampler( mc.emcee_parameters['nwalkers'], mc.ndim, mc, pool=threads_pool) else: sampler = emcee.EnsembleSampler( 
mc.emcee_parameters['nwalkers'], mc.ndim, mc) population, prob, state = sampler.run_mcmc( population, mc.emcee_parameters['multirun']) flatchain = emcee_flatchain( sampler.chain, mc.emcee_parameters['nburn'], mc.emcee_parameters['thin']) results_analysis.results_resumen(mc, flatchain) max_ind = np.argmax(prob) starting_point = population[max_ind, :] population = np.asarray([starting_point + 1e-4*np.random.randn(mc.ndim) for i in range(mc.emcee_parameters['nwalkers'])]) sampler.reset() theta_dict = results_analysis.get_theta_dictionary(mc) emcee_save_to_cpickle(mc, starting_point, population, prob, state, sampler, theta_dict, prefix='MR_'+repr(ii)) emcee_save_to_cpickle(mc, starting_point, population, prob, state, sampler, theta_dict, prefix='MR') flatchain = emcee_flatchain( sampler.chain, mc.emcee_parameters['nburn'], mc.emcee_parameters['thin']) results_analysis.print_integrated_ACF( sampler.chain, theta_dict, mc.emcee_parameters['thin']) results_analysis.results_resumen(mc, flatchain) print('emcee exploratory runs completed') sys.stdout.flush() print() print('Running emcee') state = None if mc.use_threading_pool: sampler = emcee.EnsembleSampler( mc.emcee_parameters['nwalkers'], mc.ndim, mc, pool=threads_pool) else: sampler = emcee.EnsembleSampler( mc.emcee_parameters['nwalkers'], mc.ndim, mc) if mc.emcee_parameters['nsave'] > 0: print() print(' Saving temporary steps') niter = int(mc.emcee_parameters['nsteps']/mc.emcee_parameters['nsave']) sampled = 0 for i in range(0, niter): population, prob, state = sampler.run_mcmc( population, mc.emcee_parameters['nsave'], thin=mc.emcee_parameters['thin'], rstate0=state) sampled += mc.emcee_parameters['nsave'] theta_dict = results_analysis.get_theta_dictionary(mc) emcee_save_to_cpickle(mc, starting_point, population, prob, state, sampler, theta_dict, samples=sampled) flatchain = emcee_flatchain( sampler.chain, mc.emcee_parameters['nburn'], mc.emcee_parameters['thin']) results_analysis.print_integrated_ACF( sampler.chain, theta_dict, mc.emcee_parameters['thin']) results_analysis.results_resumen(mc, flatchain) print() print(sampled, ' steps completed, average lnprob:, ', np.median(prob)) sys.stdout.flush() else: population, prob, state = sampler.run_mcmc( population, mc.emcee_parameters['nsteps'], thin=mc.emcee_parameters['thin']) theta_dict = results_analysis.get_theta_dictionary(mc) emcee_save_to_cpickle(mc, starting_point, population, prob, state, sampler, theta_dict) flatchain = emcee_flatchain( sampler.chain, mc.emcee_parameters['nburn'], mc.emcee_parameters['thin']) results_analysis.print_integrated_ACF( sampler.chain, theta_dict, mc.emcee_parameters['thin']) results_analysis.results_resumen(mc, flatchain) print() print('emcee completed') if mc.use_threading_pool: # close the pool of threads threads_pool.close() threads_pool.terminate() threads_pool.join() """ A dummy file is created to let the cpulimit script to proceed with the next step""" emcee_create_dummy_file(mc) if return_output: return mc, sampler.chain, sampler.lnprobability
[]
[]
[ "OMP_NUM_THREADS" ]
[]
["OMP_NUM_THREADS"]
python
1
0
diofant/polys/polyconfig.py
"""Configuration utilities for polynomial manipulation algorithms. """ import ast import contextlib import os __all__ = ('setup',) _default_config = { 'USE_COLLINS_RESULTANT': False, 'USE_SIMPLIFY_GCD': True, 'USE_HEU_GCD': True, 'HEU_GCD_MAX': 6, 'FALLBACK_GCD_ZZ_METHOD': 'prs', 'GCD_AA_METHOD': 'prs', 'USE_IRREDUCIBLE_IN_FACTOR': False, 'USE_CYCLOTOMIC_FACTOR': True, 'EEZ_RESTART_IF_NEEDED': True, 'EEZ_NUMBER_OF_CONFIGS': 3, 'EEZ_NUMBER_OF_TRIES': 5, 'EEZ_MODULUS_STEP': 2, 'GF_IRRED_METHOD': 'rabin', 'GF_FACTOR_METHOD': 'zassenhaus', 'AA_FACTOR_METHOD': 'trager', 'GROEBNER': 'buchberger', 'MINPOLY_METHOD': 'compose', 'KARATSUBA_CUTOFF': 100, } _current_config = {} @contextlib.contextmanager def using(**kwargs): for k, v in kwargs.items(): setup(k, v) yield for k in kwargs: setup(k) def setup(key, value=None): """Assign a value to (or reset) a configuration item. """ key = key.upper() if value is not None: _current_config[key] = value else: _current_config[key] = _default_config[key] def query(key): """Ask for a value of the given configuration item. """ return _current_config.get(key.upper(), None) def configure(): """Initialized configuration of polys module. """ for key, default in _default_config.items(): _current_config[key] = default value = os.getenv('DIOFANT_' + key) if value is not None: try: value = ast.literal_eval(value) except (SyntaxError, ValueError): pass else: if type(value) is type(default): _current_config[key] = value configure()
[]
[]
[ "DIOFANT_' + ke" ]
[]
["DIOFANT_' + ke"]
python
1
0
cmd/blog/main.go
package main import ( "fmt" "log" "net/http" "os" "time" "github.com/didip/tollbooth" "github.com/didip/tollbooth/limiter" "github.com/didip/tollbooth_chi" "github.com/go-chi/chi" "github.com/go-chi/chi/middleware" "github.com/go-chi/cors" "github.com/go-redis/redis/v7" "github.com/jadoint/micro/pkg/blog" "github.com/jadoint/micro/pkg/conn" "github.com/jadoint/micro/pkg/db" "github.com/jadoint/micro/pkg/logger" "github.com/jadoint/micro/pkg/visitor" ) func main() { // Load environment variables if // not already set. if os.Getenv("LISTEN") == "" { log.Fatal("LISTEN is not set") } // Database dbClient, err := db.GetClient() if err != nil { logger.Panic(err.Error()) } defer dbClient.Master.Close() defer dbClient.Read.Close() // Cache redisClient := redis.NewClient(&redis.Options{Addr: os.Getenv("CACHE_ADDR")}) defer redisClient.Close() // Clients clients := &conn.Clients{ DB: dbClient, Cache: redisClient, } // Routes r := chi.NewRouter() r.Use(middleware.StripSlashes) r.Use(middleware.RealIP) r.Use(middleware.Logger) r.Use(middleware.Recoverer) r.Use(middleware.SetHeader("Content-Type", "application/json")) cors := cors.New(cors.Options{ AllowedOrigins: []string{os.Getenv("SITE_URL")}, AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token"}, ExposedHeaders: []string{"Link"}, AllowCredentials: true, MaxAge: 300, // Maximum value not ignored by any of major browsers }) r.Use(cors.Handler) if os.Getenv("ENV") == "development" { r.Use(middleware.SetHeader("Access-Control-Allow-Origin", os.Getenv("SITE_URL"))) } // Rate limiter: first argument is "x requests / second" per IP lmt := tollbooth.NewLimiter(100, &limiter.ExpirableOptions{DefaultExpirationTTL: time.Hour}) lmt.SetIPLookups([]string{"X-Forwarded-For", "RemoteAddr", "X-Real-IP"}) r.Use(tollbooth_chi.LimitHandler(lmt)) r.Use(visitor.Middleware) startPath := fmt.Sprintf(`/%s/`, os.Getenv("START_PATH")) r.Mount(startPath+"blog/tag", blog.RouteTag(clients)) r.Mount(startPath+"blog", blog.RouteBlog(clients)) srv := &http.Server{ Addr: os.Getenv("LISTEN"), Handler: r, ReadTimeout: 5 * time.Second, WriteTimeout: 10 * time.Second, IdleTimeout: 120 * time.Second, } log.Println(srv.ListenAndServeTLS(os.Getenv("TLS_CERT"), os.Getenv("TLS_KEY"))) }
[ "\"LISTEN\"", "\"CACHE_ADDR\"", "\"SITE_URL\"", "\"ENV\"", "\"SITE_URL\"", "\"START_PATH\"", "\"LISTEN\"", "\"TLS_CERT\"", "\"TLS_KEY\"" ]
[]
[ "CACHE_ADDR", "ENV", "TLS_CERT", "LISTEN", "SITE_URL", "START_PATH", "TLS_KEY" ]
[]
["CACHE_ADDR", "ENV", "TLS_CERT", "LISTEN", "SITE_URL", "START_PATH", "TLS_KEY"]
go
7
0
Code/mysite/polls/admin.py
from django.contrib import admin from .models import Movie from .models import User admin.site.register(Movie) admin.site.register(User)
[]
[]
[]
[]
[]
python
null
null
null
providers/box/box_test.go
package box_test import ( "github.com/AKovalevich/goth" "github.com/AKovalevich/goth/providers/box" "github.com/stretchr/testify/assert" "os" "testing" ) func Test_New(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() a.Equal(p.ClientKey, os.Getenv("BOX_KEY")) a.Equal(p.Secret, os.Getenv("BOX_SECRET")) a.Equal(p.CallbackURL, "/foo") } func Test_Implements_Provider(t *testing.T) { t.Parallel() a := assert.New(t) a.Implements((*goth.Provider)(nil), provider()) } func Test_BeginAuth(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() session, err := p.BeginAuth("test_state") s := session.(*box.Session) a.NoError(err) a.Contains(s.AuthURL, "app.box.com/api/oauth2/authorize") } func Test_SessionFromJSON(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() session, err := p.UnmarshalSession(`{"AuthURL":"https://app.box.com/api/oauth2/authorize","AccessToken":"1234567890"}`) a.NoError(err) s := session.(*box.Session) a.Equal(s.AuthURL, "https://app.box.com/api/oauth2/authorize") a.Equal(s.AccessToken, "1234567890") } func provider() *box.Provider { return box.New(os.Getenv("BOX_KEY"), os.Getenv("BOX_SECRET"), "/foo") }
[ "\"BOX_KEY\"", "\"BOX_SECRET\"", "\"BOX_KEY\"", "\"BOX_SECRET\"" ]
[]
[ "BOX_KEY", "BOX_SECRET" ]
[]
["BOX_KEY", "BOX_SECRET"]
go
2
0
internal/pkg/utils/validator_test.go
// Copyright 2018 Clivern. All rights reserved. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. package utils import ( "fmt" "github.com/nbio/st" "github.com/spf13/viper" "os" "strconv" "testing" ) // init setup stuff func init() { basePath := fmt.Sprintf("%s/src/github.com/clivern/beaver", os.Getenv("GOPATH")) configFile := fmt.Sprintf("%s/%s", basePath, "config.test.yml") viper.SetConfigFile(configFile) err := viper.ReadInConfig() if err != nil { panic(fmt.Sprintf( "Error while loading config file [%s]: %s", configFile, err.Error(), )) } os.Setenv("BeaverBasePath", fmt.Sprintf("%s/", basePath)) os.Setenv("PORT", strconv.Itoa(viper.GetInt("app.port"))) } // TestValidation test cases func TestValidation(t *testing.T) { validate := Validator{} st.Expect(t, validate.IsIn("public", []string{"public", "private"}), true) st.Expect(t, validate.IsSlug("customers_chat_0123", 5, 60), true) st.Expect(t, validate.IsSlug("customers_chat-0123", 5, 60), false) st.Expect(t, validate.IsSlug(" customers_chat_0123", 5, 60), false) st.Expect(t, validate.IsSlug("-customers_chat_0123", 5, 60), false) st.Expect(t, validate.IsSlug("customers_chat_0123_", 5, 60), false) st.Expect(t, validate.IsSlug("cu", 5, 60), false) st.Expect(t, validate.IsSlug("cu263hd53t3g363g3g36362gr3", 5, 10), false) st.Expect(t, validate.IsEmpty(" "), true) st.Expect(t, validate.IsEmpty(" Test \t "), false) st.Expect(t, validate.IsEmpty(" Test "), false) st.Expect(t, validate.IsEmpty(" \t "), true) st.Expect(t, validate.IsJSON(`{"id": "12", "name": "Joe"}`), true) st.Expect(t, validate.IsJSON(`"id": "12", "name": "Joe"}`), false) st.Expect(t, validate.IsJSON(`{"id": "12" "name": "Joe"}`), false) st.Expect(t, validate.IsJSON(`{"id": "12", "name": "Joe}`), false) }
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
src/rkd_harbor/tasks/deployment/base.py
import os import subprocess from abc import ABC from jinja2 import Environment from jinja2 import FileSystemLoader from jinja2 import StrictUndefined from jinja2.exceptions import UndefinedError from argparse import ArgumentParser from rkd.api.contract import ExecutionContext from rkd.yaml_parser import YamlFileLoader from rkd.exception import MissingInputException from rkd.api.inputoutput import Wizard from ..base import HarborBaseTask from ...exception import MissingDeploymentConfigurationError HARBOR_ROOT = os.path.dirname(os.path.realpath(__file__)) + '/../../deployment/files' class BaseDeploymentTask(HarborBaseTask, ABC): ansible_dir: str = '.rkd/deployment' _config: dict vault_args: list = [] def get_config(self) -> dict: """Loads and parses deployment.yml file. Supports: - Ansible Vault encryption of deployment.yml - SSH private key storage inside deployment.yml """ deployment_filenames = ['deployment.yml', 'deployment.yaml'] try: self._config except AttributeError: # try multiple files for filename in deployment_filenames: if os.path.isfile(filename): # # Check file contents before # with open(filename, 'rb') as f: content = f.read().decode('utf-8') # # When file is encrypted, then decrypt it # if content.startswith('$ANSIBLE_VAULT;'): tmp_vault_path, tmp_vault_filename = self.temp.create_tmp_file_path() self.io().info('Decrypting deployment file') self.sh('cp %s %s' % (filename, tmp_vault_path)) self.io().info_msg('Need a vault passphrase to decrypt "%s"' % filename) self.rkd([':harbor:vault:encrypt', '-d', tmp_vault_path] + self.vault_args) self._config = YamlFileLoader(self._ctx.directories).load_from_file( tmp_vault_filename, 'org.riotkit.harbor/deployment/v1' ) self._process_config_private_keys() return self._config self._config = YamlFileLoader(self._ctx.directories).load_from_file( filename, 'org.riotkit.harbor/deployment/v1' ) self._process_config_private_keys() return self._config raise MissingDeploymentConfigurationError() return self._config def _process_config_private_keys(self): """Allow private keys to be pasted directly to the deployment.yml On-the-fly those keys will be written into the temporary directory """ for group_name, nodes in self._config['nodes'].items(): for node_num in range(0, len(nodes)): if 'private_key' not in self._config['nodes'][group_name][node_num]: continue if '-----BEGIN' not in self._config['nodes'][group_name][node_num]['private_key']: continue tmp_path = self.temp.assign_temporary_file(mode=0o700) self.io().info('Storing inline private key as "%s"' % tmp_path) with open(tmp_path, 'w') as key_file: key_file.write(self._config['nodes'][group_name][node_num]['private_key'].strip()) key_file.write("\n") self._config['nodes'][group_name][node_num]['private_key'] = tmp_path def _verify_synced_version(self, abs_ansible_dir: str): """Verifies last synchronization - displays warning if Harbor version was changed after last files synchronization""" if not os.path.isfile(abs_ansible_dir + '/.synced'): return with open(abs_ansible_dir + '/.synced', 'rb') as f: synced_version = f.read().decode('utf-8').strip() actual_version = self.get_harbor_version() if synced_version != actual_version: self.io().warn('Ansible deployment in .rkd/deployment is not up-to-date. 
We recommend to update' + ' from %s to %s' % (synced_version, actual_version)) def _write_synced_version(self, abs_ansible_dir: str): """Writes information about, in which Harbor version the files were synced last time""" with open(abs_ansible_dir + '/.synced', 'wb') as f: f.write(self.get_harbor_version().encode('utf-8')) def role_is_installed_and_configured(self) -> bool: return os.path.isfile(self.ansible_dir + '/.synced') def _ask_and_set_var(self, ctx: ExecutionContext, arg_name: str, title: str, attribute: str, secret: bool): """Ask user an interactive question, then add answer to the deployment.yml loaded in memory The variable will be appended to any node, where the variable is empty. Example: We have 5 servers, 3 without a password. So the password will be applied to 3 servers. """ self.get_config() if not ctx.get_arg(arg_name): return wizard = Wizard(self).ask(title, attribute=attribute, secret=secret) for group_name, nodes in self._config['nodes'].items(): node_num = 0 for node in nodes: node_num += 1 if attribute in self._config['nodes'][group_name][node_num - 1]: continue self._config['nodes'][group_name][node_num - 1][attribute] = wizard.answers[attribute] def install_and_configure_role(self, ctx: ExecutionContext, force_update: bool = False) -> bool: """Install an Ansible role from galaxy, and configure playbook, inventory, all the needed things""" abs_ansible_dir = os.path.realpath(self.ansible_dir) should_update = force_update or not os.path.isfile(abs_ansible_dir + '/.synced') self.io().info('Checking role installation...') self._silent_mkdir(abs_ansible_dir) self._verify_synced_version(abs_ansible_dir) # optionally ask user and set facts such as passwords, key paths, sudo passwords # ansible-vault password prompt is handed by ansible-vault itself self._ask_and_set_var(ctx, '--ask-ssh-login', 'SSH username', 'user', secret=True) self._ask_and_set_var(ctx, '--ask-ssh-pass', 'SSH password', 'password', secret=True) self._ask_and_set_var(ctx, '--ask-ssh-key-path', 'SSH private key path', 'private_key', secret=False) self._ask_and_set_var(ctx, '--ask-sudo-pass', 'Sudo password for remote machines', 'sudo_pass', secret=True) if not self._synchronize_structure_from_template(abs_ansible_dir, only_jinja_templates=True): self.io().error_msg('Cannot synchronize templates') return False if should_update: self.io().info('Role will be updated') if not self._synchronize_structure_from_template(abs_ansible_dir): self.io().error_msg('Cannot synchronize structure') return False self.io().debug('Downloading fresh role...') self.download_roles() self._write_synced_version(abs_ansible_dir) return True def download_roles(self): self.sh(' '.join([ 'ansible-galaxy', 'install', '-r', self.ansible_dir + '/requirements.yml', '-p', self.ansible_dir + '/roles/', '--force' ]), capture=False) def _synchronize_structure_from_template(self, abs_ansible_dir: str, only_jinja_templates: bool = False) -> bool: """Synchronizes template structure into .rkd/deployment""" self.io().debug( 'Synchronizing structure from template (only_jinja_templates=' + str(only_jinja_templates) + ')') # synchronize directory structure for root, subdirs, files in os.walk(HARBOR_ROOT): relative_root = root[len(HARBOR_ROOT) + 1:] self._silent_mkdir(abs_ansible_dir + '/' + relative_root) for file in files: if only_jinja_templates and not file.endswith('.j2'): continue abs_src_file_path = root + '/' + file abs_dest_file_path = abs_ansible_dir + '/' + relative_root + '/' + file if not self._copy_file(abs_src_file_path, 
abs_dest_file_path): self.io().error('Cannot process file %s' % abs_dest_file_path) return False return True def _copy_file(self, abs_src_file_path: str, abs_dest_file_path: str): """Copies a file from template directory - supports jinja2 files rendering on-the-fly""" if abs_dest_file_path.endswith('.j2'): abs_dest_file_path = abs_dest_file_path[:-3] with open(abs_src_file_path, 'rb') as f: tpl = Environment(loader=FileSystemLoader(['./', './rkd/deployment']), undefined=StrictUndefined)\ .from_string(f.read().decode('utf-8')) try: variables = self._prepare_variables() with open(abs_dest_file_path, 'wb') as f: f.write(tpl.render(**variables).encode('utf-8')) except UndefinedError as e: self.io().error(str(e) + " - required in " + abs_src_file_path + ", please define it in deployment.yml") return False return True subprocess.check_call(['cp', '-p', abs_src_file_path, abs_dest_file_path]) self.io().debug('Created ' + abs_dest_file_path) return True def _prepare_variables(self): """Glues together variables from environment and from deployment.yaml for exposing in JINJA2 templates""" variables = {} variables.update(os.environ) variables.update(self.get_config()) if 'git_url' not in variables: variables['git_url'] = subprocess\ .check_output(['git', 'config', '--get', 'remote.origin.url']).decode('utf-8')\ .replace('\n', '')\ .strip() if 'git_secret_url' not in variables: variables['git_secret_url'] = variables['git_url'].replace('\n', '') return variables def _preserve_vault_parameters_for_usage_in_inner_tasks(self, ctx: ExecutionContext): """Preserve original parameters related to Vault, so those parameters can be propagated to inner tasks""" try: vault_passwords = ctx.get_arg_or_env('--vault-passwords') except MissingInputException: vault_passwords = '' # keep the vault arguments for decryption of deployment.yml self.vault_args = ['--vault-passwords=' + vault_passwords] if ctx.get_arg('--ask-vault-pass'): self.vault_args.append('--ask-vault-pass') def _get_vault_opts(self, ctx: ExecutionContext, chdir: str = '') -> str: """Creates options to pass in Ansible Vault commandline The output will be a temporary vault file with password entered inline or a --ask-vault-pass switch """ try: vault_passwords = ctx.get_arg_or_env('--vault-passwords').split('||') except MissingInputException: vault_passwords = [] num = 0 opts = '' enforce_ask_pass = ctx.get_arg('--ask-vault-pass') for passwd in vault_passwords: num = num + 1 if not passwd: continue if passwd.startswith('./') or passwd.startswith('/'): if os.path.isfile(passwd): opts += ' --vault-password-file="%s" ' % (chdir + passwd) else: self.io().error('Vault password file "%s" does not exist, calling --ask-vault-pass' % passwd) enforce_ask_pass = True else: tmp_vault_file = self.temp.assign_temporary_file(mode=0o644) with open(tmp_vault_file, 'w') as f: f.write(passwd) opts += ' --vault-password-file="%s" ' % (chdir + tmp_vault_file) if enforce_ask_pass: opts += ' --ask-vault-pass ' return opts @classmethod def _add_vault_arguments_to_argparse(cls, parser: ArgumentParser): parser.add_argument('--ask-vault-pass', '-v', help='Ask for vault password interactively', action='store_true') parser.add_argument('--vault-passwords', '-V', help='Vault passwords separated by "||" eg. 
123||456') @classmethod def _add_ask_pass_arguments_to_argparse(cls, parser: ArgumentParser): parser.add_argument('--ask-ssh-login', help='Ask for SSH username', action='store_true') parser.add_argument('--ask-ssh-pass', help='Ask for a SSH password', action='store_true') parser.add_argument('--ask-ssh-key-path', help='Ask for a SSH private key path', action='store_true') parser.add_argument('--ask-sudo-pass', help='Ask for sudo password', action='store_true')
[]
[]
[]
[]
[]
python
0
0
miniblog/settings.py
""" Django settings for miniblog project. Generated by 'django-admin startproject' using Django 3.2.4. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ from pathlib import Path import os # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECRET_KEY = 'django-insecure-4v*o8q87xl@ppg6-ejt$c+$d4@kcfvz=boce^1)h=hg&^_+-yf' SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'django-insecure-4v*o8q87xl@ppg6-ejt$c+$d4@kcfvz=boce^1)h=hg&^_+-yf') # SECURITY WARNING: don't run with debug turned on in production! # DEBUG = True DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False' ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'blog.apps.BlogConfig', # add my 'blog' app to my project ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'miniblog.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'miniblog.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { # 'ENGINE': 'django.db.backends.sqlite3', # 'NAME': BASE_DIR / 'db.sqlite3', 'ENGINE': 'django.db.backends.postgresql', 'NAME': 'django_mini_blog_db', 'USER': 'django_mini_blog_user', 'PASSWORD': 'djangominiblog', 'HOST': 'db', 'PORT': 5432, } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ # LANGUAGE_CODE = 'en-us' LANGUAGE_CODE = 'ja' # TIME_ZONE = 'UTC' TIME_ZONE = 'Asia/Tokyo' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Redirect to home URL 
after login (Default redirects to /accounts/profile/) LOGIN_REDIRECT_URL = '/' # Heroku: Update database configuration from $DATABASE_URL. import dj_database_url db_from_env = dj_database_url.config(conn_max_age=500) DATABASES['default'].update(db_from_env) # Simplified static file serving. # https://warehouse.python.org/project/whitenoise/ STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
[]
[]
[ "DJANGO_DEBUG", "DJANGO_SECRET_KEY" ]
[]
["DJANGO_DEBUG", "DJANGO_SECRET_KEY"]
python
2
0
internal/namespaces/init/init.go
package init import ( "context" "fmt" "os" "reflect" "github.com/fatih/color" "github.com/scaleway/scaleway-cli/internal/account" "github.com/scaleway/scaleway-cli/internal/core" "github.com/scaleway/scaleway-cli/internal/interactive" "github.com/scaleway/scaleway-cli/internal/namespaces/autocomplete" "github.com/scaleway/scaleway-cli/internal/terminal" "github.com/scaleway/scaleway-sdk-go/logger" "github.com/scaleway/scaleway-sdk-go/scw" "github.com/scaleway/scaleway-sdk-go/validation" ) /* See below the schema `scw init` follows to ask for default config: yes +----------+ +-------+Config ok?| | +----------+ +---+ no +----v----+ |no |out+<----+Override?| v +---+ +----+----+ +----+-----+ | |Read email+-----------+ +------>+ or token | token | yes +----------+ | |email | v v +---+----+ +------+---+ | Read | |Get access| |password| | key | +---+----+ +------+---+ | | v | +--------+ yes +-+-+ | |Read OTP+<----+2FA| | +---+----+ +---+ | | |no | | v | | +-----+------+ | +----->+Create token| | +-----+------+ | | | v | +-------+----------+ | |ask default config+<-----+ +------------------+ */ func GetCommands() *core.Commands { return core.NewCommands(initCommand()) } type initArgs struct { SecretKey string Region scw.Region Zone scw.Zone OrganizationID string SendTelemetry *bool InstallAutocomplete *bool } func initCommand() *core.Command { return &core.Command{ Short: `Initialize the config`, Long: `Initialize the active profile of the config located in ` + scw.GetConfigPath(), Namespace: "init", NoClient: true, ArgsType: reflect.TypeOf(initArgs{}), ArgSpecs: core.ArgSpecs{ { Name: "secret-key", ValidateFunc: core.ValidateSecretKey(), }, { Name: "region", EnumValues: []string{"fr-par", "nl-ams"}, }, { Name: "zone", EnumValues: []string{"fr-par-1", "fr-par-2", "nl-ams-1"}, }, // `organization-id` is not required before `PreValidateFunc()`, but is required after `PreValidateFunc()`. // See workflow in cobra_utils.go/cobraRun(). // It is not required in the command line: the user is not obliged to type it. // But it is required to make the request: this is why we use `ValidateOrganizationIDRequired(). // If `organization-id` is not typed by the user, we set it in `PreValidateFunc()`. 
{ Name: "organization-id", ValidateFunc: core.ValidateOrganizationIDRequired(), }, { Name: "send-usage", }, { Name: "install-autocomplete", Short: "Whether the autocomplete script should be installed during initialisation", }, }, SeeAlsos: []*core.SeeAlso{ { Short: "Config management help", Command: "scw config --help", }, }, PreValidateFunc: func(ctx context.Context, argsI interface{}) error { args := argsI.(*initArgs) // Show logo banner, or simple welcome message if terminal.GetWidth() >= 80 { interactive.Printf("%s\n%s\n\n", interactive.Center(logo), interactive.Line("-")) } else { interactive.Printf("Welcome to the Scaleway Cli\n\n") } // Check if a config exists // Actual creation of the new config is done in the Run() newConfig := false config, err := scw.LoadConfig() if err != nil { newConfig = true } // If it is not a new config, ask if we want to override the existing config if !newConfig { _, _ = interactive.PrintlnWithoutIndent(` Current config is located at ` + scw.GetConfigPath() + ` ` + terminal.Style(fmt.Sprint(config), color.Faint) + ` `) overrideConfig, err := interactive.PromptBoolWithConfig(&interactive.PromptBoolConfig{ Prompt: "Do you want to override current config?", DefaultValue: true, }) if err != nil { return err } if !overrideConfig { return fmt.Errorf("initialization cancelled") } } // Manually prompt for missing args if args.SecretKey == "" { args.SecretKey, err = promptSecretKey() if err != nil { return err } } if args.Zone == "" { zone, err := interactive.PromptStringWithConfig(&interactive.PromptStringConfig{ Prompt: "Select a zone", DefaultValueDoc: "fr-par-1", DefaultValue: "fr-par-1", ValidateFunc: func(s string) error { logger.Debugf("s: %v", s) if !validation.IsZone(s) { return fmt.Errorf("invalid zone") } return nil }, }) if err != nil { return err } args.Zone, err = scw.ParseZone(zone) if err != nil { return err } } // Deduce Region from Zone if args.Region == "" { args.Region, err = args.Zone.Region() if err != nil { return err } } // Set OrganizationID if not done previously // As OrganizationID depends on args.SecretKey, we can't use a DefaultFunc or ArgPromptFunc. if args.OrganizationID == "" { args.OrganizationID, err = getOrganizationID(args.SecretKey) if err != nil { return err } } // Ask for send usage permission if args.SendTelemetry == nil { _, _ = interactive.Println() _, _ = interactive.PrintlnWithoutIndent(` To improve this tool we rely on diagnostic and usage data. Sending such data is optional and can be disable at any time by running "scw config set send_telemetry false" `) sendTelemetry, err := interactive.PromptBoolWithConfig(&interactive.PromptBoolConfig{ Prompt: "Do you want to send usage statistics and diagnostics?", DefaultValue: true, }) if err != nil { return err } args.SendTelemetry = scw.BoolPtr(sendTelemetry) } // Ask whether we should install autocomplete if args.InstallAutocomplete == nil { _, _ = interactive.Println() _, _ = interactive.PrintlnWithoutIndent(` To fully enjoy Scaleway CLI we recommend you to install autocomplete support in your shell. 
`) installAutocomplete, err := interactive.PromptBoolWithConfig(&interactive.PromptBoolConfig{ Prompt: "Do you want to install autocomplete?", DefaultValue: true, }) if err != nil { return err } args.InstallAutocomplete = scw.BoolPtr(installAutocomplete) } return nil }, Run: func(ctx context.Context, argsI interface{}) (i interface{}, e error) { args := argsI.(*initArgs) // Check if a config exists // Creates a new one if it does not config, err := scw.LoadConfig() if err != nil { config = &scw.Config{} interactive.Printf("Creating new config at %v\n", scw.GetConfigPath()) } if args.SendTelemetry != nil { config.SendTelemetry = *args.SendTelemetry } // Update active profile profile, err := config.GetActiveProfile() if err != nil { return nil, err } profile.SecretKey = &args.SecretKey profile.DefaultZone = scw.StringPtr(args.Zone.String()) profile.DefaultRegion = scw.StringPtr(args.Region.String()) profile.DefaultOrganizationID = &args.OrganizationID err = config.Save() if err != nil { return nil, err } // Get access key accessKey, err := account.GetAccessKey(args.SecretKey) if err != nil { interactive.Printf("Config saved at %s:\n%s\n", scw.GetConfigPath(), terminal.Style(fmt.Sprint(config), color.Faint)) return "", &core.CliError{ Err: err, Details: "Failed to retrieve Access Key for the given Secret Key.", } } profile.AccessKey = &accessKey err = config.Save() if err != nil { return nil, err } successMessage := "Initialization completed with success" if *args.InstallAutocomplete { _, err := autocomplete.InstallCommandRun(ctx, &autocomplete.InstallArgs{}) if err != nil { successMessage += " except for autocomplete:\n" + err.Error() } } return &core.SuccessResult{ Message: successMessage, }, nil }, } } func promptSecretKey() (string, error) { UUIDOrEmail, err := interactive.Readline(&interactive.ReadlineConfig{ PromptFunc: func(value string) string { secretKey, email := "secret-key", "email" switch { case validation.IsEmail(value): email = terminal.Style(email, color.FgBlue) case validation.IsUUID(value): secretKey = terminal.Style(secretKey, color.FgBlue) } return terminal.Style(fmt.Sprintf("Enter a valid %s or an %s: ", secretKey, email), color.Bold) }, ValidateFunc: func(s string) error { if validation.IsEmail(s) || validation.IsSecretKey(s) { return nil } return fmt.Errorf("invalid email or secret-key") }, }) if err != nil { return "", err } switch { case validation.IsEmail(UUIDOrEmail): email := UUIDOrEmail password, err := interactive.PromptPassword("Enter your " + terminal.Style("password", color.Bold)) if err != nil { return "", err } hostname, _ := os.Hostname() loginReq := &account.LoginRequest{ Email: email, Password: password, Description: fmt.Sprintf("scw-cli %s@%s", os.Getenv("USER"), hostname), } var t *account.Token var twoFactorRequired bool for { t, twoFactorRequired, err = account.Login(loginReq) if err != nil { return "", err } if !twoFactorRequired { return t.SecretKey, nil } loginReq.TwoFactorToken, err = interactive.PromptString("Enter your 2FA code") if err != nil { return "", err } } case validation.IsUUID(UUIDOrEmail): return UUIDOrEmail, nil default: return "", fmt.Errorf("invalid email or secret-key: '%v'", UUIDOrEmail) } } // getOrganizationId handles prompting for the argument organization-id // If we have only 1 id : we use it, and don't prompt // If we have more than 1 id, we prompt, with id[0] as default value. 
func getOrganizationID(secretKey string) (string, error) { IDs, err := account.GetOrganizationsIds(secretKey) if err != nil { logger.Warningf("%v", err) return promptOrganizationID(IDs) } if len(IDs) != 1 { return promptOrganizationID(IDs) } return IDs[0], nil } func promptOrganizationID(IDs []string) (string, error) { config := &interactive.PromptStringConfig{ Prompt: "Enter your Organization ID", ValidateFunc: interactive.ValidateOrganizationID(), } if len(IDs) > 0 { config.DefaultValue = IDs[0] config.DefaultValueDoc = IDs[0] } ID, err := interactive.PromptStringWithConfig(config) if err != nil { return "", err } return ID, nil } const logo = ` @@@@@@@@@@@@@@@. @@@@@@@@@@@@@@@@@@@@ __ __ _ @@@ @@@@ \ \ / / | | @@@ @@@@@@@ .@@@ \ \ /\ / /__| | ___ ___ _ __ ___ ___ @@@ @@@@@@@@ @@@ \ \/ \/ / _ \ |/ __/ _ \| '_ ` + "`" + ` _ \ / _ \ @@@ @@@ @@@ \ /\ / __/ | (_| (_) | | | | | | __/ @@@ @@@ @@@ @@@ \/ \/ \___|_|\___\___/|_| |_| |_|\___| @@@ @@@ @@@ @@@ _ _ @@@ @@@ @@@ | |(_) @@@ .@@@@@@@ @@@ ___ ___ __ __ ___ | | _ @@@ @@@@@@@ @@@ / __| / __|\ \ /\ / / / __|| || | @@@. @@@ \__ \| (__ \ V V / | (__ | || | @@@@@@. .@@@@ |___/ \___| \_/\_/ \___||_||_| @@@@@@@@@@@@@@@@. `
[ "\"USER\"" ]
[]
[ "USER" ]
[]
["USER"]
go
1
0
readtwice/layers/recompute_grad.py
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Library for rematerialization. Incubates a version of tf.recompute_grad that is XLA compatible. """ import collections import os import threading from typing import Deque, List, NamedTuple, Optional, Sequence from absl import logging import numpy as np import tensorflow as tf class RecomputeContext( NamedTuple('RecomputeContext', [ ('is_recomputing', bool), ('seed', tf.Tensor), ('children', Deque['RecomputeContext']), ])): """Context for recomputation. Attributes: is_recomputing: Whether we are in a recomputation phase. seed: Scalar integer tensor that should be used with stateless random ops for deterministic behavior and correct computation of the gradient. children: Nested `RecomputeContext` instances. Used internally by `recompute_grad` to track nested instances of `RecomputeContext`. """ def __enter__(self): return _context_stack.push(self) def __exit__(self, exc_type, exc_value, traceback): _context_stack.pop(self) # Simplified version of `_DefaultStack` in # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/ops.py. class _ContextStack(threading.local): """A thread-local stack for providing implicit recompute contexts.""" def __init__(self): super(_ContextStack, self).__init__() self._stack = [] def top(self): return self._stack[-1] if self._stack else None def push(self, context): self._stack.append(context) return context def pop(self, context): if self._stack[-1] is not context: raise AssertionError('Nesting violated for RecomputeContext.') self._stack.pop() _context_stack = _ContextStack() def get_recompute_context(): """Returns the current recomputing context if it exists.""" return _context_stack.top() # Adapted from # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/control_flow_util.py. def _get_containing_xla_context(graph): """Returns the first ancestor `XLAControlFlowContext` in the `graph`.""" ctxt = graph._get_control_flow_context() # pylint: disable=protected-access while ctxt: if ctxt.IsXLAContext(): return ctxt ctxt = ctxt.outer_context return None def _in_xla_context(graph = None): """Detects whether we are in an XLA context.""" if '--tf_xla_auto_jit=2' in os.environ.get('TF_XLA_FLAGS', ''): return True graph = tf.compat.v1.get_default_graph() if graph is None else graph while True: if _get_containing_xla_context(graph) is not None: return True try: graph = graph.outer_graph except AttributeError: return False def _force_data_dependency( first_compute, then_compute): """Force all of `then_compute` to depend on all of `first_compute`. Uses a dummy data dependency, which is useful when running on TPUs because XLA ignores control dependencies. Only supports float arguments. Args: first_compute: Sequence of `Tensor`s to be executed before `then_compute`. then_compute: Sequence of `Tensor`s to executed after `first_compute`. Returns: Sequence of `Tensor`s with same length of `then_compute`. 
Raises: ValueError: if ranks are unknown or types are not floating. """ def _first_element(x): if x.shape.ndims is None: raise ValueError('Rank of Tensor %s must be known' % x) ndims = x.shape.ndims begin = tf.zeros(ndims, dtype=tf.int32) size = tf.ones(ndims, dtype=tf.int32) return tf.reshape(tf.slice(x, begin, size), []) first_compute_sum = tf.add_n( [_first_element(x) for x in first_compute if x is not None]) dtype = first_compute_sum.dtype if not dtype.is_floating: raise ValueError('_force_data_dependency only supports floating dtypes.') zero = np.finfo(dtype.as_numpy_dtype).tiny * first_compute_sum return [ x + tf.cast(zero, x.dtype) if x is not None else None for x in then_compute ] def _make_seed_if_none(seed): """Uses the global generator to make a seed if necessary.""" if seed is not None: return seed generator = tf.random.experimental.get_global_generator() # The two seeds for stateless random ops don't have individual semantics and # are scrambled together, so providing one seed is fine. This makes it easier # for users to provide a local seed without worrying about integer overflow. # See `make_seeds` in # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/stateful_random_ops.py. try: return generator.uniform_full_int([], tf.int32, name='recompute_grad_seed') except (RuntimeError, TypeError, ValueError, tf.errors.NotFoundError) as e: # For a number of reasons, the above operation can fail like using multiple # graphs or toggling between eager and graph modes. Reset the generator. logging.warn('Resetting the generator. %s: %s', type(e), e) tf.random.experimental.set_global_generator(None) generator = tf.random.experimental.get_global_generator() return generator.uniform_full_int([], tf.int32, name='recompute_grad_seed') def recompute_grad(f, seed=None): """An eager-compatible version of recompute_grad. For f(*args, **kwargs), this supports gradients with respect to args, or to gradients with respect to any variables residing in the kwarg 'variables'. Note that for keras layer and model objects, this is handled automatically. Warning: If `f` was originally a tf.keras Model or Layer object, `g` will not be able to access the member variables of that object, because `g` returns through the wrapper function `inner`. When recomputing gradients through objects that inherit from keras, we suggest keeping a reference to the underlying object around for the purpose of accessing these variables. Args: f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs. seed: Optional seed for random ops. `seed` should an integer scalar `Tensor`. When compiling to XLA, `seed` must have dtype `tf.int32`. If `seed` is not provided one will be generated. Returns: A function `g` that wraps `f`, but which recomputes `f` on the backwards pass of a gradient call. """ @tf.custom_gradient def inner(*args, **kwargs): """Inner function closure for calculating gradients.""" # Detect when we're nested and in the backwards pass, so we don't generate # an additional seed. parent_context = get_recompute_context() if parent_context is not None and parent_context.is_recomputing: # Use the cached context in the recomputation phase. with parent_context.children.popleft()._replace( is_recomputing=True) as context: result = f(*args, **kwargs) else: with RecomputeContext( is_recomputing=False, seed=_make_seed_if_none(seed), children=collections.deque()) as context: result = f(*args, **kwargs) # In the forward pass, build up a tree of recomputation contexts. 
if parent_context is not None and not parent_context.is_recomputing: parent_context.children.append(context) def grad(*dresult, **grad_kwargs): """Gradient function calculation for inner function.""" variables = grad_kwargs.pop('variables', None) if grad_kwargs: raise ValueError('Found unexpected kwargs for `grad`: ', list(grad_kwargs.keys())) inputs, seed = list(args), context.seed if _in_xla_context(): inputs = _force_data_dependency( tf.nest.flatten(dresult), inputs + [seed]) seed = inputs.pop() with tf.GradientTape() as tape: tape.watch(inputs) if variables is not None: tape.watch(variables) with tf.control_dependencies(dresult): with context._replace(is_recomputing=True, seed=seed): result = f(*inputs, **kwargs) kw_vars = [] if variables is not None: kw_vars = list(variables) grads = tape.gradient( result, list(inputs) + kw_vars, output_gradients=dresult) return grads[:len(inputs)], grads[len(inputs):] return result, grad return inner
[]
[]
[ "TF_XLA_FLAGS" ]
[]
["TF_XLA_FLAGS"]
python
1
0
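For quick reference, the following is a minimal usage sketch of the `recompute_grad` wrapper defined in the record above; it is not part of the dataset entry. The import path, the variable `w`, and the tensor shapes are illustrative assumptions, and the commented-out line only shows how the record's annotated `TF_XLA_FLAGS` value feeds the module's `_in_xla_context()` check.

import os

import tensorflow as tf

# Assumed import location for the module shown above; adjust to wherever the
# file is vendored (it describes itself as a "Library for rematerialization").
from rematerialization import recompute_grad

# Uncommenting this is one of the conditions _in_xla_context() checks, so it
# forces the XLA-specific path (the dummy data dependency on the seed) even
# outside an explicit XLA compilation scope.
# os.environ["TF_XLA_FLAGS"] = "--tf_xla_auto_jit=2"

w = tf.Variable(tf.random.normal([64, 64]), name="w")


def block(x):
    # Two matmuls whose intermediate activation we would rather recompute on
    # the backward pass than keep in memory.
    h = tf.nn.relu(tf.matmul(x, w))
    return tf.nn.relu(tf.matmul(h, w))


# Same forward behaviour as `block`, but the gradient re-executes `block`
# under a RecomputeContext instead of reusing stored activations.
recomputed_block = recompute_grad(block)

x = tf.random.normal([8, 64])
with tf.GradientTape() as tape:
    loss = tf.reduce_sum(recomputed_block(x))
grads = tape.gradient(loss, [w])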
src/pip/_internal/locations/__init__.py
import functools import logging import os import pathlib import sys import sysconfig from typing import List, Optional from pip._internal.models.scheme import SCHEME_KEYS, Scheme from . import _distutils, _sysconfig from .base import ( USER_CACHE_DIR, get_major_minor_version, get_src_prefix, is_osx_framework, site_packages, user_site, ) __all__ = [ "USER_CACHE_DIR", "get_bin_prefix", "get_bin_user", "get_major_minor_version", "get_platlib", "get_prefixed_libs", "get_purelib", "get_scheme", "get_src_prefix", "site_packages", "user_site", ] logger = logging.getLogger(__name__) if os.environ.get("_PIP_LOCATIONS_NO_WARN_ON_MISMATCH"): _MISMATCH_LEVEL = logging.DEBUG else: _MISMATCH_LEVEL = logging.WARNING def _default_base(*, user: bool) -> str: if user: base = sysconfig.get_config_var("userbase") else: base = sysconfig.get_config_var("base") assert base is not None return base @functools.lru_cache(maxsize=None) def _warn_if_mismatch(old: pathlib.Path, new: pathlib.Path, *, key: str) -> bool: if old == new: return False issue_url = "https://github.com/pypa/pip/issues/10151" message = ( "Value for %s does not match. Please report this to <%s>" "\ndistutils: %s" "\nsysconfig: %s" ) logger.log(_MISMATCH_LEVEL, message, key, issue_url, old, new) return True @functools.lru_cache(maxsize=None) def _log_context( *, user: bool = False, home: Optional[str] = None, root: Optional[str] = None, prefix: Optional[str] = None, ) -> None: message = ( "Additional context:" "\nuser = %r" "\nhome = %r" "\nroot = %r" "\nprefix = %r" ) logger.log(_MISMATCH_LEVEL, message, user, home, root, prefix) def get_scheme( dist_name: str, user: bool = False, home: Optional[str] = None, root: Optional[str] = None, isolated: bool = False, prefix: Optional[str] = None, ) -> Scheme: old = _distutils.get_scheme( dist_name, user=user, home=home, root=root, isolated=isolated, prefix=prefix, ) new = _sysconfig.get_scheme( dist_name, user=user, home=home, root=root, isolated=isolated, prefix=prefix, ) base = prefix or home or _default_base(user=user) warned = [] for k in SCHEME_KEYS: # Extra join because distutils can return relative paths. old_v = pathlib.Path(base, getattr(old, k)) new_v = pathlib.Path(getattr(new, k)) # distutils incorrectly put PyPy packages under ``site-packages/python`` # in the ``posix_home`` scheme, but PyPy devs said they expect the # directory name to be ``pypy`` instead. So we treat this as a bug fix # and not warn about it. See bpo-43307 and python/cpython#24628. skip_pypy_special_case = ( sys.implementation.name == "pypy" and home is not None and k in ("platlib", "purelib") and old_v.parent == new_v.parent and old_v.name.startswith("python") and new_v.name.startswith("pypy") ) if skip_pypy_special_case: continue # sysconfig's ``osx_framework_user`` does not include ``pythonX.Y`` in # the ``include`` value, but distutils's ``headers`` does. We'll let # CPython decide whether this is a bug or feature. See bpo-43948. 
skip_osx_framework_user_special_case = ( user and is_osx_framework() and k == "headers" and old_v.parent == new_v and old_v.name.startswith("python") ) if skip_osx_framework_user_special_case: continue warned.append(_warn_if_mismatch(old_v, new_v, key=f"scheme.{k}")) if any(warned): _log_context(user=user, home=home, root=root, prefix=prefix) return old def get_bin_prefix() -> str: old = _distutils.get_bin_prefix() new = _sysconfig.get_bin_prefix() if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="bin_prefix"): _log_context() return old def get_bin_user() -> str: return _sysconfig.get_scheme("", user=True).scripts def get_purelib() -> str: """Return the default pure-Python lib location.""" old = _distutils.get_purelib() new = _sysconfig.get_purelib() if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="purelib"): _log_context() return old def get_platlib() -> str: """Return the default platform-shared lib location.""" old = _distutils.get_platlib() new = _sysconfig.get_platlib() if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="platlib"): _log_context() return old def get_prefixed_libs(prefix: str) -> List[str]: """Return the lib locations under ``prefix``.""" old_pure, old_plat = _distutils.get_prefixed_libs(prefix) new_pure, new_plat = _sysconfig.get_prefixed_libs(prefix) warned = [ _warn_if_mismatch( pathlib.Path(old_pure), pathlib.Path(new_pure), key="prefixed-purelib", ), _warn_if_mismatch( pathlib.Path(old_plat), pathlib.Path(new_plat), key="prefixed-platlib", ), ] if any(warned): _log_context(prefix=prefix) if old_pure == old_plat: return [old_pure] return [old_pure, old_plat]
[]
[]
[ "_PIP_LOCATIONS_NO_WARN_ON_MISMATCH" ]
[]
["_PIP_LOCATIONS_NO_WARN_ON_MISMATCH"]
python
1
0
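As a standalone illustration of the pattern the pip module above relies on (again, not part of the dataset entry; the function and logger names are re-sketched here rather than pip's internal API): the `_PIP_LOCATIONS_NO_WARN_ON_MISMATCH` environment variable selects the log level once at import time, and `functools.lru_cache` ensures each distinct distutils/sysconfig mismatch is reported only once.

import functools
import logging
import os
import pathlib

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("locations-sketch")

# Set _PIP_LOCATIONS_NO_WARN_ON_MISMATCH to any non-empty value to demote the
# mismatch message from WARNING to DEBUG, mirroring the module above.
if os.environ.get("_PIP_LOCATIONS_NO_WARN_ON_MISMATCH"):
    _MISMATCH_LEVEL = logging.DEBUG
else:
    _MISMATCH_LEVEL = logging.WARNING


@functools.lru_cache(maxsize=None)
def warn_if_mismatch(old: pathlib.Path, new: pathlib.Path, *, key: str) -> bool:
    """Log once per (old, new, key) combination; return True when they differ."""
    if old == new:
        return False
    logger.log(_MISMATCH_LEVEL, "Value for %s does not match: %s != %s", key, old, new)
    return True


# Repeated calls with identical arguments hit the lru_cache, so the message is
# emitted only once no matter how many schemes are compared.
warn_if_mismatch(pathlib.Path("/usr/lib"), pathlib.Path("/usr/lib64"), key="platlib")
warn_if_mismatch(pathlib.Path("/usr/lib"), pathlib.Path("/usr/lib64"), key="platlib")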
config/main.py
#!/usr/sbin/env python import click import ipaddress import json import netaddr import netifaces import os import re import subprocess import sys import threading import time from socket import AF_INET, AF_INET6 from minigraph import parse_device_desc_xml from portconfig import get_child_ports from sonic_py_common import device_info, multi_asic from sonic_py_common.interface import get_interface_table_name, get_port_table_name from swsssdk import ConfigDBConnector, SonicDBConfig from swsscommon.swsscommon import SonicV2Connector from utilities_common.db import Db from utilities_common.intf_filter import parse_interface_in_filter import utilities_common.cli as clicommon from .utils import log from . import aaa from . import chassis_modules from . import console from . import feature from . import kdump from . import kube from . import mlnx from . import muxcable from . import nat from . import vlan from . import vxlan from .config_mgmt import ConfigMgmtDPB # mock masic APIs for unit test try: if os.environ["UTILITIES_UNIT_TESTING"] == "1" or os.environ["UTILITIES_UNIT_TESTING"] == "2": modules_path = os.path.join(os.path.dirname(__file__), "..") tests_path = os.path.join(modules_path, "tests") sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": import mock_tables.mock_multi_asic mock_tables.dbconnector.load_namespace_config() except KeyError: pass CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help', '-?']) SONIC_GENERATED_SERVICE_PATH = '/etc/sonic/generated_services.conf' SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen' VLAN_SUB_INTERFACE_SEPARATOR = '.' ASIC_CONF_FILENAME = 'asic.conf' DEFAULT_CONFIG_DB_FILE = '/etc/sonic/config_db.json' NAMESPACE_PREFIX = 'asic' INTF_KEY = "interfaces" INIT_CFG_FILE = '/etc/sonic/init_cfg.json' SYSTEMCTL_ACTION_STOP="stop" SYSTEMCTL_ACTION_RESTART="restart" SYSTEMCTL_ACTION_RESET_FAILED="reset-failed" DEFAULT_NAMESPACE = '' CFG_LOOPBACK_PREFIX = "Loopback" CFG_LOOPBACK_PREFIX_LEN = len(CFG_LOOPBACK_PREFIX) CFG_LOOPBACK_NAME_TOTAL_LEN_MAX = 11 CFG_LOOPBACK_ID_MAX_VAL = 999 CFG_LOOPBACK_NO="<0-999>" CFG_PORTCHANNEL_PREFIX = "PortChannel" CFG_PORTCHANNEL_PREFIX_LEN = 11 CFG_PORTCHANNEL_NAME_TOTAL_LEN_MAX = 15 CFG_PORTCHANNEL_MAX_VAL = 9999 CFG_PORTCHANNEL_NO="<0-9999>" PORT_MTU = "mtu" PORT_SPEED = "speed" asic_type = None # # Breakout Mode Helper functions # # Read given JSON file def readJsonFile(fileName): try: with open(fileName) as f: result = json.load(f) except Exception as e: raise Exception(str(e)) return result def _get_breakout_options(ctx, args, incomplete): """ Provides dynamic mode option as per user argument i.e. 
interface name """ all_mode_options = [] interface_name = args[-1] breakout_cfg_file = device_info.get_path_to_port_config_file() if not os.path.isfile(breakout_cfg_file) or not breakout_cfg_file.endswith('.json'): return [] else: breakout_file_input = readJsonFile(breakout_cfg_file) if interface_name in breakout_file_input[INTF_KEY]: breakout_mode_list = [v["breakout_modes"] for i, v in breakout_file_input[INTF_KEY].items() if i == interface_name][0] breakout_mode_options = [] for i in breakout_mode_list.split(','): breakout_mode_options.append(i) all_mode_options = [str(c) for c in breakout_mode_options if incomplete in c] return all_mode_options def shutdown_interfaces(ctx, del_intf_dict): """ shut down all the interfaces before deletion """ for intf in del_intf_dict: config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, intf) if interface_name is None: click.echo("[ERROR] interface name is None!") return False if interface_name_is_valid(config_db, intf) is False: click.echo("[ERROR] Interface name is invalid. Please enter a valid interface name!!") return False port_dict = config_db.get_table('PORT') if not port_dict: click.echo("port_dict is None!") return False if intf in port_dict: config_db.mod_entry("PORT", intf, {"admin_status": "down"}) else: click.secho("[ERROR] Could not get the correct interface name, exiting", fg='red') return False return True def _validate_interface_mode(ctx, breakout_cfg_file, interface_name, target_brkout_mode, cur_brkout_mode): """ Validate Parent interface and user selected mode before starting deletion or addition process """ breakout_file_input = readJsonFile(breakout_cfg_file)["interfaces"] if interface_name not in breakout_file_input: click.secho("[ERROR] {} is not a Parent port. So, Breakout Mode is not available on this port".format(interface_name), fg='red') return False # Check whether target breakout mode is available for the user-selected interface or not if target_brkout_mode not in breakout_file_input[interface_name]["breakout_modes"]: click.secho('[ERROR] Target mode {} is not available for the port {}'. format(target_brkout_mode, interface_name), fg='red') return False # Get config db context config_db = ctx.obj['config_db'] port_dict = config_db.get_table('PORT') # Check whether there is any port in config db. if not port_dict: click.echo("port_dict is None!") return False # Check whether the user-selected interface is part of 'port' table in config db. if interface_name not in port_dict: click.secho("[ERROR] {} is not in port_dict".format(interface_name)) return False click.echo("\nRunning Breakout Mode : {} \nTarget Breakout Mode : {}".format(cur_brkout_mode, target_brkout_mode)) if (cur_brkout_mode == target_brkout_mode): click.secho("[WARNING] No action will be taken as current and desired Breakout Mode are same.", fg='magenta') sys.exit(0) return True def load_ConfigMgmt(verbose): """ Load config for the commands which are capable of change in config DB. """ try: cm = ConfigMgmtDPB(debug=verbose) return cm except Exception as e: raise Exception("Failed to load the config. Error: {}".format(str(e))) def breakout_warnUser_extraTables(cm, final_delPorts, confirm=True): """ Function to warn user about extra tables while Dynamic Port Breakout(DPB). confirm: re-confirm from user to proceed. Config Tables Without Yang model considered extra tables. cm = instance of config MGMT class. 
""" try: # check if any extra tables exist eTables = cm.tablesWithOutYang() if len(eTables): # find relavent tables in extra tables, i.e. one which can have deleted # ports tables = cm.configWithKeys(configIn=eTables, keys=final_delPorts) click.secho("Below Config can not be verified, It may cause harm "\ "to the system\n {}".format(json.dumps(tables, indent=2))) click.confirm('Do you wish to Continue?', abort=True) except Exception as e: raise Exception("Failed in breakout_warnUser_extraTables. Error: {}".format(str(e))) return def breakout_Ports(cm, delPorts=list(), portJson=dict(), force=False, \ loadDefConfig=False, verbose=False): deps, ret = cm.breakOutPort(delPorts=delPorts, portJson=portJson, \ force=force, loadDefConfig=loadDefConfig) # check if DPB failed if ret == False: if not force and deps: click.echo("Dependecies Exist. No further action will be taken") click.echo("*** Printing dependecies ***") for dep in deps: click.echo(dep) sys.exit(0) else: click.echo("[ERROR] Port breakout Failed!!! Opting Out") raise click.Abort() return # # Helper functions # # Execute action per NPU instance for multi instance services. def execute_systemctl_per_asic_instance(inst, event, service, action): try: click.echo("Executing {} of service {}@{}...".format(action, service, inst)) clicommon.run_command("systemctl {} {}@{}.service".format(action, service, inst)) except SystemExit as e: log.log_error("Failed to execute {} of service {}@{} with error {}".format(action, service, inst, e)) # Set the event object if there is a failure and exception was raised. event.set() # Execute action on list of systemd services def execute_systemctl(list_of_services, action): num_asic = multi_asic.get_num_asics() generated_services_list, generated_multi_instance_services = _get_sonic_generated_services(num_asic) if ((generated_services_list == []) and (generated_multi_instance_services == [])): log.log_error("Failed to get generated services") return for service in list_of_services: if (service + '.service' in generated_services_list): try: click.echo("Executing {} of service {}...".format(action, service)) clicommon.run_command("systemctl {} {}".format(action, service)) except SystemExit as e: log.log_error("Failed to execute {} of service {} with error {}".format(action, service, e)) raise if (service + '.service' in generated_multi_instance_services): # With Multi NPU, Start a thread per instance to do the "action" on multi instance services. if multi_asic.is_multi_asic(): threads = [] # Use this event object to co-ordinate if any threads raised exception e = threading.Event() kwargs = {'service': service, 'action': action} for inst in range(num_asic): t = threading.Thread(target=execute_systemctl_per_asic_instance, args=(inst, e), kwargs=kwargs) threads.append(t) t.start() # Wait for all the threads to finish. for inst in range(num_asic): threads[inst].join() # Check if any of the threads have raised exception, if so exit the process. 
if e.is_set(): sys.exit(1) def _get_device_type(): """ Get device type TODO: move to sonic-py-common """ command = "{} -m -v DEVICE_METADATA.localhost.type".format(SONIC_CFGGEN_PATH) proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) device_type, err = proc.communicate() if err: click.echo("Could not get the device type from minigraph, setting device type to Unknown") device_type = 'Unknown' else: device_type = device_type.strip() return device_type def interface_alias_to_name(config_db, interface_alias): """Return default interface name if alias name is given as argument """ vlan_id = "" sub_intf_sep_idx = -1 if interface_alias is not None: sub_intf_sep_idx = interface_alias.find(VLAN_SUB_INTERFACE_SEPARATOR) if sub_intf_sep_idx != -1: vlan_id = interface_alias[sub_intf_sep_idx + 1:] # interface_alias holds the parent port name so the subsequent logic still applies interface_alias = interface_alias[:sub_intf_sep_idx] # If the input parameter config_db is None, derive it from interface. # In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE. if config_db is None: namespace = get_port_namespace(interface_alias) if namespace is None: return None config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() port_dict = config_db.get_table('PORT') if interface_alias is not None: if not port_dict: click.echo("port_dict is None!") raise click.Abort() for port_name in port_dict: if interface_alias == port_dict[port_name]['alias']: return port_name if sub_intf_sep_idx == -1 else port_name + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id # Interface alias not in port_dict, just return interface_alias, e.g., # portchannel is passed in as argument, which does not have an alias return interface_alias if sub_intf_sep_idx == -1 else interface_alias + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id def interface_name_is_valid(config_db, interface_name): """Check if the interface name is valid """ # If the input parameter config_db is None, derive it from interface. # In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE. if config_db is None: namespace = get_port_namespace(interface_name) if namespace is None: return False config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() port_dict = config_db.get_table('PORT') port_channel_dict = config_db.get_table('PORTCHANNEL') sub_port_intf_dict = config_db.get_table('VLAN_SUB_INTERFACE') if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is not None: if not port_dict: click.echo("port_dict is None!") raise click.Abort() for port_name in port_dict: if interface_name == port_name: return True if port_channel_dict: for port_channel_name in port_channel_dict: if interface_name == port_channel_name: return True if sub_port_intf_dict: for sub_port_intf_name in sub_port_intf_dict: if interface_name == sub_port_intf_name: return True return False def interface_name_to_alias(config_db, interface_name): """Return alias interface name if default name is given as argument """ # If the input parameter config_db is None, derive it from interface. # In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE. 
if config_db is None: namespace = get_port_namespace(interface_name) if namespace is None: return None config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() port_dict = config_db.get_table('PORT') if interface_name is not None: if not port_dict: click.echo("port_dict is None!") raise click.Abort() for port_name in port_dict: if interface_name == port_name: return port_dict[port_name]['alias'] return None def interface_ipaddr_dependent_on_interface(config_db, interface_name): """Get table keys including ipaddress """ data = [] table_name = get_interface_table_name(interface_name) if table_name == "": return data keys = config_db.get_keys(table_name) for key in keys: if interface_name in key and len(key) == 2: data.append(key) return data def is_interface_bind_to_vrf(config_db, interface_name): """Get interface if bind to vrf or not """ table_name = get_interface_table_name(interface_name) if table_name == "": return False entry = config_db.get_entry(table_name, interface_name) if entry and entry.get("vrf_name"): return True return False def is_portchannel_name_valid(portchannel_name): """Port channel name validation """ # Return True if Portchannel name is PortChannelXXXX (XXXX can be 0-9999) if portchannel_name[:CFG_PORTCHANNEL_PREFIX_LEN] != CFG_PORTCHANNEL_PREFIX : return False if (portchannel_name[CFG_PORTCHANNEL_PREFIX_LEN:].isdigit() is False or int(portchannel_name[CFG_PORTCHANNEL_PREFIX_LEN:]) > CFG_PORTCHANNEL_MAX_VAL) : return False if len(portchannel_name) > CFG_PORTCHANNEL_NAME_TOTAL_LEN_MAX: return False return True def is_portchannel_present_in_db(db, portchannel_name): """Check if Portchannel is present in Config DB """ # Return True if Portchannel name exists in the CONFIG_DB portchannel_list = db.get_table(CFG_PORTCHANNEL_PREFIX) if portchannel_list is None: return False if portchannel_name in portchannel_list: return True return False def is_port_member_of_this_portchannel(db, port_name, portchannel_name): """Check if a port is member of given portchannel """ portchannel_list = db.get_table(CFG_PORTCHANNEL_PREFIX) if portchannel_list is None: return False for k,v in db.get_table('PORTCHANNEL_MEMBER'): if (k == portchannel_name) and (v == port_name): return True return False # Return the namespace where an interface belongs # The port name input could be in default mode or in alias mode. def get_port_namespace(port): # If it is a non multi-asic platform, or if the interface is management interface # return DEFAULT_NAMESPACE if not multi_asic.is_multi_asic() or port == 'eth0': return DEFAULT_NAMESPACE # Get the table to check for interface presence table_name = get_port_table_name(port) if table_name == "": return None ns_list = multi_asic.get_all_namespaces() namespaces = ns_list['front_ns'] + ns_list['back_ns'] for namespace in namespaces: config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() # If the interface naming mode is alias, search the tables for alias_name. 
if clicommon.get_interface_naming_mode() == "alias": port_dict = config_db.get_table(table_name) if port_dict: for port_name in port_dict: if port == port_dict[port_name]['alias']: return namespace else: entry = config_db.get_entry(table_name, port) if entry: return namespace return None def del_interface_bind_to_vrf(config_db, vrf_name): """del interface bind to vrf """ tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE'] for table_name in tables: interface_dict = config_db.get_table(table_name) if interface_dict: for interface_name in interface_dict: if 'vrf_name' in interface_dict[interface_name] and vrf_name == interface_dict[interface_name]['vrf_name']: interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name) for interface_del in interface_dependent: config_db.set_entry(table_name, interface_del, None) config_db.set_entry(table_name, interface_name, None) def set_interface_naming_mode(mode): """Modify SONIC_CLI_IFACE_MODE env variable in user .bashrc """ user = os.getenv('SUDO_USER') bashrc_ifacemode_line = "export SONIC_CLI_IFACE_MODE={}".format(mode) # In case of multi-asic, we can check for the alias mode support in any of # the namespaces as this setting of alias mode should be identical everywhere. # Here by default we set the namespaces to be a list just having '' which # represents the linux host. In case of multi-asic, we take the first namespace # created for the front facing ASIC. namespaces = [DEFAULT_NAMESPACE] if multi_asic.is_multi_asic(): namespaces = multi_asic.get_all_namespaces()['front_ns'] # Ensure all interfaces have an 'alias' key in PORT dict config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespaces[0]) config_db.connect() port_dict = config_db.get_table('PORT') if not port_dict: click.echo("port_dict is None!") raise click.Abort() for port_name in port_dict: try: if port_dict[port_name]['alias']: pass except KeyError: click.echo("Platform does not support alias mapping") raise click.Abort() if not user: user = os.getenv('USER') if user != "root": bashrc = "/home/{}/.bashrc".format(user) else: click.get_current_context().fail("Cannot set interface naming mode for root user!") f = open(bashrc, 'r') filedata = f.read() f.close() if "SONIC_CLI_IFACE_MODE" not in filedata: newdata = filedata + bashrc_ifacemode_line newdata += "\n" else: newdata = re.sub(r"export SONIC_CLI_IFACE_MODE=\w+", bashrc_ifacemode_line, filedata) f = open(bashrc, 'w') f.write(newdata) f.close() click.echo("Please logout and log back in for changes take effect.") def _is_neighbor_ipaddress(config_db, ipaddress): """Returns True if a neighbor has the IP address <ipaddress>, False if not """ entry = config_db.get_entry('BGP_NEIGHBOR', ipaddress) return True if entry else False def _get_all_neighbor_ipaddresses(config_db): """Returns list of strings containing IP addresses of all BGP neighbors """ addrs = [] bgp_sessions = config_db.get_table('BGP_NEIGHBOR') for addr, session in bgp_sessions.items(): addrs.append(addr) return addrs def _get_neighbor_ipaddress_list_by_hostname(config_db, hostname): """Returns list of strings, each containing an IP address of neighbor with hostname <hostname>. 
Returns empty list if <hostname> not a neighbor """ addrs = [] bgp_sessions = config_db.get_table('BGP_NEIGHBOR') for addr, session in bgp_sessions.items(): if 'name' in session and session['name'] == hostname: addrs.append(addr) return addrs def _change_bgp_session_status_by_addr(config_db, ipaddress, status, verbose): """Start up or shut down BGP session by IP address """ verb = 'Starting' if status == 'up' else 'Shutting' click.echo("{} {} BGP session with neighbor {}...".format(verb, status, ipaddress)) config_db.mod_entry('bgp_neighbor', ipaddress, {'admin_status': status}) def _change_bgp_session_status(config_db, ipaddr_or_hostname, status, verbose): """Start up or shut down BGP session by IP address or hostname """ ip_addrs = [] # If we were passed an IP address, convert it to lowercase because IPv6 addresses were # stored in ConfigDB with all lowercase alphabet characters during minigraph parsing if _is_neighbor_ipaddress(config_db, ipaddr_or_hostname.lower()): ip_addrs.append(ipaddr_or_hostname.lower()) else: # If <ipaddr_or_hostname> is not the IP address of a neighbor, check to see if it's a hostname ip_addrs = _get_neighbor_ipaddress_list_by_hostname(config_db, ipaddr_or_hostname) if not ip_addrs: return False for ip_addr in ip_addrs: _change_bgp_session_status_by_addr(config_db, ip_addr, status, verbose) return True def _validate_bgp_neighbor(config_db, neighbor_ip_or_hostname): """validates whether the given ip or host name is a BGP neighbor """ ip_addrs = [] if _is_neighbor_ipaddress(config_db, neighbor_ip_or_hostname.lower()): ip_addrs.append(neighbor_ip_or_hostname.lower()) else: ip_addrs = _get_neighbor_ipaddress_list_by_hostname(config_db, neighbor_ip_or_hostname.upper()) return ip_addrs def _remove_bgp_neighbor_config(config_db, neighbor_ip_or_hostname): """Removes BGP configuration of the given neighbor """ ip_addrs = _validate_bgp_neighbor(config_db, neighbor_ip_or_hostname) if not ip_addrs: return False for ip_addr in ip_addrs: config_db.mod_entry('bgp_neighbor', ip_addr, None) click.echo("Removed configuration of BGP neighbor {}".format(ip_addr)) return True def _change_hostname(hostname): current_hostname = os.uname()[1] if current_hostname != hostname: clicommon.run_command('echo {} > /etc/hostname'.format(hostname), display_cmd=True) clicommon.run_command('hostname -F /etc/hostname', display_cmd=True) clicommon.run_command('sed -i "/\s{}$/d" /etc/hosts'.format(current_hostname), display_cmd=True) clicommon.run_command('echo "127.0.0.1 {}" >> /etc/hosts'.format(hostname), display_cmd=True) def _clear_qos(): QOS_TABLE_NAMES = [ 'TC_TO_PRIORITY_GROUP_MAP', 'MAP_PFC_PRIORITY_TO_QUEUE', 'TC_TO_QUEUE_MAP', 'DSCP_TO_TC_MAP', 'SCHEDULER', 'PFC_PRIORITY_TO_PRIORITY_GROUP_MAP', 'PORT_QOS_MAP', 'WRED_PROFILE', 'QUEUE', 'CABLE_LENGTH', 'BUFFER_POOL', 'BUFFER_PROFILE', 'BUFFER_PG', 'BUFFER_QUEUE'] namespace_list = [DEFAULT_NAMESPACE] if multi_asic.get_num_asics() > 1: namespace_list = multi_asic.get_namespaces_from_linux() for ns in namespace_list: if ns is DEFAULT_NAMESPACE: config_db = ConfigDBConnector() else: config_db = ConfigDBConnector( use_unix_socket_path=True, namespace=ns ) config_db.connect() for qos_table in QOS_TABLE_NAMES: config_db.delete_table(qos_table) def _get_sonic_generated_services(num_asic): if not os.path.isfile(SONIC_GENERATED_SERVICE_PATH): return None generated_services_list = [] generated_multi_instance_services = [] with open(SONIC_GENERATED_SERVICE_PATH) as generated_service_file: for line in generated_service_file: if '@' in line: line = 
line.replace('@', '') if num_asic > 1: generated_multi_instance_services.append(line.rstrip('\n')) else: generated_services_list.append(line.rstrip('\n')) else: generated_services_list.append(line.rstrip('\n')) return generated_services_list, generated_multi_instance_services # Callback for confirmation prompt. Aborts if user enters "n" def _abort_if_false(ctx, param, value): if not value: ctx.abort() def _get_disabled_services_list(config_db): disabled_services_list = [] feature_table = config_db.get_table('FEATURE') if feature_table is not None: for feature_name in feature_table: if not feature_name: log.log_warning("Feature is None") continue state = feature_table[feature_name]['state'] if not state: log.log_warning("Enable state of feature '{}' is None".format(feature_name)) continue if state == "disabled": disabled_services_list.append(feature_name) else: log.log_warning("Unable to retreive FEATURE table") return disabled_services_list def _stop_services(config_db): # This list is order-dependent. Please add services in the order they should be stopped # on Mellanox platform pmon is stopped by syncd services_to_stop = [ 'telemetry', 'restapi', 'swss', 'lldp', 'pmon', 'bgp', 'hostcfgd', 'nat' ] if asic_type == 'mellanox' and 'pmon' in services_to_stop: services_to_stop.remove('pmon') disabled_services = _get_disabled_services_list(config_db) for service in disabled_services: if service in services_to_stop: services_to_stop.remove(service) execute_systemctl(services_to_stop, SYSTEMCTL_ACTION_STOP) def _reset_failed_services(config_db): # This list is order-independent. Please keep list in alphabetical order services_to_reset = [ 'bgp', 'dhcp_relay', 'hostcfgd', 'hostname-config', 'interfaces-config', 'lldp', 'nat', 'ntp-config', 'pmon', 'radv', 'restapi', 'rsyslog-config', 'sflow', 'snmp', 'swss', 'syncd', 'teamd', 'telemetry' ] disabled_services = _get_disabled_services_list(config_db) for service in disabled_services: if service in services_to_reset: services_to_reset.remove(service) execute_systemctl(services_to_reset, SYSTEMCTL_ACTION_RESET_FAILED) def _restart_services(config_db): # This list is order-dependent. 
Please add services in the order they should be started # on Mellanox platform pmon is started by syncd services_to_restart = [ 'hostname-config', 'interfaces-config', 'ntp-config', 'rsyslog-config', 'swss', 'bgp', 'pmon', 'lldp', 'hostcfgd', 'nat', 'sflow', 'restapi', 'telemetry' ] disabled_services = _get_disabled_services_list(config_db) for service in disabled_services: if service in services_to_restart: services_to_restart.remove(service) if asic_type == 'mellanox' and 'pmon' in services_to_restart: services_to_restart.remove('pmon') execute_systemctl(services_to_restart, SYSTEMCTL_ACTION_RESTART) # Reload Monit configuration to pick up new hostname in case it changed click.echo("Reloading Monit configuration ...") clicommon.run_command("sudo monit reload") def interface_is_in_vlan(vlan_member_table, interface_name): """ Check if an interface is in a vlan """ for _, intf in vlan_member_table: if intf == interface_name: return True return False def interface_is_in_portchannel(portchannel_member_table, interface_name): """ Check if an interface is part of portchannel """ for _, intf in portchannel_member_table: if intf == interface_name: return True return False def interface_has_mirror_config(mirror_table, interface_name): """ Check if port is already configured with mirror config """ for _, v in mirror_table.items(): if 'src_port' in v and v['src_port'] == interface_name: return True if 'dst_port' in v and v['dst_port'] == interface_name: return True return False def validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction): """ Check if SPAN mirror-session config is valid """ if len(config_db.get_entry('MIRROR_SESSION', session_name)) != 0: click.echo("Error: {} already exists".format(session_name)) return False vlan_member_table = config_db.get_table('VLAN_MEMBER') mirror_table = config_db.get_table('MIRROR_SESSION') portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER') if dst_port: if not interface_name_is_valid(config_db, dst_port): click.echo("Error: Destination Interface {} is invalid".format(dst_port)) return False if interface_is_in_vlan(vlan_member_table, dst_port): click.echo("Error: Destination Interface {} has vlan config".format(dst_port)) return False if interface_has_mirror_config(mirror_table, dst_port): click.echo("Error: Destination Interface {} already has mirror config".format(dst_port)) return False if interface_is_in_portchannel(portchannel_member_table, dst_port): click.echo("Error: Destination Interface {} has portchannel config".format(dst_port)) return False if clicommon.is_port_router_interface(config_db, dst_port): click.echo("Error: Destination Interface {} is a L3 interface".format(dst_port)) return False if src_port: for port in src_port.split(","): if not interface_name_is_valid(config_db, port): click.echo("Error: Source Interface {} is invalid".format(port)) return False if dst_port and dst_port == port: click.echo("Error: Destination Interface cant be same as Source Interface") return False if interface_has_mirror_config(mirror_table, port): click.echo("Error: Source Interface {} already has mirror config".format(port)) return False if direction: if direction not in ['rx', 'tx', 'both']: click.echo("Error: Direction {} is invalid".format(direction)) return False return True def update_sonic_environment(): """Prepare sonic environment variable using SONiC environment template file. 
""" SONIC_ENV_TEMPLATE_FILE = os.path.join('/', "usr", "share", "sonic", "templates", "sonic-environment.j2") SONIC_VERSION_YML_FILE = os.path.join('/', "etc", "sonic", "sonic_version.yml") SONIC_ENV_FILE = os.path.join('/', "etc", "sonic", "sonic-environment") if os.path.isfile(SONIC_ENV_TEMPLATE_FILE) and os.path.isfile(SONIC_VERSION_YML_FILE): clicommon.run_command( "{} -d -y {} -t {},{}".format( SONIC_CFGGEN_PATH, SONIC_VERSION_YML_FILE, SONIC_ENV_TEMPLATE_FILE, SONIC_ENV_FILE ), display_cmd=True ) # This is our main entrypoint - the main 'config' command @click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS) @click.pass_context def config(ctx): """SONiC command line - 'config' command""" # # Load asic_type for further use # global asic_type try: version_info = device_info.get_sonic_version_info() asic_type = version_info['asic_type'] except (KeyError, TypeError): raise click.Abort() if asic_type == 'mellanox': platform.add_command(mlnx.mlnx) # Load the global config file database_global.json once. SonicDBConfig.load_sonic_global_db_config() if os.geteuid() != 0: exit("Root privileges are required for this operation") ctx.obj = Db() # Add groups from other modules config.add_command(aaa.aaa) config.add_command(aaa.tacacs) config.add_command(chassis_modules.chassis_modules) config.add_command(console.console) config.add_command(feature.feature) config.add_command(kdump.kdump) config.add_command(kube.kubernetes) config.add_command(muxcable.muxcable) config.add_command(nat.nat) config.add_command(vlan.vlan) config.add_command(vxlan.vxlan) @config.command() @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Existing files will be overwritten, continue?') @click.argument('filename', required=False) def save(filename): """Export current config DB to a file on disk.\n <filename> : Names of configuration file(s) to save, separated by comma with no spaces in between """ num_asic = multi_asic.get_num_asics() cfg_files = [] num_cfg_file = 1 if multi_asic.is_multi_asic(): num_cfg_file += num_asic # If the user give the filename[s], extract the file names. if filename is not None: cfg_files = filename.split(',') if len(cfg_files) != num_cfg_file: click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) return # In case of multi-asic mode we have additional config_db{NS}.json files for # various namespaces created per ASIC. {NS} is the namespace index. for inst in range(-1, num_cfg_file-1): #inst = -1, refers to the linux host where there is no namespace. if inst == -1: namespace = None else: namespace = "{}{}".format(NAMESPACE_PREFIX, inst) # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json if cfg_files: file = cfg_files[inst+1] else: if namespace is None: file = DEFAULT_CONFIG_DB_FILE else: file = "/etc/sonic/config_db{}.json".format(inst) if namespace is None: command = "{} -d --print-data > {}".format(SONIC_CFGGEN_PATH, file) else: command = "{} -n {} -d --print-data > {}".format(SONIC_CFGGEN_PATH, namespace, file) log.log_info("'save' executing...") clicommon.run_command(command, display_cmd=True) @config.command() @click.option('-y', '--yes', is_flag=True) @click.argument('filename', required=False) def load(filename, yes): """Import a previous saved config DB dump file. 
<filename> : Names of configuration file(s) to load, separated by comma with no spaces in between """ if filename is None: message = 'Load config from the default config file(s) ?' else: message = 'Load config from the file(s) {} ?'.format(filename) if not yes: click.confirm(message, abort=True) num_asic = multi_asic.get_num_asics() cfg_files = [] num_cfg_file = 1 if multi_asic.is_multi_asic(): num_cfg_file += num_asic # If the user give the filename[s], extract the file names. if filename is not None: cfg_files = filename.split(',') if len(cfg_files) != num_cfg_file: click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) return # In case of multi-asic mode we have additional config_db{NS}.json files for # various namespaces created per ASIC. {NS} is the namespace index. for inst in range(-1, num_cfg_file-1): #inst = -1, refers to the linux host where there is no namespace. if inst == -1: namespace = None else: namespace = "{}{}".format(NAMESPACE_PREFIX, inst) # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json if cfg_files: file = cfg_files[inst+1] else: if namespace is None: file = DEFAULT_CONFIG_DB_FILE else: file = "/etc/sonic/config_db{}.json".format(inst) # if any of the config files in linux host OR namespace is not present, return if not os.path.exists(file): click.echo("The config_db file {} doesn't exist".format(file)) return if namespace is None: command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, file) else: command = "{} -n {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, namespace, file) log.log_info("'load' executing...") clicommon.run_command(command, display_cmd=True) @config.command() @click.option('-y', '--yes', is_flag=True) @click.option('-l', '--load-sysinfo', is_flag=True, help='load system default information (mac, portmap etc) first.') @click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services') @click.argument('filename', required=False) @clicommon.pass_db def reload(db, filename, yes, load_sysinfo, no_service_restart): """Clear current configuration and import a previous saved config DB dump file. <filename> : Names of configuration file(s) to load, separated by comma with no spaces in between """ if filename is None: message = 'Clear current config and reload config from the default config file(s) ?' else: message = 'Clear current config and reload config from the file(s) {} ?'.format(filename) if not yes: click.confirm(message, abort=True) log.log_info("'reload' executing...") num_asic = multi_asic.get_num_asics() cfg_files = [] num_cfg_file = 1 if multi_asic.is_multi_asic(): num_cfg_file += num_asic # If the user give the filename[s], extract the file names. if filename is not None: cfg_files = filename.split(',') if len(cfg_files) != num_cfg_file: click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) return if load_sysinfo: command = "{} -j {} -v DEVICE_METADATA.localhost.hwsku".format(SONIC_CFGGEN_PATH, filename) proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) cfg_hwsku, err = proc.communicate() if err: click.echo("Could not get the HWSKU from config file, exiting") sys.exit(1) else: cfg_hwsku = cfg_hwsku.strip() #Stop services before config push if not no_service_restart: log.log_info("'reload' stopping services...") _stop_services(db.cfgdb) # In Single ASIC platforms we have single DB service. 
In multi-ASIC platforms we have a global DB # service running in the host + DB services running in each ASIC namespace created per ASIC. # In the below logic, we get all namespaces in this platform and add an empty namespace '' # denoting the current namespace which we are in ( the linux host ) for inst in range(-1, num_cfg_file-1): # Get the namespace name, for linux host it is None if inst == -1: namespace = None else: namespace = "{}{}".format(NAMESPACE_PREFIX, inst) # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json if cfg_files: file = cfg_files[inst+1] else: if namespace is None: file = DEFAULT_CONFIG_DB_FILE else: file = "/etc/sonic/config_db{}.json".format(inst) # Check the file exists before proceeding. if not os.path.exists(file): click.echo("The config_db file {} doesn't exist".format(file)) continue if namespace is None: config_db = ConfigDBConnector() else: config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() client = config_db.get_redis_client(config_db.CONFIG_DB) client.flushdb() if load_sysinfo: if namespace is None: command = "{} -H -k {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku) else: command = "{} -H -k {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku, namespace) clicommon.run_command(command, display_cmd=True) # For the database service running in linux host we use the file user gives as input # or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace, # the default config_db<namespaceID>.json format is used. if namespace is None: if os.path.isfile(INIT_CFG_FILE): command = "{} -j {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, INIT_CFG_FILE, file) else: command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, file) else: if os.path.isfile(INIT_CFG_FILE): command = "{} -j {} -j {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, INIT_CFG_FILE, file, namespace) else: command = "{} -j {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, file, namespace) clicommon.run_command(command, display_cmd=True) client.set(config_db.INIT_INDICATOR, 1) # Migrate DB contents to latest version db_migrator='/usr/local/bin/db_migrator.py' if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): if namespace is None: command = "{} -o migrate".format(db_migrator) else: command = "{} -o migrate -n {}".format(db_migrator, namespace) clicommon.run_command(command, display_cmd=True) # We first run "systemctl reset-failed" to remove the "failed" # status from all services before we attempt to restart them if not no_service_restart: _reset_failed_services(db.cfgdb) log.log_info("'reload' restarting services...") _restart_services(db.cfgdb) @config.command("load_mgmt_config") @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Reload mgmt config?') @click.argument('filename', default='/etc/sonic/device_desc.xml', type=click.Path(exists=True)) def load_mgmt_config(filename): """Reconfigure hostname and mgmt interface based on device description file.""" log.log_info("'load_mgmt_config' executing...") command = "{} -M {} --write-to-db".format(SONIC_CFGGEN_PATH, filename) clicommon.run_command(command, display_cmd=True) #FIXME: After config DB daemon for hostname and mgmt interface is implemented, we'll no longer need to do manual configuration here config_data = parse_device_desc_xml(filename) hostname = config_data['DEVICE_METADATA']['localhost']['hostname'] _change_hostname(hostname) mgmt_conf = 
netaddr.IPNetwork(list(config_data['MGMT_INTERFACE'].keys())[0][1]) gw_addr = list(config_data['MGMT_INTERFACE'].values())[0]['gwaddr'] command = "ifconfig eth0 {} netmask {}".format(str(mgmt_conf.ip), str(mgmt_conf.netmask)) clicommon.run_command(command, display_cmd=True) command = "ip route add default via {} dev eth0 table default".format(gw_addr) clicommon.run_command(command, display_cmd=True, ignore_error=True) command = "ip rule add from {} table default".format(str(mgmt_conf.ip)) clicommon.run_command(command, display_cmd=True, ignore_error=True) command = "[ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid" clicommon.run_command(command, display_cmd=True, ignore_error=True) click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.") @config.command("load_minigraph") @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Reload config from minigraph?') @click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services') @clicommon.pass_db def load_minigraph(db, no_service_restart): """Reconfigure based on minigraph.""" log.log_info("'load_minigraph' executing...") #Stop services before config push if not no_service_restart: log.log_info("'load_minigraph' stopping services...") _stop_services(db.cfgdb) # For Single Asic platform the namespace list has the empty string # for mulit Asic platform the empty string to generate the config # for host namespace_list = [DEFAULT_NAMESPACE] num_npus = multi_asic.get_num_asics() if num_npus > 1: namespace_list += multi_asic.get_namespaces_from_linux() for namespace in namespace_list: if namespace is DEFAULT_NAMESPACE: config_db = ConfigDBConnector() cfggen_namespace_option = " " ns_cmd_prefix = "" else: config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) cfggen_namespace_option = " -n {}".format(namespace) ns_cmd_prefix = "sudo ip netns exec {} ".format(namespace) config_db.connect() client = config_db.get_redis_client(config_db.CONFIG_DB) client.flushdb() if os.path.isfile('/etc/sonic/init_cfg.json'): command = "{} -H -m -j /etc/sonic/init_cfg.json {} --write-to-db".format(SONIC_CFGGEN_PATH, cfggen_namespace_option) else: command = "{} -H -m --write-to-db {}".format(SONIC_CFGGEN_PATH, cfggen_namespace_option) clicommon.run_command(command, display_cmd=True) client.set(config_db.INIT_INDICATOR, 1) # get the device type device_type = _get_device_type() if device_type != 'MgmtToRRouter': clicommon.run_command("pfcwd start_default", display_cmd=True) # Update SONiC environmnet file update_sonic_environment() if os.path.isfile('/etc/sonic/acl.json'): clicommon.run_command("acl-loader update full /etc/sonic/acl.json", display_cmd=True) # generate QoS and Buffer configs clicommon.run_command("config qos reload --no-dynamic-buffer", display_cmd=True) # Write latest db version string into db db_migrator='/usr/local/bin/db_migrator.py' if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): for namespace in namespace_list: if namespace is DEFAULT_NAMESPACE: cfggen_namespace_option = " " else: cfggen_namespace_option = " -n {}".format(namespace) clicommon.run_command(db_migrator + ' -o set_version' + cfggen_namespace_option) # We first run "systemctl reset-failed" to remove the "failed" # status from all services before we attempt to restart them if not no_service_restart: _reset_failed_services(db.cfgdb) #FIXME: After 
config DB daemon is implemented, we'll no longer need to restart every service. log.log_info("'load_minigraph' restarting services...") _restart_services(db.cfgdb) click.echo("Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`.") # # 'hostname' command # @config.command('hostname') @click.argument('new_hostname', metavar='<new_hostname>', required=True) def hostname(new_hostname): """Change device hostname without impacting the traffic.""" config_db = ConfigDBConnector() config_db.connect() config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"hostname" : new_hostname}) try: command = "service hostname-config restart" clicommon.run_command(command, display_cmd=True) except SystemExit as e: click.echo("Restarting hostname-config service failed with error {}".format(e)) raise # Reload Monit configuration to pick up new hostname in case it changed click.echo("Reloading Monit configuration ...") clicommon.run_command("sudo monit reload") click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.") # # 'synchronous_mode' command ('config synchronous_mode ...') # @config.command('synchronous_mode') @click.argument('sync_mode', metavar='<enable|disable>', required=True) def synchronous_mode(sync_mode): """ Enable or disable synchronous mode between orchagent and syncd \n swss restart required to apply the configuration \n Options to restart swss and apply the configuration: \n 1. config save -y \n config reload -y \n 2. systemctl restart swss """ if sync_mode == 'enable' or sync_mode == 'disable': config_db = ConfigDBConnector() config_db.connect() config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"synchronous_mode" : sync_mode}) click.echo("""Wrote %s synchronous mode into CONFIG_DB, swss restart required to apply the configuration: \n Option 1. config save -y \n config reload -y \n Option 2. systemctl restart swss""" % sync_mode) else: raise click.BadParameter("Error: Invalid argument %s, expect either enable or disable" % sync_mode) # # 'portchannel' group ('config portchannel ...') # @config.group(cls=clicommon.AbbreviationGroup) # TODO add "hidden=True if this is a single ASIC platform, once we have click 7.0 in all branches. @click.option('-n', '--namespace', help='Namespace name', required=True if multi_asic.is_multi_asic() else False, type=click.Choice(multi_asic.get_namespace_list())) @click.pass_context def portchannel(ctx, namespace): # Set namespace to default_namespace if it is None. 
if namespace is None: namespace = DEFAULT_NAMESPACE config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=str(namespace)) config_db.connect() ctx.obj = {'db': config_db, 'namespace': str(namespace)} @portchannel.command('add') @click.argument('portchannel_name', metavar='<portchannel_name>', required=True) @click.option('--min-links', default=0, type=int) @click.option('--fallback', default='false') @click.pass_context def add_portchannel(ctx, portchannel_name, min_links, fallback): """Add port channel""" if is_portchannel_name_valid(portchannel_name) != True: ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) db = ctx.obj['db'] if is_portchannel_present_in_db(db, portchannel_name): ctx.fail("{} already exists!".format(portchannel_name)) fvs = {'admin_status': 'up', 'mtu': '9100'} if min_links != 0: fvs['min_links'] = str(min_links) if fallback != 'false': fvs['fallback'] = 'true' db.set_entry('PORTCHANNEL', portchannel_name, fvs) @portchannel.command('del') @click.argument('portchannel_name', metavar='<portchannel_name>', required=True) @click.pass_context def remove_portchannel(ctx, portchannel_name): """Remove port channel""" if is_portchannel_name_valid(portchannel_name) != True: ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) db = ctx.obj['db'] # Dont proceed if the port channel does not exist if is_portchannel_present_in_db(db, portchannel_name) is False: ctx.fail("{} is not present.".format(portchannel_name)) if len([(k, v) for k, v in db.get_table('PORTCHANNEL_MEMBER') if k == portchannel_name]) != 0: click.echo("Error: Portchannel {} contains members. Remove members before deleting Portchannel!".format(portchannel_name)) else: db.set_entry('PORTCHANNEL', portchannel_name, None) @portchannel.group(cls=clicommon.AbbreviationGroup, name='member') @click.pass_context def portchannel_member(ctx): pass @portchannel_member.command('add') @click.argument('portchannel_name', metavar='<portchannel_name>', required=True) @click.argument('port_name', metavar='<port_name>', required=True) @click.pass_context def add_portchannel_member(ctx, portchannel_name, port_name): """Add member to port channel""" db = ctx.obj['db'] if clicommon.is_port_mirror_dst_port(db, port_name): ctx.fail("{} is configured as mirror destination port".format(port_name)) # Check if the member interface given by user is valid in the namespace. if port_name.startswith("Ethernet") is False or interface_name_is_valid(db, port_name) is False: ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") # Dont proceed if the port channel name is not valid if is_portchannel_name_valid(portchannel_name) is False: ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) # Dont proceed if the port channel does not exist if is_portchannel_present_in_db(db, portchannel_name) is False: ctx.fail("{} is not present.".format(portchannel_name)) # Dont allow a port to be member of port channel if it is configured with an IP address for key in db.get_table('INTERFACE').keys(): if type(key) != tuple: continue if key[0] == port_name: ctx.fail(" {} has ip address {} configured".format(port_name, key[1])) return # Dont allow a port to be member of port channel if it is configured as a VLAN member for k,v in db.get_table('VLAN_MEMBER'): if v == port_name: ctx.fail("%s Interface configured as VLAN_MEMBER under vlan : %s" %(port_name,str(k))) return # Dont allow a port to be member of port channel if it is already member of a port channel for k,v in db.get_table('PORTCHANNEL_MEMBER'): if v == port_name: ctx.fail("{} Interface is already member of {} ".format(v,k)) # Dont allow a port to be member of port channel if its speed does not match with existing members for k,v in db.get_table('PORTCHANNEL_MEMBER'): if k == portchannel_name: member_port_entry = db.get_entry('PORT', v) port_entry = db.get_entry('PORT', port_name) if member_port_entry is not None and port_entry is not None: member_port_speed = member_port_entry.get(PORT_SPEED) port_speed = port_entry.get(PORT_SPEED) if member_port_speed != port_speed: ctx.fail("Port speed of {} is different than the other members of the portchannel {}" .format(port_name, portchannel_name)) # Dont allow a port to be member of port channel if its MTU does not match with portchannel portchannel_entry = db.get_entry('PORTCHANNEL', portchannel_name) if portchannel_entry and portchannel_entry.get(PORT_MTU) is not None : port_entry = db.get_entry('PORT', port_name) if port_entry and port_entry.get(PORT_MTU) is not None: port_mtu = port_entry.get(PORT_MTU) portchannel_mtu = portchannel_entry.get(PORT_MTU) if portchannel_mtu != port_mtu: ctx.fail("Port MTU of {} is different than the {} MTU size" .format(port_name, portchannel_name)) db.set_entry('PORTCHANNEL_MEMBER', (portchannel_name, port_name), {'NULL': 'NULL'}) @portchannel_member.command('del') @click.argument('portchannel_name', metavar='<portchannel_name>', required=True) @click.argument('port_name', metavar='<port_name>', required=True) @click.pass_context def del_portchannel_member(ctx, portchannel_name, port_name): """Remove member from portchannel""" # Dont proceed if the port channel name is not valid if is_portchannel_name_valid(portchannel_name) is False: ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) db = ctx.obj['db'] # Check if the member interface given by user is valid in the namespace. if interface_name_is_valid(db, port_name) is False: ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") # Dont proceed if the port channel does not exist if is_portchannel_present_in_db(db, portchannel_name) is False: ctx.fail("{} is not present.".format(portchannel_name)) # Dont proceed if the the port is not an existing member of the port channel if not is_port_member_of_this_portchannel(db, port_name, portchannel_name): ctx.fail("{} is not a member of portchannel {}".format(port_name, portchannel_name)) db.set_entry('PORTCHANNEL_MEMBER', (portchannel_name, port_name), None) db.set_entry('PORTCHANNEL_MEMBER', portchannel_name + '|' + port_name, None) # # 'mirror_session' group ('config mirror_session ...') # @config.group(cls=clicommon.AbbreviationGroup, name='mirror_session') def mirror_session(): pass # # 'add' subgroup ('config mirror_session add ...') # @mirror_session.command('add') @click.argument('session_name', metavar='<session_name>', required=True) @click.argument('src_ip', metavar='<src_ip>', required=True) @click.argument('dst_ip', metavar='<dst_ip>', required=True) @click.argument('dscp', metavar='<dscp>', required=True) @click.argument('ttl', metavar='<ttl>', required=True) @click.argument('gre_type', metavar='[gre_type]', required=False) @click.argument('queue', metavar='[queue]', required=False) @click.option('--policer') def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer): """ Add ERSPAN mirror session.(Legacy support) """ add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer) @mirror_session.group(cls=clicommon.AbbreviationGroup, name='erspan') @click.pass_context def erspan(ctx): """ ERSPAN mirror_session """ pass # # 'add' subcommand # @erspan.command('add') @click.argument('session_name', metavar='<session_name>', required=True) @click.argument('src_ip', metavar='<src_ip>', required=True) @click.argument('dst_ip', metavar='<dst_ip>', required=True) @click.argument('dscp', metavar='<dscp>', required=True) @click.argument('ttl', metavar='<ttl>', required=True) @click.argument('gre_type', metavar='[gre_type]', required=False) @click.argument('queue', metavar='[queue]', required=False) @click.argument('src_port', metavar='[src_port]', required=False) @click.argument('direction', metavar='[direction]', required=False) @click.option('--policer') def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction): """ Add ERSPAN mirror session """ add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction) def gather_session_info(session_info, policer, queue, src_port, direction): if policer: session_info['policer'] = policer if queue: session_info['queue'] = queue if src_port: if clicommon.get_interface_naming_mode() == "alias": src_port_list = [] for port in src_port.split(","): src_port_list.append(interface_alias_to_name(None, port)) src_port=",".join(src_port_list) session_info['src_port'] = src_port if not direction: direction = "both" session_info['direction'] = direction.upper() return session_info def add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port=None, direction=None): session_info = { "type" : "ERSPAN", "src_ip": src_ip, "dst_ip": dst_ip, "dscp": dscp, "ttl": ttl } if gre_type: session_info['gre_type'] = gre_type session_info = gather_session_info(session_info, policer, queue, src_port, direction) """ For multi-npu platforms we need to program all front asic namespaces """ namespaces = multi_asic.get_all_namespaces() if not namespaces['front_ns']: config_db = 
ConfigDBConnector() config_db.connect() if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False: return config_db.set_entry("MIRROR_SESSION", session_name, session_info) else: per_npu_configdb = {} for front_asic_namespaces in namespaces['front_ns']: per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) per_npu_configdb[front_asic_namespaces].connect() if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False: return per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) @mirror_session.group(cls=clicommon.AbbreviationGroup, name='span') @click.pass_context def span(ctx): """ SPAN mirror session """ pass @span.command('add') @click.argument('session_name', metavar='<session_name>', required=True) @click.argument('dst_port', metavar='<dst_port>', required=True) @click.argument('src_port', metavar='[src_port]', required=False) @click.argument('direction', metavar='[direction]', required=False) @click.argument('queue', metavar='[queue]', required=False) @click.option('--policer') def add(session_name, dst_port, src_port, direction, queue, policer): """ Add SPAN mirror session """ add_span(session_name, dst_port, src_port, direction, queue, policer) def add_span(session_name, dst_port, src_port, direction, queue, policer): if clicommon.get_interface_naming_mode() == "alias": dst_port = interface_alias_to_name(None, dst_port) if dst_port is None: click.echo("Error: Destination Interface {} is invalid".format(dst_port)) return session_info = { "type" : "SPAN", "dst_port": dst_port, } session_info = gather_session_info(session_info, policer, queue, src_port, direction) """ For multi-npu platforms we need to program all front asic namespaces """ namespaces = multi_asic.get_all_namespaces() if not namespaces['front_ns']: config_db = ConfigDBConnector() config_db.connect() if validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False: return config_db.set_entry("MIRROR_SESSION", session_name, session_info) else: per_npu_configdb = {} for front_asic_namespaces in namespaces['front_ns']: per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) per_npu_configdb[front_asic_namespaces].connect() if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False: return per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) @mirror_session.command() @click.argument('session_name', metavar='<session_name>', required=True) def remove(session_name): """ Delete mirror session """ """ For multi-npu platforms we need to program all front asic namespaces """ namespaces = multi_asic.get_all_namespaces() if not namespaces['front_ns']: config_db = ConfigDBConnector() config_db.connect() config_db.set_entry("MIRROR_SESSION", session_name, None) else: per_npu_configdb = {} for front_asic_namespaces in namespaces['front_ns']: per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) per_npu_configdb[front_asic_namespaces].connect() per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None) # # 'pfcwd' group ('config pfcwd ...') # @config.group(cls=clicommon.AbbreviationGroup) def pfcwd(): """Configure pfc watchdog """ 
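# Note: the pfcwd subcommands below are thin wrappers; each one assembles a "pfcwd ..." command line and executes it via clicommon.run_command.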
pass @pfcwd.command() @click.option('--action', '-a', type=click.Choice(['drop', 'forward', 'alert'])) @click.option('--restoration-time', '-r', type=click.IntRange(100, 60000)) @click.option('--verbose', is_flag=True, help="Enable verbose output") @click.argument('ports', nargs=-1) @click.argument('detection-time', type=click.IntRange(100, 5000)) def start(action, restoration_time, ports, detection_time, verbose): """ Start PFC watchdog on port(s). To config all ports, use all as input. Example: config pfcwd start --action drop ports all detection-time 400 --restoration-time 400 """ cmd = "pfcwd start" if action: cmd += " --action {}".format(action) if ports: ports = set(ports) - set(['ports', 'detection-time']) cmd += " {}".format(' '.join(ports)) if detection_time: cmd += " {}".format(detection_time) if restoration_time: cmd += " --restoration-time {}".format(restoration_time) clicommon.run_command(cmd, display_cmd=verbose) @pfcwd.command() @click.option('--verbose', is_flag=True, help="Enable verbose output") def stop(verbose): """ Stop PFC watchdog """ cmd = "pfcwd stop" clicommon.run_command(cmd, display_cmd=verbose) @pfcwd.command() @click.option('--verbose', is_flag=True, help="Enable verbose output") @click.argument('poll_interval', type=click.IntRange(100, 3000)) def interval(poll_interval, verbose): """ Set PFC watchdog counter polling interval (ms) """ cmd = "pfcwd interval {}".format(poll_interval) clicommon.run_command(cmd, display_cmd=verbose) @pfcwd.command('counter_poll') @click.option('--verbose', is_flag=True, help="Enable verbose output") @click.argument('counter_poll', type=click.Choice(['enable', 'disable'])) def counter_poll(counter_poll, verbose): """ Enable/disable counter polling """ cmd = "pfcwd counter_poll {}".format(counter_poll) clicommon.run_command(cmd, display_cmd=verbose) @pfcwd.command('big_red_switch') @click.option('--verbose', is_flag=True, help="Enable verbose output") @click.argument('big_red_switch', type=click.Choice(['enable', 'disable'])) def big_red_switch(big_red_switch, verbose): """ Enable/disable BIG_RED_SWITCH mode """ cmd = "pfcwd big_red_switch {}".format(big_red_switch) clicommon.run_command(cmd, display_cmd=verbose) @pfcwd.command('start_default') @click.option('--verbose', is_flag=True, help="Enable verbose output") def start_default(verbose): """ Start PFC WD by default configurations """ cmd = "pfcwd start_default" clicommon.run_command(cmd, display_cmd=verbose) # # 'qos' group ('config qos ...') # @config.group(cls=clicommon.AbbreviationGroup) @click.pass_context def qos(ctx): """QoS-related configuration tasks""" pass @qos.command('clear') def clear(): """Clear QoS configuration""" log.log_info("'qos clear' executing...") _clear_qos() def _update_buffer_calculation_model(config_db, model): """Update the buffer calculation model into CONFIG_DB""" buffer_model_changed = False device_metadata = config_db.get_entry('DEVICE_METADATA', 'localhost') if device_metadata.get('buffer_model') != model: buffer_model_changed = True device_metadata['buffer_model'] = model config_db.set_entry('DEVICE_METADATA', 'localhost', device_metadata) return buffer_model_changed @qos.command('reload') @click.pass_context @click.option('--no-dynamic-buffer', is_flag=True, help="Disable dynamic buffer calculation") @click.option( '--json-data', type=click.STRING, help="json string with additional data, valid with --dry-run option" ) @click.option( '--dry_run', type=click.STRING, help="Dry run, writes config to the given file" ) def reload(ctx, 
no_dynamic_buffer, dry_run, json_data): """Reload QoS configuration""" log.log_info("'qos reload' executing...") _clear_qos() _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs() sonic_version_file = device_info.get_sonic_version_file() from_db = "-d --write-to-db" if dry_run: from_db = "--additional-data \'{}\'".format(json_data) if json_data else "" namespace_list = [DEFAULT_NAMESPACE] if multi_asic.get_num_asics() > 1: namespace_list = multi_asic.get_namespaces_from_linux() buffer_model_updated = False vendors_supporting_dynamic_buffer = ["mellanox"] for ns in namespace_list: if ns is DEFAULT_NAMESPACE: asic_id_suffix = "" config_db = ConfigDBConnector() else: asic_id = multi_asic.get_asic_id_from_name(ns) if asic_id is None: click.secho( "Command 'qos reload' failed with invalid namespace '{}'". format(ns), fg="yellow" ) raise click.Abort() asic_id_suffix = str(asic_id) config_db = ConfigDBConnector( use_unix_socket_path=True, namespace=ns ) config_db.connect() if not no_dynamic_buffer and asic_type in vendors_supporting_dynamic_buffer: buffer_template_file = os.path.join(hwsku_path, asic_id_suffix, "buffers_dynamic.json.j2") buffer_model_updated |= _update_buffer_calculation_model(config_db, "dynamic") else: buffer_template_file = os.path.join(hwsku_path, asic_id_suffix, "buffers.json.j2") if asic_type in vendors_supporting_dynamic_buffer: buffer_model_updated |= _update_buffer_calculation_model(config_db, "traditional") if os.path.isfile(buffer_template_file): qos_template_file = os.path.join( hwsku_path, asic_id_suffix, "qos.json.j2" ) if os.path.isfile(qos_template_file): cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns) fname = "{}{}".format(dry_run, asic_id_suffix) if dry_run else "config-db" command = "{} {} {} -t {},{} -t {},{} -y {}".format( SONIC_CFGGEN_PATH, cmd_ns, from_db, buffer_template_file, fname, qos_template_file, fname, sonic_version_file ) # Apply the configurations only when both buffer and qos # configuration files are present clicommon.run_command(command, display_cmd=True) else: click.secho("QoS definition template not found at {}".format( qos_template_file ), fg="yellow") else: click.secho("Buffer definition template not found at {}".format( buffer_template_file ), fg="yellow") if buffer_model_updated: print("Buffer calculation model updated, restarting swss is required to take effect") def is_dynamic_buffer_enabled(config_db): """Return whether the current system supports dynamic buffer calculation""" device_metadata = config_db.get_entry('DEVICE_METADATA', 'localhost') return 'dynamic' == device_metadata.get('buffer_model') # # 'warm_restart' group ('config warm_restart ...') # @config.group(cls=clicommon.AbbreviationGroup, name='warm_restart') @click.pass_context @click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection') def warm_restart(ctx, redis_unix_socket_path): """warm_restart-related configuration tasks""" kwargs = {} if redis_unix_socket_path: kwargs['unix_socket_path'] = redis_unix_socket_path config_db = ConfigDBConnector(**kwargs) config_db.connect(wait_for_init=False) # warm restart enable/disable config is put in stateDB, not persistent across cold reboot, not saved to config_DB.json file state_db = SonicV2Connector(host='127.0.0.1') state_db.connect(state_db.STATE_DB, False) TABLE_NAME_SEPARATOR = '|' prefix = 'WARM_RESTART_ENABLE_TABLE' + TABLE_NAME_SEPARATOR ctx.obj = {'db': config_db, 'state_db': state_db, 'prefix': prefix} @warm_restart.command('enable') 
@click.argument('module', metavar='<module>', default='system', required=False, type=click.Choice(["system", "swss", "bgp", "teamd"])) @click.pass_context def warm_restart_enable(ctx, module): state_db = ctx.obj['state_db'] prefix = ctx.obj['prefix'] _hash = '{}{}'.format(prefix, module) state_db.set(state_db.STATE_DB, _hash, 'enable', 'true') state_db.close(state_db.STATE_DB) @warm_restart.command('disable') @click.argument('module', metavar='<module>', default='system', required=False, type=click.Choice(["system", "swss", "bgp", "teamd"])) @click.pass_context def warm_restart_disable(ctx, module): state_db = ctx.obj['state_db'] prefix = ctx.obj['prefix'] _hash = '{}{}'.format(prefix, module) state_db.set(state_db.STATE_DB, _hash, 'enable', 'false') state_db.close(state_db.STATE_DB) @warm_restart.command('neighsyncd_timer') @click.argument('seconds', metavar='<seconds>', required=True, type=int) @click.pass_context def warm_restart_neighsyncd_timer(ctx, seconds): db = ctx.obj['db'] if seconds not in range(1, 9999): ctx.fail("neighsyncd warm restart timer must be in range 1-9999") db.mod_entry('WARM_RESTART', 'swss', {'neighsyncd_timer': seconds}) @warm_restart.command('bgp_timer') @click.argument('seconds', metavar='<seconds>', required=True, type=int) @click.pass_context def warm_restart_bgp_timer(ctx, seconds): db = ctx.obj['db'] if seconds not in range(1, 3600): ctx.fail("bgp warm restart timer must be in range 1-3600") db.mod_entry('WARM_RESTART', 'bgp', {'bgp_timer': seconds}) @warm_restart.command('teamsyncd_timer') @click.argument('seconds', metavar='<seconds>', required=True, type=int) @click.pass_context def warm_restart_teamsyncd_timer(ctx, seconds): db = ctx.obj['db'] if seconds not in range(1, 3600): ctx.fail("teamsyncd warm restart timer must be in range 1-3600") db.mod_entry('WARM_RESTART', 'teamd', {'teamsyncd_timer': seconds}) @warm_restart.command('bgp_eoiu') @click.argument('enable', metavar='<enable>', default='true', required=False, type=click.Choice(["true", "false"])) @click.pass_context def warm_restart_bgp_eoiu(ctx, enable): db = ctx.obj['db'] db.mod_entry('WARM_RESTART', 'bgp', {'bgp_eoiu': enable}) def mvrf_restart_services(): """Restart interfaces-config service and NTP service when mvrf is changed""" """ When mvrf is enabled, eth0 should be moved to mvrf; when it is disabled, move it back to default vrf. Restarting the "interfaces-config" service will recreate the /etc/network/interfaces file and restart the "networking" service that takes care of the eth0 movement. NTP service should also be restarted to rerun the NTP service with or without "cgexec" accordingly. 
""" cmd="service ntp stop" os.system (cmd) cmd="systemctl restart interfaces-config" os.system (cmd) cmd="service ntp start" os.system (cmd) def vrf_add_management_vrf(config_db): """Enable management vrf in config DB""" entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global") if entry and entry['mgmtVrfEnabled'] == 'true' : click.echo("ManagementVRF is already Enabled.") return None config_db.mod_entry('MGMT_VRF_CONFIG', "vrf_global", {"mgmtVrfEnabled": "true"}) mvrf_restart_services() """ The regular expression for grep in below cmd is to match eth0 line in /proc/net/route, sample file: $ cat /proc/net/route Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT eth0 00000000 01803B0A 0003 0 0 202 00000000 0 0 0 """ cmd = "cat /proc/net/route | grep -E \"eth0\s+00000000\s+[0-9A-Z]+\s+[0-9]+\s+[0-9]+\s+[0-9]+\s+202\" | wc -l" proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) output = proc.communicate() if int(output[0]) >= 1: cmd="ip -4 route del default dev eth0 metric 202" proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) proc.communicate() if proc.returncode != 0: click.echo("Could not delete eth0 route") def vrf_delete_management_vrf(config_db): """Disable management vrf in config DB""" entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global") if not entry or entry['mgmtVrfEnabled'] == 'false' : click.echo("ManagementVRF is already Disabled.") return None config_db.mod_entry('MGMT_VRF_CONFIG', "vrf_global", {"mgmtVrfEnabled": "false"}) mvrf_restart_services() @config.group(cls=clicommon.AbbreviationGroup) @click.pass_context def snmpagentaddress(ctx): """SNMP agent listening IP address, port, vrf configuration""" config_db = ConfigDBConnector() config_db.connect() ctx.obj = {'db': config_db} ip_family = {4: AF_INET, 6: AF_INET6} @snmpagentaddress.command('add') @click.argument('agentip', metavar='<SNMP AGENT LISTENING IP Address>', required=True) @click.option('-p', '--port', help="SNMP AGENT LISTENING PORT") @click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None") @click.pass_context def add_snmp_agent_address(ctx, agentip, port, vrf): """Add the SNMP agent listening IP:Port%Vrf configuration""" #Construct SNMP_AGENT_ADDRESS_CONFIG table key in the format ip|<port>|<vrf> if not clicommon.is_ipaddress(agentip): click.echo("Invalid IP address") return False config_db = ctx.obj['db'] if not vrf: entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global") if entry and entry['mgmtVrfEnabled'] == 'true' : click.echo("ManagementVRF is Enabled. Provide vrf.") return False found = 0 ip = ipaddress.ip_address(agentip) for intf in netifaces.interfaces(): ipaddresses = netifaces.ifaddresses(intf) if ip_family[ip.version] in ipaddresses: for ipaddr in ipaddresses[ip_family[ip.version]]: if agentip == ipaddr['addr']: found = 1 break; if found == 1: break; else: click.echo("IP addfress is not available") return key = agentip+'|' if port: key = key+port #snmpd does not start if we have two entries with same ip and port. 
key1 = "SNMP_AGENT_ADDRESS_CONFIG|" + key + '*' entry = config_db.get_keys(key1) if entry: ip_port = agentip + ":" + port click.echo("entry with {} already exists ".format(ip_port)) return key = key+'|' if vrf: key = key+vrf config_db.set_entry('SNMP_AGENT_ADDRESS_CONFIG', key, {}) #Restarting the SNMP service will regenerate snmpd.conf and rerun snmpd cmd="systemctl restart snmp" os.system (cmd) @snmpagentaddress.command('del') @click.argument('agentip', metavar='<SNMP AGENT LISTENING IP Address>', required=True) @click.option('-p', '--port', help="SNMP AGENT LISTENING PORT") @click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None") @click.pass_context def del_snmp_agent_address(ctx, agentip, port, vrf): """Delete the SNMP agent listening IP:Port%Vrf configuration""" key = agentip+'|' if port: key = key+port key = key+'|' if vrf: key = key+vrf config_db = ctx.obj['db'] config_db.set_entry('SNMP_AGENT_ADDRESS_CONFIG', key, None) cmd="systemctl restart snmp" os.system (cmd) @config.group(cls=clicommon.AbbreviationGroup) @click.pass_context def snmptrap(ctx): """SNMP Trap server configuration to send traps""" config_db = ConfigDBConnector() config_db.connect() ctx.obj = {'db': config_db} @snmptrap.command('modify') @click.argument('ver', metavar='<SNMP Version>', type=click.Choice(['1', '2', '3']), required=True) @click.argument('serverip', metavar='<SNMP TRAP SERVER IP Address>', required=True) @click.option('-p', '--port', help="SNMP Trap Server port, default 162", default="162") @click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None", default="None") @click.option('-c', '--comm', help="Community", default="public") @click.pass_context def modify_snmptrap_server(ctx, ver, serverip, port, vrf, comm): """Modify the SNMP Trap server configuration""" #SNMP_TRAP_CONFIG for each SNMP version config_db = ctx.obj['db'] if ver == "1": #By default, v1TrapDest value in snmp.yml is "NotConfigured". Modify it. config_db.mod_entry('SNMP_TRAP_CONFIG', "v1TrapDest", {"DestIp": serverip, "DestPort": port, "vrf": vrf, "Community": comm}) elif ver == "2": config_db.mod_entry('SNMP_TRAP_CONFIG', "v2TrapDest", {"DestIp": serverip, "DestPort": port, "vrf": vrf, "Community": comm}) else: config_db.mod_entry('SNMP_TRAP_CONFIG', "v3TrapDest", {"DestIp": serverip, "DestPort": port, "vrf": vrf, "Community": comm}) cmd="systemctl restart snmp" os.system (cmd) @snmptrap.command('del') @click.argument('ver', metavar='<SNMP Version>', type=click.Choice(['1', '2', '3']), required=True) @click.pass_context def delete_snmptrap_server(ctx, ver): """Delete the SNMP Trap server configuration""" config_db = ctx.obj['db'] if ver == "1": config_db.mod_entry('SNMP_TRAP_CONFIG', "v1TrapDest", None) elif ver == "2": config_db.mod_entry('SNMP_TRAP_CONFIG', "v2TrapDest", None) else: config_db.mod_entry('SNMP_TRAP_CONFIG', "v3TrapDest", None) cmd="systemctl restart snmp" os.system (cmd) # # 'bgp' group ('config bgp ...') # @config.group(cls=clicommon.AbbreviationGroup) def bgp(): """BGP-related configuration tasks""" pass # # 'shutdown' subgroup ('config bgp shutdown ...') # @bgp.group(cls=clicommon.AbbreviationGroup) def shutdown(): """Shut down BGP session(s)""" pass # 'all' subcommand @shutdown.command() @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def all(verbose): """Shut down all BGP sessions In the case of Multi-Asic platform, we shut only the EBGP sessions with external neighbors. 
""" log.log_info("'bgp shutdown all' executing...") namespaces = [DEFAULT_NAMESPACE] if multi_asic.is_multi_asic(): ns_list = multi_asic.get_all_namespaces() namespaces = ns_list['front_ns'] # Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the # namespaces (in case of multi ASIC) and do the sepcified "action" on the BGP neighbor(s) for namespace in namespaces: config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses(config_db) for ipaddress in bgp_neighbor_ip_list: _change_bgp_session_status_by_addr(config_db, ipaddress, 'down', verbose) # 'neighbor' subcommand @shutdown.command() @click.argument('ipaddr_or_hostname', metavar='<ipaddr_or_hostname>', required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def neighbor(ipaddr_or_hostname, verbose): """Shut down BGP session by neighbor IP address or hostname. User can specify either internal or external BGP neighbor to shutdown """ log.log_info("'bgp shutdown neighbor {}' executing...".format(ipaddr_or_hostname)) namespaces = [DEFAULT_NAMESPACE] found_neighbor = False if multi_asic.is_multi_asic(): ns_list = multi_asic.get_all_namespaces() namespaces = ns_list['front_ns'] + ns_list['back_ns'] # Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the # namespaces (in case of multi ASIC) and do the sepcified "action" on the BGP neighbor(s) for namespace in namespaces: config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() if _change_bgp_session_status(config_db, ipaddr_or_hostname, 'down', verbose): found_neighbor = True if not found_neighbor: click.get_current_context().fail("Could not locate neighbor '{}'".format(ipaddr_or_hostname)) @bgp.group(cls=clicommon.AbbreviationGroup) def startup(): """Start up BGP session(s)""" pass # 'all' subcommand @startup.command() @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def all(verbose): """Start up all BGP sessions In the case of Multi-Asic platform, we startup only the EBGP sessions with external neighbors. """ log.log_info("'bgp startup all' executing...") namespaces = [DEFAULT_NAMESPACE] if multi_asic.is_multi_asic(): ns_list = multi_asic.get_all_namespaces() namespaces = ns_list['front_ns'] # Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the # namespaces (in case of multi ASIC) and do the sepcified "action" on the BGP neighbor(s) for namespace in namespaces: config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses(config_db) for ipaddress in bgp_neighbor_ip_list: _change_bgp_session_status_by_addr(config_db, ipaddress, 'up', verbose) # 'neighbor' subcommand @startup.command() @click.argument('ipaddr_or_hostname', metavar='<ipaddr_or_hostname>', required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def neighbor(ipaddr_or_hostname, verbose): log.log_info("'bgp startup neighbor {}' executing...".format(ipaddr_or_hostname)) """Start up BGP session by neighbor IP address or hostname. 
User can specify either internal or external BGP neighbor to startup """ namespaces = [DEFAULT_NAMESPACE] found_neighbor = False if multi_asic.is_multi_asic(): ns_list = multi_asic.get_all_namespaces() namespaces = ns_list['front_ns'] + ns_list['back_ns'] # Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the # namespaces (in case of multi ASIC) and do the sepcified "action" on the BGP neighbor(s) for namespace in namespaces: config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() if _change_bgp_session_status(config_db, ipaddr_or_hostname, 'up', verbose): found_neighbor = True if not found_neighbor: click.get_current_context().fail("Could not locate neighbor '{}'".format(ipaddr_or_hostname)) # # 'remove' subgroup ('config bgp remove ...') # @bgp.group(cls=clicommon.AbbreviationGroup) def remove(): "Remove BGP neighbor configuration from the device" pass @remove.command('neighbor') @click.argument('neighbor_ip_or_hostname', metavar='<neighbor_ip_or_hostname>', required=True) def remove_neighbor(neighbor_ip_or_hostname): """Deletes BGP neighbor configuration of given hostname or ip from devices User can specify either internal or external BGP neighbor to remove """ namespaces = [DEFAULT_NAMESPACE] removed_neighbor = False if multi_asic.is_multi_asic(): ns_list = multi_asic.get_all_namespaces() namespaces = ns_list['front_ns'] + ns_list['back_ns'] # Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the # namespaces (in case of multi ASIC) and do the sepcified "action" on the BGP neighbor(s) for namespace in namespaces: config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) config_db.connect() if _remove_bgp_neighbor_config(config_db, neighbor_ip_or_hostname): removed_neighbor = True if not removed_neighbor: click.get_current_context().fail("Could not locate neighbor '{}'".format(neighbor_ip_or_hostname)) # # 'interface' group ('config interface ...') # @config.group(cls=clicommon.AbbreviationGroup) # TODO add "hidden=True if this is a single ASIC platform, once we have click 7.0 in all branches. @click.option('-n', '--namespace', help='Namespace name', required=True if multi_asic.is_multi_asic() else False, type=click.Choice(multi_asic.get_namespace_list())) @click.pass_context def interface(ctx, namespace): """Interface-related configuration tasks""" # Set namespace to default_namespace if it is None. if namespace is None: namespace = DEFAULT_NAMESPACE config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=str(namespace)) config_db.connect() ctx.obj = {'config_db': config_db, 'namespace': str(namespace)} # # 'startup' subcommand # @interface.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.pass_context def startup(ctx, interface_name): """Start up interface""" # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") intf_fs = parse_interface_in_filter(interface_name) if len(intf_fs) > 1 and multi_asic.is_multi_asic(): ctx.fail("Interface range not supported in multi-asic platforms !!") if len(intf_fs) == 1 and interface_name_is_valid(config_db, interface_name) is False: ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") log.log_info("'interface startup {}' executing...".format(interface_name)) port_dict = config_db.get_table('PORT') for port_name in port_dict: if port_name in intf_fs: config_db.mod_entry("PORT", port_name, {"admin_status": "up"}) portchannel_list = config_db.get_table("PORTCHANNEL") for po_name in portchannel_list: if po_name in intf_fs: config_db.mod_entry("PORTCHANNEL", po_name, {"admin_status": "up"}) subport_list = config_db.get_table("VLAN_SUB_INTERFACE") for sp_name in subport_list: if sp_name in intf_fs: config_db.mod_entry("VLAN_SUB_INTERFACE", sp_name, {"admin_status": "up"}) # # 'shutdown' subcommand # @interface.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.pass_context def shutdown(ctx, interface_name): """Shut down interface""" log.log_info("'interface shutdown {}' executing...".format(interface_name)) # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") intf_fs = parse_interface_in_filter(interface_name) if len(intf_fs) > 1 and multi_asic.is_multi_asic(): ctx.fail("Interface range not supported in multi-asic platforms !!") if len(intf_fs) == 1 and interface_name_is_valid(config_db, interface_name) is False: ctx.fail("Interface name is invalid. Please enter a valid interface name!!") port_dict = config_db.get_table('PORT') for port_name in port_dict: if port_name in intf_fs: config_db.mod_entry("PORT", port_name, {"admin_status": "down"}) portchannel_list = config_db.get_table("PORTCHANNEL") for po_name in portchannel_list: if po_name in intf_fs: config_db.mod_entry("PORTCHANNEL", po_name, {"admin_status": "down"}) subport_list = config_db.get_table("VLAN_SUB_INTERFACE") for sp_name in subport_list: if sp_name in intf_fs: config_db.mod_entry("VLAN_SUB_INTERFACE", sp_name, {"admin_status": "down"}) # # 'speed' subcommand # @interface.command() @click.pass_context @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('interface_speed', metavar='<interface_speed>', required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def speed(ctx, interface_name, interface_speed, verbose): """Set interface speed""" # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") log.log_info("'interface speed {} {}' executing...".format(interface_name, interface_speed)) if ctx.obj['namespace'] is DEFAULT_NAMESPACE: command = "portconfig -p {} -s {}".format(interface_name, interface_speed) else: command = "portconfig -p {} -s {} -n {}".format(interface_name, interface_speed, ctx.obj['namespace']) if verbose: command += " -vv" clicommon.run_command(command, display_cmd=verbose) # # 'breakout' subcommand # @interface.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('mode', required=True, type=click.STRING, autocompletion=_get_breakout_options) @click.option('-f', '--force-remove-dependencies', is_flag=True, help='Clear all dependencies internally first.') @click.option('-l', '--load-predefined-config', is_flag=True, help='load predefied user configuration (alias, lanes, speed etc) first.') @click.option('-y', 
'--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Do you want to Breakout the port, continue?') @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") @click.pass_context def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load_predefined_config): """ Set interface breakout mode """ breakout_cfg_file = device_info.get_path_to_port_config_file() if not os.path.isfile(breakout_cfg_file) or not breakout_cfg_file.endswith('.json'): click.secho("[ERROR] Breakout feature is not available without platform.json file", fg='red') raise click.Abort() # Get the config_db connector config_db = ctx.obj['config_db'] target_brkout_mode = mode # Get current breakout mode cur_brkout_dict = config_db.get_table('BREAKOUT_CFG') if len(cur_brkout_dict) == 0: click.secho("[ERROR] BREAKOUT_CFG table is NOT present in CONFIG DB", fg='red') raise click.Abort() if interface_name not in cur_brkout_dict.keys(): click.secho("[ERROR] {} interface is NOT present in BREAKOUT_CFG table of CONFIG DB".format(interface_name), fg='red') raise click.Abort() cur_brkout_mode = cur_brkout_dict[interface_name]["brkout_mode"] # Validate Interface and Breakout mode if not _validate_interface_mode(ctx, breakout_cfg_file, interface_name, mode, cur_brkout_mode): raise click.Abort() """ Interface Deletion Logic """ # Get list of interfaces to be deleted del_ports = get_child_ports(interface_name, cur_brkout_mode, breakout_cfg_file) del_intf_dict = {intf: del_ports[intf]["speed"] for intf in del_ports} if del_intf_dict: """ shut down all the interface before deletion """ ret = shutdown_interfaces(ctx, del_intf_dict) if not ret: raise click.Abort() click.echo("\nPorts to be deleted : \n {}".format(json.dumps(del_intf_dict, indent=4))) else: click.secho("[ERROR] del_intf_dict is None! No interfaces are there to be deleted", fg='red') raise click.Abort() """ Interface Addition Logic """ # Get list of interfaces to be added add_ports = get_child_ports(interface_name, target_brkout_mode, breakout_cfg_file) add_intf_dict = {intf: add_ports[intf]["speed"] for intf in add_ports} if add_intf_dict: click.echo("Ports to be added : \n {}".format(json.dumps(add_intf_dict, indent=4))) else: click.secho("[ERROR] port_dict is None!", fg='red') raise click.Abort() """ Special Case: Dont delete those ports where the current mode and speed of the parent port remains unchanged to limit the traffic impact """ click.secho("\nAfter running Logic to limit the impact", fg="cyan", underline=True) matched_items = [intf for intf in del_intf_dict if intf in add_intf_dict and del_intf_dict[intf] == add_intf_dict[intf]] # Remove the interface which remains unchanged from both del_intf_dict and add_intf_dict for item in matched_items: del_intf_dict.pop(item) add_intf_dict.pop(item) click.secho("\nFinal list of ports to be deleted : \n {} \nFinal list of ports to be added : \n {}".format(json.dumps(del_intf_dict, indent=4), json.dumps(add_intf_dict, indent=4), fg='green', blink=True)) if not add_intf_dict: click.secho("[ERROR] add_intf_dict is None or empty! 
No interfaces are there to be added", fg='red') raise click.Abort() port_dict = {} for intf in add_intf_dict: if intf in add_ports: port_dict[intf] = add_ports[intf] # writing JSON object with open('new_port_config.json', 'w') as f: json.dump(port_dict, f, indent=4) # Start interaction with Dynamic Port Breakout config management try: """ Load config for the commands which are capable of change in config DB """ cm = load_ConfigMgmt(verbose) """ Delete all ports if forced else print dependencies using ConfigMgmt API """ final_delPorts = [intf for intf in del_intf_dict] """ Warn user if tables without yang models exist and have final_delPorts """ breakout_warnUser_extraTables(cm, final_delPorts, confirm=True) # Create a dictionary containing all the added ports with their capabilities like alias, lanes, speed etc. portJson = dict(); portJson['PORT'] = port_dict # breakout_Ports will abort the operation on failure, so no need to check the return value breakout_Ports(cm, delPorts=final_delPorts, portJson=portJson, force=force_remove_dependencies, loadDefConfig=load_predefined_config, verbose=verbose) # Set Current Breakout mode in config DB brkout_cfg_keys = config_db.get_keys('BREAKOUT_CFG') if interface_name not in brkout_cfg_keys: click.secho("[ERROR] {} is not present in 'BREAKOUT_CFG' Table!".format(interface_name), fg='red') raise click.Abort() config_db.set_entry("BREAKOUT_CFG", interface_name, {'brkout_mode': target_brkout_mode}) click.secho("Breakout process completed successfully.", fg="cyan", underline=True) click.echo("Please note the loaded setting will be lost after system reboot. To preserve the setting, run `config save`.") except Exception as e: click.secho("Failed to break out Port. Error: {}".format(str(e)), fg='magenta') sys.exit(1) def _get_all_mgmtinterface_keys(): """Returns list of strings containing mgmt interface keys """ config_db = ConfigDBConnector() config_db.connect() return list(config_db.get_table('MGMT_INTERFACE').keys()) def mgmt_ip_restart_services(): """Restart the required services when mgmt interface IP address is changed""" """ Whenever the eth0 IP address is changed, restart the "interfaces-config" service which regenerates the /etc/network/interfaces file and restarts the networking service to make the new/null IP address effective for eth0. "ntp-config" service should also be restarted based on the new eth0 IP address since the ntp.conf (generated from ntp.conf.j2) is made to listen on that particular eth0 IP address or reset it back. 
""" cmd="systemctl restart interfaces-config" os.system (cmd) cmd="systemctl restart ntp-config" os.system (cmd) # # 'mtu' subcommand # @interface.command() @click.pass_context @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('interface_mtu', metavar='<interface_mtu>', required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def mtu(ctx, interface_name, interface_mtu, verbose): """Set interface mtu""" # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER') if interface_is_in_portchannel(portchannel_member_table, interface_name): ctx.fail("'interface_name' is in portchannel!") if ctx.obj['namespace'] is DEFAULT_NAMESPACE: command = "portconfig -p {} -m {}".format(interface_name, interface_mtu) else: command = "portconfig -p {} -m {} -n {}".format(interface_name, interface_mtu, ctx.obj['namespace']) if verbose: command += " -vv" clicommon.run_command(command, display_cmd=verbose) @interface.command() @click.pass_context @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('interface_fec', metavar='<interface_fec>', required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def fec(ctx, interface_name, interface_fec, verbose): """Set interface fec""" # Get the config_db connector config_db = ctx.obj['config_db'] if interface_fec not in ["rs", "fc", "none"]: ctx.fail("'fec not in ['rs', 'fc', 'none']!") if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") if ctx.obj['namespace'] is DEFAULT_NAMESPACE: command = "portconfig -p {} -f {}".format(interface_name, interface_fec) else: command = "portconfig -p {} -f {} -n {}".format(interface_name, interface_fec, ctx.obj['namespace']) if verbose: command += " -vv" clicommon.run_command(command, display_cmd=verbose) # # 'ip' subgroup ('config interface ip ...') # @interface.group(cls=clicommon.AbbreviationGroup) @click.pass_context def ip(ctx): """Add or remove IP address""" pass # # 'add' subcommand # @ip.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument("ip_addr", metavar="<ip_addr>", required=True) @click.argument('gw', metavar='<default gateway IP address>', required=False) @click.pass_context def add(ctx, interface_name, ip_addr, gw): """Add an IP address towards the interface""" # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") # Add a validation to check this interface is not a member in vlan before # changing it to a router port vlan_member_table = config_db.get_table('VLAN_MEMBER') if (interface_is_in_vlan(vlan_member_table, interface_name)): click.echo("Interface {} is a member of vlan\nAborting!".format(interface_name)) return try: net = ipaddress.ip_network(ip_addr, strict=False) if '/' not in ip_addr: ip_addr = str(net) if interface_name == 'eth0': # Configuring more than 1 IPv4 or more than 1 IPv6 address fails. 
# Allow only one IPv4 and only one IPv6 address to be configured for IPv6. # If a row already exist, overwrite it (by doing delete and add). mgmtintf_key_list = _get_all_mgmtinterface_keys() for key in mgmtintf_key_list: # For loop runs for max 2 rows, once for IPv4 and once for IPv6. # No need to capture the exception since the ip_addr is already validated earlier ip_input = ipaddress.ip_interface(ip_addr) current_ip = ipaddress.ip_interface(key[1]) if (ip_input.version == current_ip.version): # If user has configured IPv4/v6 address and the already available row is also IPv4/v6, delete it here. config_db.set_entry("MGMT_INTERFACE", ("eth0", key[1]), None) # Set the new row with new value if not gw: config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), {"NULL": "NULL"}) else: config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), {"gwaddr": gw}) mgmt_ip_restart_services() return table_name = get_interface_table_name(interface_name) if table_name == "": ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]") interface_entry = config_db.get_entry(table_name, interface_name) if len(interface_entry) == 0: if table_name == "VLAN_SUB_INTERFACE": config_db.set_entry(table_name, interface_name, {"admin_status": "up"}) else: config_db.set_entry(table_name, interface_name, {"NULL": "NULL"}) config_db.set_entry(table_name, (interface_name, ip_addr), {"NULL": "NULL"}) except ValueError: ctx.fail("'ip_addr' is not valid.") # # 'del' subcommand # @ip.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument("ip_addr", metavar="<ip_addr>", required=True) @click.pass_context def remove(ctx, interface_name, ip_addr): """Remove an IP address from the interface""" # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") try: net = ipaddress.ip_network(ip_addr, strict=False) if '/' not in ip_addr: ip_addr = str(net) if interface_name == 'eth0': config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), None) mgmt_ip_restart_services() return table_name = get_interface_table_name(interface_name) if table_name == "": ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]") config_db.set_entry(table_name, (interface_name, ip_addr), None) interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name) if len(interface_dependent) == 0 and is_interface_bind_to_vrf(config_db, interface_name) is False: config_db.set_entry(table_name, interface_name, None) if multi_asic.is_multi_asic(): command = "sudo ip netns exec {} ip neigh flush dev {} {}".format(ctx.obj['namespace'], interface_name, ip_addr) else: command = "ip neigh flush dev {} {}".format(interface_name, ip_addr) clicommon.run_command(command) except ValueError: ctx.fail("'ip_addr' is not valid.") # # buffer commands and utilities # def pgmaps_check_legality(ctx, interface_name, input_pg, is_new_pg): """ Tool function to check whether input_pg is legal. Three checking performed: 1. Whether the input_pg is legal: pgs are in range [0-7] 2. 
Whether the input_pg overlaps an existing pg in the port """ config_db = ctx.obj["config_db"] try: lower = int(input_pg[0]) upper = int(input_pg[-1]) if upper < lower or lower < 0 or upper > 7: ctx.fail("PG {} is not valid.".format(input_pg)) except Exception: ctx.fail("PG {} is not valid.".format(input_pg)) # Check overlapping. # To configure a new PG which is overlapping an existing one is not allowed # For example, to add '5-6' while '3-5' existing is illegal existing_pgs = config_db.get_table("BUFFER_PG") if not is_new_pg: if not (interface_name, input_pg) in existing_pgs.keys(): ctx.fail("PG {} doesn't exist".format(input_pg)) return for k, v in existing_pgs.items(): port, existing_pg = k if port == interface_name: existing_lower = int(existing_pg[0]) existing_upper = int(existing_pg[-1]) if existing_upper < lower or existing_lower > upper: # new and existing pgs disjoint, legal pass else: ctx.fail("PG {} overlaps with existing PG {}".format(input_pg, existing_pg)) def update_pg(ctx, interface_name, pg_map, override_profile, add = True): config_db = ctx.obj["config_db"] # Check whether port is legal ports = config_db.get_entry("PORT", interface_name) if not ports: ctx.fail("Port {} doesn't exist".format(interface_name)) # Check whether pg_map is legal # Check whether there is other lossless profiles configured on the interface pgmaps_check_legality(ctx, interface_name, pg_map, add) # All checking passed if override_profile: profile_dict = config_db.get_entry("BUFFER_PROFILE", override_profile) if not profile_dict: ctx.fail("Profile {} doesn't exist".format(override_profile)) if not 'xoff' in profile_dict.keys() and 'size' in profile_dict.keys(): ctx.fail("Profile {} doesn't exist or isn't a lossless profile".format(override_profile)) profile_full_name = "[BUFFER_PROFILE|{}]".format(override_profile) config_db.set_entry("BUFFER_PG", (interface_name, pg_map), {"profile": profile_full_name}) else: config_db.set_entry("BUFFER_PG", (interface_name, pg_map), {"profile": "NULL"}) adjust_pfc_enable(ctx, interface_name, pg_map, True) def remove_pg_on_port(ctx, interface_name, pg_map): config_db = ctx.obj["config_db"] # Check whether port is legal ports = config_db.get_entry("PORT", interface_name) if not ports: ctx.fail("Port {} doesn't exist".format(interface_name)) # Remvoe all dynamic lossless PGs on the port existing_pgs = config_db.get_table("BUFFER_PG") removed = False for k, v in existing_pgs.items(): port, existing_pg = k if port == interface_name and (not pg_map or pg_map == existing_pg): need_to_remove = False referenced_profile = v.get('profile') if referenced_profile and referenced_profile == '[BUFFER_PROFILE|ingress_lossy_profile]': if pg_map: ctx.fail("Lossy PG {} can't be removed".format(pg_map)) else: continue config_db.set_entry("BUFFER_PG", (interface_name, existing_pg), None) adjust_pfc_enable(ctx, interface_name, pg_map, False) removed = True if not removed: if pg_map: ctx.fail("No specified PG {} found on port {}".format(pg_map, interface_name)) else: ctx.fail("No lossless PG found on port {}".format(interface_name)) def adjust_pfc_enable(ctx, interface_name, pg_map, add): config_db = ctx.obj["config_db"] # Fetch the original pfc_enable qosmap = config_db.get_entry("PORT_QOS_MAP", interface_name) pfc_enable = qosmap.get("pfc_enable") pfc_set = set() if pfc_enable: for priority in pfc_enable.split(","): pfc_set.add(int(priority)) if pg_map: lower_bound = int(pg_map[0]) upper_bound = int(pg_map[-1]) for priority in range(lower_bound, upper_bound + 1): if add: 
pfc_set.add(priority) elif priority in pfc_set: pfc_set.remove(priority) empty_set = set() pfc_enable = "" if not pfc_set.issubset(empty_set): for priority in pfc_set: pfc_enable += str(priority) + "," elif not add: # Remove all pfc_enable = "" else: ctx.fail("Try to add empty priorities") qosmap["pfc_enable"] = pfc_enable[:-1] config_db.set_entry("PORT_QOS_MAP", interface_name, qosmap) # # 'buffer' subgroup ('config interface buffer ...') # @interface.group(cls=clicommon.AbbreviationGroup) @click.pass_context def buffer(ctx): """Set or clear buffer configuration""" config_db = ctx.obj["config_db"] if not is_dynamic_buffer_enabled(config_db): ctx.fail("This command can only be executed on a system with dynamic buffer enabled") # # 'priority_group' subgroup ('config interface buffer priority_group ...') # @buffer.group(cls=clicommon.AbbreviationGroup) @click.pass_context def priority_group(ctx): """Set or clear priority-group buffer configuration""" pass # # 'lossless' subgroup ('config interface buffer priority_group lossless ...') # @priority_group.group(cls=clicommon.AbbreviationGroup) @click.pass_context def lossless(ctx): """Set or clear lossless PGs""" pass # # 'add' subcommand # @lossless.command('add') @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('pg_map', metavar='<pg_map>', required=True) @click.argument('override_profile', metavar='<override_profile>', required=False) @click.pass_context def add_pg(ctx, interface_name, pg_map, override_profile): """Set lossless PGs for the interface""" update_pg(ctx, interface_name, pg_map, override_profile) # # 'set' subcommand # @lossless.command('set') @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('pg_map', metavar='<pg_map>', required=True) @click.argument('override_profile', metavar='<override_profile>', required=False) @click.pass_context def set_pg(ctx, interface_name, pg_map, override_profile): """Set lossless PGs for the interface""" update_pg(ctx, interface_name, pg_map, override_profile, False) # # 'remove' subcommand # @lossless.command('remove') @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('pg_map', metavar='<pg_map>', required=False) @click.pass_context def remove_pg(ctx, interface_name, pg_map): """Clear lossless PGs for the interface""" remove_pg_on_port(ctx, interface_name, pg_map) # # 'cable_length' subcommand # @interface.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('length', metavar='<length>', required=True) @click.pass_context def cable_length(ctx, interface_name, length): """Set cable length for the interface""" config_db = ctx.obj["config_db"] if not is_dynamic_buffer_enabled(config_db): ctx.fail("This command is only supported on a system with dynamic buffer enabled") # Check whether port is legal ports = config_db.get_entry("PORT", interface_name) if not ports: ctx.fail("Port {} doesn't exist".format(interface_name)) try: assert "m" == length[-1] except Exception: ctx.fail("Invalid cable length. 
Should be in format <num>m, like 300m") keys = config_db.get_keys("CABLE_LENGTH") cable_length_set = {} cable_length_set[interface_name] = length config_db.mod_entry("CABLE_LENGTH", keys[0], cable_length_set) # # 'transceiver' subgroup ('config interface transceiver ...') # @interface.group(cls=clicommon.AbbreviationGroup) @click.pass_context def transceiver(ctx): """SFP transceiver configuration""" pass # # 'lpmode' subcommand ('config interface transceiver lpmode ...') # @transceiver.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('state', metavar='(enable|disable)', type=click.Choice(['enable', 'disable'])) @click.pass_context def lpmode(ctx, interface_name, state): """Enable/disable low-power mode for SFP transceiver module""" # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") if interface_name_is_valid(config_db, interface_name) is False: ctx.fail("Interface name is invalid. Please enter a valid interface name!!") cmd = "sudo sfputil lpmode {} {}".format("on" if state == "enable" else "off", interface_name) clicommon.run_command(cmd) # # 'reset' subcommand ('config interface reset ...') # @transceiver.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.pass_context def reset(ctx, interface_name): """Reset SFP transceiver module""" # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") if interface_name_is_valid(config_db, interface_name) is False: ctx.fail("Interface name is invalid. Please enter a valid interface name!!") cmd = "sudo sfputil reset {}".format(interface_name) clicommon.run_command(cmd) # # 'vrf' subgroup ('config interface vrf ...') # @interface.group(cls=clicommon.AbbreviationGroup) @click.pass_context def vrf(ctx): """Bind or unbind VRF""" pass # # 'bind' subcommand # @vrf.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('vrf_name', metavar='<vrf_name>', required=True) @click.pass_context def bind(ctx, interface_name, vrf_name): """Bind the interface to VRF""" # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") table_name = get_interface_table_name(interface_name) if table_name == "": ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]") if is_interface_bind_to_vrf(config_db, interface_name) is True and \ config_db.get_entry(table_name, interface_name).get('vrf_name') == vrf_name: return # Clean up IP addresses if the interface has any configured interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name) for interface_del in interface_dependent: config_db.set_entry(table_name, interface_del, None) config_db.set_entry(table_name, interface_name, None) # When config_db deletes an entry and then adds an entry with the same key, the DEL can be lost. 
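# Poll STATE_DB until swss has removed INTERFACE_TABLE|<interface_name>, and only then re-create the CONFIG_DB entry with the vrf_name binding, so the delete above is not collapsed with the add below.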
if ctx.obj['namespace'] is DEFAULT_NAMESPACE: state_db = SonicV2Connector(use_unix_socket_path=True) else: state_db = SonicV2Connector(use_unix_socket_path=True, namespace=ctx.obj['namespace']) state_db.connect(state_db.STATE_DB, False) _hash = '{}{}'.format('INTERFACE_TABLE|', interface_name) while state_db.get_all(state_db.STATE_DB, _hash) != None: time.sleep(0.01) state_db.close(state_db.STATE_DB) config_db.set_entry(table_name, interface_name, {"vrf_name": vrf_name}) # # 'unbind' subcommand # @vrf.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.pass_context def unbind(ctx, interface_name): """Unbind the interface from VRF""" # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("interface is None!") table_name = get_interface_table_name(interface_name) if table_name == "": ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]") if is_interface_bind_to_vrf(config_db, interface_name) is False: return interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name) for interface_del in interface_dependent: config_db.set_entry(table_name, interface_del, None) config_db.set_entry(table_name, interface_name, None) # # 'vrf' group ('config vrf ...') # @config.group(cls=clicommon.AbbreviationGroup, name='vrf') @click.pass_context def vrf(ctx): """VRF-related configuration tasks""" config_db = ConfigDBConnector() config_db.connect() ctx.obj = {} ctx.obj['config_db'] = config_db @vrf.command('add') @click.argument('vrf_name', metavar='<vrf_name>', required=True) @click.pass_context def add_vrf(ctx, vrf_name): """Add vrf""" config_db = ctx.obj['config_db'] if not vrf_name.startswith("Vrf") and not (vrf_name == 'mgmt') and not (vrf_name == 'management'): ctx.fail("'vrf_name' does not start with Vrf, mgmt or management!") if len(vrf_name) > 15: ctx.fail("'vrf_name' is too long!") if (vrf_name == 'mgmt' or vrf_name == 'management'): vrf_add_management_vrf(config_db) else: config_db.set_entry('VRF', vrf_name, {"NULL": "NULL"}) @vrf.command('del') @click.argument('vrf_name', metavar='<vrf_name>', required=True) @click.pass_context def del_vrf(ctx, vrf_name): """Del vrf""" config_db = ctx.obj['config_db'] if not vrf_name.startswith("Vrf") and not (vrf_name == 'mgmt') and not (vrf_name == 'management'): ctx.fail("'vrf_name' does not start with Vrf, mgmt or management!") if len(vrf_name) > 15: ctx.fail("'vrf_name' is too long!") if (vrf_name == 'mgmt' or vrf_name == 'management'): vrf_delete_management_vrf(config_db) else: del_interface_bind_to_vrf(config_db, vrf_name) config_db.set_entry('VRF', vrf_name, None) @vrf.command('add_vrf_vni_map') @click.argument('vrfname', metavar='<vrf-name>', required=True, type=str) @click.argument('vni', metavar='<vni>', required=True) @click.pass_context def add_vrf_vni_map(ctx, vrfname, vni): config_db = ctx.obj['config_db'] found = 0 if vrfname not in config_db.get_table('VRF').keys(): ctx.fail("vrf {} doesn't exist".format(vrfname)) if not vni.isdigit(): ctx.fail("Invalid VNI {}. Only a numeric VNI is accepted".format(vni)) if clicommon.vni_id_is_valid(int(vni)) is False: ctx.fail("Invalid VNI {}. 
Valid range [1 to 16777215].".format(vni)) vxlan_table = config_db.get_table('VXLAN_TUNNEL_MAP') vxlan_keys = vxlan_table.keys() if vxlan_keys is not None: for key in vxlan_keys: if (vxlan_table[key]['vni'] == vni): found = 1 break if (found == 0): ctx.fail("VLAN VNI not mapped. Please create VLAN VNI map entry first") found = 0 vrf_table = config_db.get_table('VRF') vrf_keys = vrf_table.keys() if vrf_keys is not None: for vrf_key in vrf_keys: if ('vni' in vrf_table[vrf_key] and vrf_table[vrf_key]['vni'] == vni): found = 1 break if (found == 1): ctx.fail("VNI already mapped to vrf {}".format(vrf_key)) config_db.mod_entry('VRF', vrfname, {"vni": vni}) @vrf.command('del_vrf_vni_map') @click.argument('vrfname', metavar='<vrf-name>', required=True, type=str) @click.pass_context def del_vrf_vni_map(ctx, vrfname): config_db = ctx.obj['config_db'] if vrfname not in config_db.get_table('VRF').keys(): ctx.fail("vrf {} doesnt exists".format(vrfname)) config_db.mod_entry('VRF', vrfname, {"vni": 0}) # # 'route' group ('config route ...') # @config.group(cls=clicommon.AbbreviationGroup) @click.pass_context def route(ctx): """route-related configuration tasks""" pass @route.command('add', context_settings={"ignore_unknown_options":True}) @click.argument('command_str', metavar='prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>', nargs=-1, type=click.Path()) @click.pass_context def add_route(ctx, command_str): """Add route command""" if len(command_str) < 4 or len(command_str) > 9: ctx.fail("argument is not in pattern prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>!") if "prefix" not in command_str: ctx.fail("argument is incomplete, prefix not found!") if "nexthop" not in command_str: ctx.fail("argument is incomplete, nexthop not found!") for i in range(0, len(command_str)): if "nexthop" == command_str[i]: prefix_str = command_str[:i] nexthop_str = command_str[i:] vrf_name = "" cmd = 'sudo vtysh -c "configure terminal" -c "ip route' if prefix_str: if len(prefix_str) == 2: prefix_mask = prefix_str[1] cmd += ' {}'.format(prefix_mask) elif len(prefix_str) == 4: vrf_name = prefix_str[2] prefix_mask = prefix_str[3] cmd += ' {}'.format(prefix_mask) else: ctx.fail("prefix is not in pattern!") if nexthop_str: if len(nexthop_str) == 2: ip = nexthop_str[1] if vrf_name == "": cmd += ' {}'.format(ip) else: cmd += ' {} vrf {}'.format(ip, vrf_name) elif len(nexthop_str) == 3: dev_name = nexthop_str[2] if vrf_name == "": cmd += ' {}'.format(dev_name) else: cmd += ' {} vrf {}'.format(dev_name, vrf_name) elif len(nexthop_str) == 4: vrf_name_dst = nexthop_str[2] ip = nexthop_str[3] if vrf_name == "": cmd += ' {} nexthop-vrf {}'.format(ip, vrf_name_dst) else: cmd += ' {} vrf {} nexthop-vrf {}'.format(ip, vrf_name, vrf_name_dst) else: ctx.fail("nexthop is not in pattern!") cmd += '"' clicommon.run_command(cmd) @route.command('del', context_settings={"ignore_unknown_options":True}) @click.argument('command_str', metavar='prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>', nargs=-1, type=click.Path()) @click.pass_context def del_route(ctx, command_str): """Del route command""" if len(command_str) < 4 or len(command_str) > 9: ctx.fail("argument is not in pattern prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>!") if "prefix" not in command_str: ctx.fail("argument is incomplete, prefix not found!") if "nexthop" not in command_str: ctx.fail("argument is incomplete, 
nexthop not found!") for i in range(0, len(command_str)): if "nexthop" == command_str[i]: prefix_str = command_str[:i] nexthop_str = command_str[i:] vrf_name = "" cmd = 'sudo vtysh -c "configure terminal" -c "no ip route' if prefix_str: if len(prefix_str) == 2: prefix_mask = prefix_str[1] cmd += ' {}'.format(prefix_mask) elif len(prefix_str) == 4: vrf_name = prefix_str[2] prefix_mask = prefix_str[3] cmd += ' {}'.format(prefix_mask) else: ctx.fail("prefix is not in pattern!") if nexthop_str: if len(nexthop_str) == 2: ip = nexthop_str[1] if vrf_name == "": cmd += ' {}'.format(ip) else: cmd += ' {} vrf {}'.format(ip, vrf_name) elif len(nexthop_str) == 3: dev_name = nexthop_str[2] if vrf_name == "": cmd += ' {}'.format(dev_name) else: cmd += ' {} vrf {}'.format(dev_name, vrf_name) elif len(nexthop_str) == 4: vrf_name_dst = nexthop_str[2] ip = nexthop_str[3] if vrf_name == "": cmd += ' {} nexthop-vrf {}'.format(ip, vrf_name_dst) else: cmd += ' {} vrf {} nexthop-vrf {}'.format(ip, vrf_name, vrf_name_dst) else: ctx.fail("nexthop is not in pattern!") cmd += '"' clicommon.run_command(cmd) # # 'acl' group ('config acl ...') # @config.group(cls=clicommon.AbbreviationGroup) def acl(): """ACL-related configuration tasks""" pass # # 'add' subgroup ('config acl add ...') # @acl.group(cls=clicommon.AbbreviationGroup) def add(): """ Add ACL configuration. """ pass def get_acl_bound_ports(): config_db = ConfigDBConnector() config_db.connect() ports = set() portchannel_members = set() portchannel_member_dict = config_db.get_table("PORTCHANNEL_MEMBER") for key in portchannel_member_dict: ports.add(key[0]) portchannel_members.add(key[1]) port_dict = config_db.get_table("PORT") for key in port_dict: if key not in portchannel_members: ports.add(key) return list(ports) # # 'table' subcommand ('config acl add table ...') # @add.command() @click.argument("table_name", metavar="<table_name>") @click.argument("table_type", metavar="<table_type>") @click.option("-d", "--description") @click.option("-p", "--ports") @click.option("-s", "--stage", type=click.Choice(["ingress", "egress"]), default="ingress") def table(table_name, table_type, description, ports, stage): """ Add ACL table """ config_db = ConfigDBConnector() config_db.connect() table_info = {"type": table_type} if description: table_info["policy_desc"] = description else: table_info["policy_desc"] = table_name if ports: table_info["ports@"] = ports else: table_info["ports@"] = ",".join(get_acl_bound_ports()) table_info["stage"] = stage config_db.set_entry("ACL_TABLE", table_name, table_info) # # 'remove' subgroup ('config acl remove ...') # @acl.group(cls=clicommon.AbbreviationGroup) def remove(): """ Remove ACL configuration. 
""" pass # # 'table' subcommand ('config acl remove table ...') # @remove.command() @click.argument("table_name", metavar="<table_name>") def table(table_name): """ Remove ACL table """ config_db = ConfigDBConnector() config_db.connect() config_db.set_entry("ACL_TABLE", table_name, None) # # 'acl update' group # @acl.group(cls=clicommon.AbbreviationGroup) def update(): """ACL-related configuration tasks""" pass # # 'full' subcommand # @update.command() @click.argument('file_name', required=True) def full(file_name): """Full update of ACL rules configuration.""" log.log_info("'acl update full {}' executing...".format(file_name)) command = "acl-loader update full {}".format(file_name) clicommon.run_command(command) # # 'incremental' subcommand # @update.command() @click.argument('file_name', required=True) def incremental(file_name): """Incremental update of ACL rule configuration.""" log.log_info("'acl update incremental {}' executing...".format(file_name)) command = "acl-loader update incremental {}".format(file_name) clicommon.run_command(command) # # 'dropcounters' group ('config dropcounters ...') # @config.group(cls=clicommon.AbbreviationGroup) def dropcounters(): """Drop counter related configuration tasks""" pass # # 'install' subcommand ('config dropcounters install') # @dropcounters.command() @click.argument("counter_name", type=str, required=True) @click.argument("counter_type", type=str, required=True) @click.argument("reasons", type=str, required=True) @click.option("-a", "--alias", type=str, help="Alias for this counter") @click.option("-g", "--group", type=str, help="Group for this counter") @click.option("-d", "--desc", type=str, help="Description for this counter") @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def install(counter_name, alias, group, counter_type, desc, reasons, verbose): """Install a new drop counter""" command = "dropconfig -c install -n '{}' -t '{}' -r '{}'".format(counter_name, counter_type, reasons) if alias: command += " -a '{}'".format(alias) if group: command += " -g '{}'".format(group) if desc: command += " -d '{}'".format(desc) clicommon.run_command(command, display_cmd=verbose) # # 'delete' subcommand ('config dropcounters delete') # @dropcounters.command() @click.argument("counter_name", type=str, required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def delete(counter_name, verbose): """Delete an existing drop counter""" command = "dropconfig -c uninstall -n {}".format(counter_name) clicommon.run_command(command, display_cmd=verbose) # # 'add_reasons' subcommand ('config dropcounters add_reasons') # @dropcounters.command('add-reasons') @click.argument("counter_name", type=str, required=True) @click.argument("reasons", type=str, required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def add_reasons(counter_name, reasons, verbose): """Add reasons to an existing drop counter""" command = "dropconfig -c add -n {} -r {}".format(counter_name, reasons) clicommon.run_command(command, display_cmd=verbose) # # 'remove_reasons' subcommand ('config dropcounters remove_reasons') # @dropcounters.command('remove-reasons') @click.argument("counter_name", type=str, required=True) @click.argument("reasons", type=str, required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def remove_reasons(counter_name, reasons, verbose): """Remove reasons from an existing drop counter""" command = "dropconfig -c remove -n {} -r 
{}".format(counter_name, reasons) clicommon.run_command(command, display_cmd=verbose) # # 'ecn' command ('config ecn ...') # @config.command() @click.option('-profile', metavar='<profile_name>', type=str, required=True, help="Profile name") @click.option('-rmax', metavar='<red threshold max>', type=int, help="Set red max threshold") @click.option('-rmin', metavar='<red threshold min>', type=int, help="Set red min threshold") @click.option('-ymax', metavar='<yellow threshold max>', type=int, help="Set yellow max threshold") @click.option('-ymin', metavar='<yellow threshold min>', type=int, help="Set yellow min threshold") @click.option('-gmax', metavar='<green threshold max>', type=int, help="Set green max threshold") @click.option('-gmin', metavar='<green threshold min>', type=int, help="Set green min threshold") @click.option('-rdrop', metavar='<red drop probability>', type=click.IntRange(0, 100), help="Set red drop probability") @click.option('-ydrop', metavar='<yellow drop probability>', type=click.IntRange(0, 100), help="Set yellow drop probability") @click.option('-gdrop', metavar='<green drop probability>', type=click.IntRange(0, 100), help="Set green drop probability") @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbose): """ECN-related configuration tasks""" log.log_info("'ecn -profile {}' executing...".format(profile)) command = "ecnconfig -p %s" % profile if rmax is not None: command += " -rmax %d" % rmax if rmin is not None: command += " -rmin %d" % rmin if ymax is not None: command += " -ymax %d" % ymax if ymin is not None: command += " -ymin %d" % ymin if gmax is not None: command += " -gmax %d" % gmax if gmin is not None: command += " -gmin %d" % gmin if rdrop is not None: command += " -rdrop %d" % rdrop if ydrop is not None: command += " -ydrop %d" % ydrop if gdrop is not None: command += " -gdrop %d" % gdrop if verbose: command += " -vv" clicommon.run_command(command, display_cmd=verbose) # # 'pfc' group ('config interface pfc ...') # @interface.group(cls=clicommon.AbbreviationGroup) @click.pass_context def pfc(ctx): """Set PFC configuration.""" pass # # 'pfc asymmetric' ('config interface pfc asymmetric ...') # @pfc.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('status', type=click.Choice(['on', 'off'])) @click.pass_context def asymmetric(ctx, interface_name, status): """Set asymmetric PFC configuration.""" # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") clicommon.run_command("pfc config asymmetric {0} {1}".format(status, interface_name)) # # 'pfc priority' command ('config interface pfc priority ...') # @pfc.command() @click.argument('interface_name', metavar='<interface_name>', required=True) @click.argument('priority', type=click.Choice([str(x) for x in range(8)])) @click.argument('status', type=click.Choice(['on', 'off'])) @click.pass_context def priority(ctx, interface_name, priority, status): """Set PFC priority configuration.""" # Get the config_db connector config_db = ctx.obj['config_db'] if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") clicommon.run_command("pfc config 
priority {0} {1} {2}".format(status, interface_name, priority)) # # 'buffer' group ('config buffer ...') # @config.group(cls=clicommon.AbbreviationGroup) @click.pass_context def buffer(ctx): """Configure buffer_profile""" config_db = ConfigDBConnector() config_db.connect() if not is_dynamic_buffer_enabled(config_db): ctx.fail("This command can only be supported on a system with dynamic buffer enabled") @buffer.group(cls=clicommon.AbbreviationGroup) @click.pass_context def profile(ctx): """Configure buffer profile""" pass @profile.command('add') @click.argument('profile', metavar='<profile>', required=True) @click.option('--xon', metavar='<xon>', type=int, help="Set xon threshold") @click.option('--xoff', metavar='<xoff>', type=int, help="Set xoff threshold") @click.option('--size', metavar='<size>', type=int, help="Set reserved size size") @click.option('--dynamic_th', metavar='<dynamic_th>', type=str, help="Set dynamic threshold") @click.option('--pool', metavar='<pool>', type=str, help="Buffer pool") @clicommon.pass_db def add_profile(db, profile, xon, xoff, size, dynamic_th, pool): """Add or modify a buffer profile""" config_db = db.cfgdb ctx = click.get_current_context() profile_entry = config_db.get_entry('BUFFER_PROFILE', profile) if profile_entry: ctx.fail("Profile {} already exist".format(profile)) update_profile(ctx, config_db, profile, xon, xoff, size, dynamic_th, pool) @profile.command('set') @click.argument('profile', metavar='<profile>', required=True) @click.option('--xon', metavar='<xon>', type=int, help="Set xon threshold") @click.option('--xoff', metavar='<xoff>', type=int, help="Set xoff threshold") @click.option('--size', metavar='<size>', type=int, help="Set reserved size size") @click.option('--dynamic_th', metavar='<dynamic_th>', type=str, help="Set dynamic threshold") @click.option('--pool', metavar='<pool>', type=str, help="Buffer pool") @clicommon.pass_db def set_profile(db, profile, xon, xoff, size, dynamic_th, pool): """Add or modify a buffer profile""" config_db = db.cfgdb ctx = click.get_current_context() profile_entry = config_db.get_entry('BUFFER_PROFILE', profile) if not profile_entry: ctx.fail("Profile {} doesn't exist".format(profile)) if not 'xoff' in profile_entry.keys() and xoff: ctx.fail("Can't change profile {} from dynamically calculating headroom to non-dynamically one".format(profile)) update_profile(ctx, config_db, profile, xon, xoff, size, dynamic_th, pool, profile_entry) def update_profile(ctx, config_db, profile_name, xon, xoff, size, dynamic_th, pool, profile_entry = None): params = {} if profile_entry: params = profile_entry dynamic_calculate = True if not pool: pool = 'ingress_lossless_pool' params['pool'] = '[BUFFER_POOL|' + pool + ']' if not config_db.get_entry('BUFFER_POOL', pool): ctx.fail("Pool {} doesn't exist".format(pool)) if xon: params['xon'] = xon dynamic_calculate = False else: xon = params.get('xon') if xoff: params['xoff'] = xoff dynamic_calculate = False else: xoff = params.get('xoff') if size: params['size'] = size dynamic_calculate = False if xon and not xoff: xoff = int(size) - int (xon) params['xoff'] = xoff elif not dynamic_calculate: if xon and xoff: size = int(xon) + int(xoff) params['size'] = size else: ctx.fail("Either both xon and xoff or size should be provided") if dynamic_calculate: params['headroom_type'] = 'dynamic' if not dynamic_th: ctx.fail("Either size information (xon, xoff, size) or dynamic_th needs to be provided") if dynamic_th: params['dynamic_th'] = dynamic_th else: # Fetch all the keys of 
default_lossless_buffer_parameter table # and then get the default_dynamic_th from that entry (should be only one) keys = config_db.get_keys('DEFAULT_LOSSLESS_BUFFER_PARAMETER') if len(keys) > 1 or len(keys) == 0: ctx.fail("Multiple or no entry in DEFAULT_LOSSLESS_BUFFER_PARAMETER found while no dynamic_th specified") default_lossless_param = config_db.get_entry('DEFAULT_LOSSLESS_BUFFER_PARAMETER', keys[0]) if 'default_dynamic_th' in default_lossless_param.keys(): params['dynamic_th'] = default_lossless_param['default_dynamic_th'] else: ctx.fail("No dynamic_th defined in DEFAULT_LOSSLESS_BUFFER_PARAMETER") config_db.set_entry("BUFFER_PROFILE", (profile_name), params) @profile.command('remove') @click.argument('profile', metavar='<profile>', required=True) @clicommon.pass_db def remove_profile(db, profile): """Delete a buffer profile""" config_db = db.cfgdb ctx = click.get_current_context() full_profile_name = '[BUFFER_PROFILE|{}]'.format(profile) existing_pgs = config_db.get_table("BUFFER_PG") for k, v in existing_pgs.items(): port, pg = k referenced_profile = v.get('profile') if referenced_profile and referenced_profile == full_profile_name: ctx.fail("Profile {} is referenced by {}|{} and can't be removed".format(profile, port, pg)) entry = config_db.get_entry("BUFFER_PROFILE", profile) if entry: config_db.set_entry("BUFFER_PROFILE", profile, None) else: ctx.fail("Profile {} doesn't exist".format(profile)) # # 'platform' group ('config platform ...') # @config.group(cls=clicommon.AbbreviationGroup) def platform(): """Platform-related configuration tasks""" # 'firmware' subgroup ("config platform firmware ...") @platform.group(cls=clicommon.AbbreviationGroup) def firmware(): """Firmware configuration tasks""" pass # 'install' subcommand ("config platform firmware install") @firmware.command( context_settings=dict( ignore_unknown_options=True, allow_extra_args=True ), add_help_option=False ) @click.argument('args', nargs=-1, type=click.UNPROCESSED) def install(args): """Install platform firmware""" cmd = "fwutil install {}".format(" ".join(args)) try: subprocess.check_call(cmd, shell=True) except subprocess.CalledProcessError as e: sys.exit(e.returncode) # 'update' subcommand ("config platform firmware update") @firmware.command( context_settings=dict( ignore_unknown_options=True, allow_extra_args=True ), add_help_option=False ) @click.argument('args', nargs=-1, type=click.UNPROCESSED) def update(args): """Update platform firmware""" cmd = "fwutil update {}".format(" ".join(args)) try: subprocess.check_call(cmd, shell=True) except subprocess.CalledProcessError as e: sys.exit(e.returncode) # # 'watermark' group ("show watermark telemetry interval") # @config.group(cls=clicommon.AbbreviationGroup) def watermark(): """Configure watermark """ pass @watermark.group(cls=clicommon.AbbreviationGroup) def telemetry(): """Configure watermark telemetry""" pass @telemetry.command() @click.argument('interval', required=True) def interval(interval): """Configure watermark telemetry interval""" command = 'watermarkcfg --config-interval ' + interval clicommon.run_command(command) # # 'interface_naming_mode' subgroup ('config interface_naming_mode ...') # @config.group(cls=clicommon.AbbreviationGroup, name='interface_naming_mode') def interface_naming_mode(): """Modify interface naming mode for interacting with SONiC CLI""" pass @interface_naming_mode.command('default') def naming_mode_default(): """Set CLI interface naming mode to DEFAULT (SONiC port name)""" set_interface_naming_mode('default') 
@interface_naming_mode.command('alias') def naming_mode_alias(): """Set CLI interface naming mode to ALIAS (Vendor port alias)""" set_interface_naming_mode('alias') def is_loopback_name_valid(loopback_name): """Loopback name validation """ if loopback_name[:CFG_LOOPBACK_PREFIX_LEN] != CFG_LOOPBACK_PREFIX : return False if (loopback_name[CFG_LOOPBACK_PREFIX_LEN:].isdigit() is False or int(loopback_name[CFG_LOOPBACK_PREFIX_LEN:]) > CFG_LOOPBACK_ID_MAX_VAL) : return False if len(loopback_name) > CFG_LOOPBACK_NAME_TOTAL_LEN_MAX: return False return True # # 'loopback' group ('config loopback ...') # @config.group() @click.pass_context @click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection') def loopback(ctx, redis_unix_socket_path): """Loopback-related configuration tasks""" kwargs = {} if redis_unix_socket_path: kwargs['unix_socket_path'] = redis_unix_socket_path config_db = ConfigDBConnector(**kwargs) config_db.connect(wait_for_init=False) ctx.obj = {'db': config_db} @loopback.command('add') @click.argument('loopback_name', metavar='<loopback_name>', required=True) @click.pass_context def add_loopback(ctx, loopback_name): config_db = ctx.obj['db'] if is_loopback_name_valid(loopback_name) is False: ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' " .format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO)) lo_intfs = [k for k, v in config_db.get_table('LOOPBACK_INTERFACE').items() if type(k) != tuple] if loopback_name in lo_intfs: ctx.fail("{} already exists".format(loopback_name)) config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, {"NULL" : "NULL"}) @loopback.command('del') @click.argument('loopback_name', metavar='<loopback_name>', required=True) @click.pass_context def del_loopback(ctx, loopback_name): config_db = ctx.obj['db'] if is_loopback_name_valid(loopback_name) is False: ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' " .format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO)) lo_config_db = config_db.get_table('LOOPBACK_INTERFACE') lo_intfs = [k for k, v in lo_config_db.items() if type(k) != tuple] if loopback_name not in lo_intfs: ctx.fail("{} does not exists".format(loopback_name)) ips = [ k[1] for k in lo_config_db if type(k) == tuple and k[0] == loopback_name ] for ip in ips: config_db.set_entry('LOOPBACK_INTERFACE', (loopback_name, ip), None) config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, None) @config.group(cls=clicommon.AbbreviationGroup) def ztp(): """ Configure Zero Touch Provisioning """ if os.path.isfile('/usr/bin/ztp') is False: exit("ZTP feature unavailable in this image version") if os.geteuid() != 0: exit("Root privileges are required for this operation") @ztp.command() @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='ZTP will be restarted. 
You may lose switch data and connectivity, continue?') @click.argument('run', required=False, type=click.Choice(["run"])) def run(run): """Restart ZTP of the device.""" command = "ztp run -y" clicommon.run_command(command, display_cmd=True) @ztp.command() @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Active ZTP session will be stopped and disabled, continue?') @click.argument('disable', required=False, type=click.Choice(["disable"])) def disable(disable): """Administratively Disable ZTP.""" command = "ztp disable -y" clicommon.run_command(command, display_cmd=True) @ztp.command() @click.argument('enable', required=False, type=click.Choice(["enable"])) def enable(enable): """Administratively Enable ZTP.""" command = "ztp enable" clicommon.run_command(command, display_cmd=True) # # 'syslog' group ('config syslog ...') # @config.group(cls=clicommon.AbbreviationGroup, name='syslog') @click.pass_context def syslog_group(ctx): """Syslog server configuration tasks""" config_db = ConfigDBConnector() config_db.connect() ctx.obj = {'db': config_db} @syslog_group.command('add') @click.argument('syslog_ip_address', metavar='<syslog_ip_address>', required=True) @click.pass_context def add_syslog_server(ctx, syslog_ip_address): """ Add syslog server IP """ if not clicommon.is_ipaddress(syslog_ip_address): ctx.fail('Invalid ip address') db = ctx.obj['db'] syslog_servers = db.get_table("SYSLOG_SERVER") if syslog_ip_address in syslog_servers: click.echo("Syslog server {} is already configured".format(syslog_ip_address)) return else: db.set_entry('SYSLOG_SERVER', syslog_ip_address, {'NULL': 'NULL'}) click.echo("Syslog server {} added to configuration".format(syslog_ip_address)) try: click.echo("Restarting rsyslog-config service...") clicommon.run_command("systemctl restart rsyslog-config", display_cmd=False) except SystemExit as e: ctx.fail("Restart service rsyslog-config failed with error {}".format(e)) @syslog_group.command('del') @click.argument('syslog_ip_address', metavar='<syslog_ip_address>', required=True) @click.pass_context def del_syslog_server(ctx, syslog_ip_address): """ Delete syslog server IP """ if not clicommon.is_ipaddress(syslog_ip_address): ctx.fail('Invalid IP address') db = ctx.obj['db'] syslog_servers = db.get_table("SYSLOG_SERVER") if syslog_ip_address in syslog_servers: db.set_entry('SYSLOG_SERVER', '{}'.format(syslog_ip_address), None) click.echo("Syslog server {} removed from configuration".format(syslog_ip_address)) else: ctx.fail("Syslog server {} is not configured.".format(syslog_ip_address)) try: click.echo("Restarting rsyslog-config service...") clicommon.run_command("systemctl restart rsyslog-config", display_cmd=False) except SystemExit as e: ctx.fail("Restart service rsyslog-config failed with error {}".format(e)) # # 'ntp' group ('config ntp ...') # @config.group(cls=clicommon.AbbreviationGroup) @click.pass_context def ntp(ctx): """NTP server configuration tasks""" config_db = ConfigDBConnector() config_db.connect() ctx.obj = {'db': config_db} @ntp.command('add') @click.argument('ntp_ip_address', metavar='<ntp_ip_address>', required=True) @click.pass_context def add_ntp_server(ctx, ntp_ip_address): """ Add NTP server IP """ if not clicommon.is_ipaddress(ntp_ip_address): ctx.fail('Invalid ip address') db = ctx.obj['db'] ntp_servers = db.get_table("NTP_SERVER") if ntp_ip_address in ntp_servers: click.echo("NTP server {} is already configured".format(ntp_ip_address)) return else: db.set_entry('NTP_SERVER', ntp_ip_address, 
{'NULL': 'NULL'}) click.echo("NTP server {} added to configuration".format(ntp_ip_address)) try: click.echo("Restarting ntp-config service...") clicommon.run_command("systemctl restart ntp-config", display_cmd=False) except SystemExit as e: ctx.fail("Restart service ntp-config failed with error {}".format(e)) @ntp.command('del') @click.argument('ntp_ip_address', metavar='<ntp_ip_address>', required=True) @click.pass_context def del_ntp_server(ctx, ntp_ip_address): """ Delete NTP server IP """ if not clicommon.is_ipaddress(ntp_ip_address): ctx.fail('Invalid IP address') db = ctx.obj['db'] ntp_servers = db.get_table("NTP_SERVER") if ntp_ip_address in ntp_servers: db.set_entry('NTP_SERVER', '{}'.format(ntp_ip_address), None) click.echo("NTP server {} removed from configuration".format(ntp_ip_address)) else: ctx.fail("NTP server {} is not configured.".format(ntp_ip_address)) try: click.echo("Restarting ntp-config service...") clicommon.run_command("systemctl restart ntp-config", display_cmd=False) except SystemExit as e: ctx.fail("Restart service ntp-config failed with error {}".format(e)) # # 'sflow' group ('config sflow ...') # @config.group(cls=clicommon.AbbreviationGroup) @click.pass_context def sflow(ctx): """sFlow-related configuration tasks""" config_db = ConfigDBConnector() config_db.connect() ctx.obj = {'db': config_db} # # 'sflow' command ('config sflow enable') # @sflow.command() @click.pass_context def enable(ctx): """Enable sFlow""" config_db = ctx.obj['db'] sflow_tbl = config_db.get_table('SFLOW') if not sflow_tbl: sflow_tbl = {'global': {'admin_state': 'up'}} else: sflow_tbl['global']['admin_state'] = 'up' config_db.mod_entry('SFLOW', 'global', sflow_tbl['global']) try: proc = subprocess.Popen("systemctl is-active sflow", shell=True, text=True, stdout=subprocess.PIPE) (out, err) = proc.communicate() except SystemExit as e: ctx.fail("Unable to check sflow status {}".format(e)) if out != "active": log.log_info("sflow service is not enabled. 
Starting sflow docker...") clicommon.run_command("sudo systemctl enable sflow") clicommon.run_command("sudo systemctl start sflow") # # 'sflow' command ('config sflow disable') # @sflow.command() @click.pass_context def disable(ctx): """Disable sFlow""" config_db = ctx.obj['db'] sflow_tbl = config_db.get_table('SFLOW') if not sflow_tbl: sflow_tbl = {'global': {'admin_state': 'down'}} else: sflow_tbl['global']['admin_state'] = 'down' config_db.mod_entry('SFLOW', 'global', sflow_tbl['global']) # # 'sflow' command ('config sflow polling-interval ...') # @sflow.command('polling-interval') @click.argument('interval', metavar='<polling_interval>', required=True, type=int) @click.pass_context def polling_int(ctx, interval): """Set polling-interval for counter-sampling (0 to disable)""" if interval not in range(5, 301) and interval != 0: click.echo("Polling interval must be between 5-300 (0 to disable)") config_db = ctx.obj['db'] sflow_tbl = config_db.get_table('SFLOW') if not sflow_tbl: sflow_tbl = {'global': {'admin_state': 'down'}} sflow_tbl['global']['polling_interval'] = interval config_db.mod_entry('SFLOW', 'global', sflow_tbl['global']) def is_valid_sample_rate(rate): return rate in range(256, 8388608 + 1) # # 'sflow interface' group # @sflow.group(cls=clicommon.AbbreviationGroup) @click.pass_context def interface(ctx): """Configure sFlow settings for an interface""" pass # # 'sflow' command ('config sflow interface enable ...') # @interface.command() @click.argument('ifname', metavar='<interface_name>', required=True, type=str) @click.pass_context def enable(ctx, ifname): config_db = ctx.obj['db'] if not interface_name_is_valid(config_db, ifname) and ifname != 'all': click.echo("Invalid interface name") return intf_dict = config_db.get_table('SFLOW_SESSION') if intf_dict and ifname in intf_dict: intf_dict[ifname]['admin_state'] = 'up' config_db.mod_entry('SFLOW_SESSION', ifname, intf_dict[ifname]) else: config_db.mod_entry('SFLOW_SESSION', ifname, {'admin_state': 'up'}) # # 'sflow' command ('config sflow interface disable ...') # @interface.command() @click.argument('ifname', metavar='<interface_name>', required=True, type=str) @click.pass_context def disable(ctx, ifname): config_db = ctx.obj['db'] if not interface_name_is_valid(config_db, ifname) and ifname != 'all': click.echo("Invalid interface name") return intf_dict = config_db.get_table('SFLOW_SESSION') if intf_dict and ifname in intf_dict: intf_dict[ifname]['admin_state'] = 'down' config_db.mod_entry('SFLOW_SESSION', ifname, intf_dict[ifname]) else: config_db.mod_entry('SFLOW_SESSION', ifname, {'admin_state': 'down'}) # # 'sflow' command ('config sflow interface sample-rate ...') # @interface.command('sample-rate') @click.argument('ifname', metavar='<interface_name>', required=True, type=str) @click.argument('rate', metavar='<sample_rate>', required=True, type=int) @click.pass_context def sample_rate(ctx, ifname, rate): config_db = ctx.obj['db'] if not interface_name_is_valid(config_db, ifname) and ifname != 'all': click.echo('Invalid interface name') return if not is_valid_sample_rate(rate): click.echo('Error: Sample rate must be between 256 and 8388608') return sess_dict = config_db.get_table('SFLOW_SESSION') if sess_dict and ifname in sess_dict: sess_dict[ifname]['sample_rate'] = rate config_db.mod_entry('SFLOW_SESSION', ifname, sess_dict[ifname]) else: config_db.mod_entry('SFLOW_SESSION', ifname, {'sample_rate': rate}) # # 'sflow collector' group # @sflow.group(cls=clicommon.AbbreviationGroup) @click.pass_context def 
collector(ctx): """Add/Delete a sFlow collector""" pass def is_valid_collector_info(name, ip, port, vrf_name): if len(name) > 16: click.echo("Collector name must not exceed 16 characters") return False if port not in range(0, 65535 + 1): click.echo("Collector port number must be between 0 and 65535") return False if not clicommon.is_ipaddress(ip): click.echo("Invalid IP address") return False if vrf_name != 'default' and vrf_name != 'mgmt': click.echo("Only 'default' and 'mgmt' VRF are supported") return False return True # # 'sflow' command ('config sflow collector add ...') # @collector.command() @click.option('--port', required=False, type=int, default=6343, help='Collector port number') @click.option('--vrf', required=False, type=str, default='default', help='Collector VRF') @click.argument('name', metavar='<collector_name>', required=True) @click.argument('ipaddr', metavar='<IPv4/v6_address>', required=True) @click.pass_context def add(ctx, name, ipaddr, port, vrf): """Add a sFlow collector""" ipaddr = ipaddr.lower() if not is_valid_collector_info(name, ipaddr, port, vrf): return config_db = ctx.obj['db'] collector_tbl = config_db.get_table('SFLOW_COLLECTOR') if (collector_tbl and name not in collector_tbl and len(collector_tbl) == 2): click.echo("Only 2 collectors can be configured, please delete one") return config_db.mod_entry('SFLOW_COLLECTOR', name, {"collector_ip": ipaddr, "collector_port": port, "collector_vrf": vrf}) return # # 'sflow' command ('config sflow collector del ...') # @collector.command('del') @click.argument('name', metavar='<collector_name>', required=True) @click.pass_context def del_collector(ctx, name): """Delete a sFlow collector""" config_db = ctx.obj['db'] collector_tbl = config_db.get_table('SFLOW_COLLECTOR') if name not in collector_tbl: click.echo("Collector: {} not configured".format(name)) return config_db.mod_entry('SFLOW_COLLECTOR', name, None) # # 'sflow agent-id' group # @sflow.group(cls=clicommon.AbbreviationGroup, name='agent-id') @click.pass_context def agent_id(ctx): """Add/Delete a sFlow agent""" pass # # 'sflow' command ('config sflow agent-id add ...') # @agent_id.command() @click.argument('ifname', metavar='<interface_name>', required=True) @click.pass_context def add(ctx, ifname): """Add sFlow agent information""" if ifname not in netifaces.interfaces(): click.echo("Invalid interface name") return config_db = ctx.obj['db'] sflow_tbl = config_db.get_table('SFLOW') if not sflow_tbl: sflow_tbl = {'global': {'admin_state': 'down'}} if 'agent_id' in sflow_tbl['global']: click.echo("Agent already configured. Please delete it first.") return sflow_tbl['global']['agent_id'] = ifname config_db.mod_entry('SFLOW', 'global', sflow_tbl['global']) # # 'sflow' command ('config sflow agent-id del') # @agent_id.command('del') @click.pass_context def delete(ctx): """Delete sFlow agent information""" config_db = ctx.obj['db'] sflow_tbl = config_db.get_table('SFLOW') if not sflow_tbl: sflow_tbl = {'global': {'admin_state': 'down'}} if 'agent_id' not in sflow_tbl['global']: click.echo("sFlow agent not configured.") return sflow_tbl['global'].pop('agent_id') config_db.set_entry('SFLOW', 'global', sflow_tbl['global']) if __name__ == '__main__': config()
[]
[]
[ "UTILITIES_UNIT_TESTING", "USER", "SUDO_USER", "UTILITIES_UNIT_TESTING_TOPOLOGY" ]
[]
["UTILITIES_UNIT_TESTING", "USER", "SUDO_USER", "UTILITIES_UNIT_TESTING_TOPOLOGY"]
python
4
0
httpie/config.py
import os import json import errno from httpie import __version__ from httpie.compat import is_windows DEFAULT_CONFIG_DIR = str(os.environ.get( 'HTTPIE_CONFIG_DIR', os.path.expanduser('~/.httpie') if not is_windows else os.path.expandvars(r'%APPDATA%\\httpie') )) class BaseConfigDict(dict): name = None helpurl = None about = None def __getattr__(self, item): return self[item] def _get_path(self): """Return the config file path without side-effects.""" raise NotImplementedError() @property def path(self): """Return the config file path creating basedir, if needed.""" path = self._get_path() try: os.makedirs(os.path.dirname(path), mode=0o700) except OSError as e: if e.errno != errno.EEXIST: raise return path def is_new(self): return not os.path.exists(self._get_path()) def load(self): try: with open(self.path, 'rt') as f: try: data = json.load(f) except ValueError as e: raise ValueError( 'Invalid %s JSON: %s [%s]' % (type(self).__name__, str(e), self.path) ) self.update(data) except IOError as e: if e.errno != errno.ENOENT: raise def save(self): self['__meta__'] = { 'httpie': __version__ } if self.helpurl: self['__meta__']['help'] = self.helpurl if self.about: self['__meta__']['about'] = self.about with open(self.path, 'w') as f: json.dump(self, f, indent=4, sort_keys=True, ensure_ascii=True) f.write('\n') def delete(self): try: os.unlink(self.path) except OSError as e: if e.errno != errno.ENOENT: raise class Config(BaseConfigDict): name = 'config' helpurl = 'https://github.com/jkbrzt/httpie#config' about = 'HTTPie configuration file' DEFAULTS = { 'default_options': [] } def __init__(self, directory=DEFAULT_CONFIG_DIR): super(Config, self).__init__() self.update(self.DEFAULTS) self.directory = directory def load(self): super(Config, self).load() self._migrate_implicit_content_type() def _get_path(self): return os.path.join(self.directory, self.name + '.json') def _migrate_implicit_content_type(self): """Migrate the removed implicit_content_type config option""" try: implicit_content_type = self.pop('implicit_content_type') except KeyError: pass else: if implicit_content_type == 'form': self['default_options'].insert(0, '--form') self.save() self.load()
[]
[]
[ "HTTPIE_CONFIG_DIR" ]
[]
["HTTPIE_CONFIG_DIR"]
python
1
0
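The httpie/config.py record above resolves its configuration directory from HTTPIE_CONFIG_DIR, falling back to a platform-dependent default (~/.httpie, or %APPDATA%\httpie on Windows). A minimal sketch of the same override-with-default pattern follows; the record itself is Python, so the Go rendering and the helper name defaultConfigDir are illustrative only.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// defaultConfigDir mirrors DEFAULT_CONFIG_DIR in the record above:
// an explicit HTTPIE_CONFIG_DIR wins, otherwise a per-platform default.
func defaultConfigDir() string {
	if dir := os.Getenv("HTTPIE_CONFIG_DIR"); dir != "" {
		return dir // explicit override wins
	}
	if runtime.GOOS == "windows" {
		return filepath.Join(os.Getenv("APPDATA"), "httpie")
	}
	home, err := os.UserHomeDir()
	if err != nil {
		home = "." // fall back to the working directory if HOME is unknown
	}
	return filepath.Join(home, ".httpie")
}

func main() {
	fmt.Println("config dir:", defaultConfigDir())
}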
train_cls.py
""" Author: Benny Date: Nov 2019 """ from data_utils.ModelNetDataLoader import ModelNetDataLoader import argparse import numpy as np import os import torch import datetime import logging from pathlib import Path from tqdm import tqdm import sys import provider import importlib import shutil BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = BASE_DIR sys.path.append(os.path.join(ROOT_DIR, 'models')) def parse_args(): '''PARAMETERS''' parser = argparse.ArgumentParser('PointNet') parser.add_argument('--batch_size', type=int, default=24, help='batch size in training [default: 24]') parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]') parser.add_argument('--epoch', default=200, type=int, help='number of epoch in training [default: 200]') parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training [default: 0.001]') parser.add_argument('--gpu', type=str, default='0', help='specify gpu device [default: 0]') parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]') parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training [default: Adam]') parser.add_argument('--log_dir', type=str, default=None, help='experiment root') parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate [default: 1e-4]') parser.add_argument('--normal', action='store_true', default=False, help='Whether to use normal information [default: False]') return parser.parse_args() def test(model, loader, num_class=40): mean_correct = [] class_acc = np.zeros((num_class,3)) for j, data in tqdm(enumerate(loader), total=len(loader)): points, target = data target = target[:, 0] points = points.transpose(2, 1) points, target = points.cuda(), target.cuda() classifier = model.eval() pred, _ = classifier(points) pred_choice = pred.data.max(1)[1] for cat in np.unique(target.cpu()): # kaidong mod: resolve tensor cannot be (target==cat) eq() to a numpy bug cat = cat.item() classacc = pred_choice[target==cat].eq(target[target==cat].long().data).cpu().sum() class_acc[cat,0]+= classacc.item()/float(points[target==cat].size()[0]) class_acc[cat,1]+=1 correct = pred_choice.eq(target.long().data).cpu().sum() mean_correct.append(correct.item()/float(points.size()[0])) class_acc[:,2] = class_acc[:,0]/ class_acc[:,1] class_acc = np.mean(class_acc[:,2]) instance_acc = np.mean(mean_correct) return instance_acc, class_acc def main(args): def log_string(str): logger.info(str) print(str) '''HYPER PARAMETER''' os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu '''CREATE DIR''' timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')) experiment_dir = Path('./log/') experiment_dir.mkdir(exist_ok=True) experiment_dir = experiment_dir.joinpath('classification') experiment_dir.mkdir(exist_ok=True) if args.log_dir is None: experiment_dir = experiment_dir.joinpath(timestr) else: experiment_dir = experiment_dir.joinpath(args.log_dir) experiment_dir.mkdir(exist_ok=True) checkpoints_dir = experiment_dir.joinpath('checkpoints/') checkpoints_dir.mkdir(exist_ok=True) log_dir = experiment_dir.joinpath('logs/') log_dir.mkdir(exist_ok=True) '''LOG''' args = parse_args() logger = logging.getLogger("Model") logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model)) file_handler.setLevel(logging.INFO) file_handler.setFormatter(formatter) 
logger.addHandler(file_handler) log_string('PARAMETER ...') log_string(args) '''DATA LOADING''' log_string('Load dataset ...') DATA_PATH = 'data/modelnet40_normal_resampled/' TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train', normal_channel=args.normal) TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal) trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4) testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4) '''MODEL LOADING''' num_class = 40 MODEL = importlib.import_module(args.model) shutil.copy('./models/%s.py' % args.model, str(experiment_dir)) shutil.copy('./models/pointnet_util.py', str(experiment_dir)) classifier = MODEL.get_model(num_class,normal_channel=args.normal).cuda() criterion = MODEL.get_loss().cuda() try: checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth') start_epoch = checkpoint['epoch'] classifier.load_state_dict(checkpoint['model_state_dict']) log_string('Use pretrain model') except: log_string('No existing model, starting training from scratch...') start_epoch = 0 if args.optimizer == 'Adam': optimizer = torch.optim.Adam( classifier.parameters(), lr=args.learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.decay_rate ) else: optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7) global_epoch = 0 global_step = 0 best_instance_acc = 0.0 best_class_acc = 0.0 mean_correct = [] '''TRANING''' logger.info('Start training...') for epoch in range(start_epoch,args.epoch): log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch)) scheduler.step() for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9): points, target = data points = points.data.numpy() points = provider.random_point_dropout(points) points[:,:, 0:3] = provider.random_scale_point_cloud(points[:,:, 0:3]) points[:,:, 0:3] = provider.shift_point_cloud(points[:,:, 0:3]) points = torch.Tensor(points) target = target[:, 0] points = points.transpose(2, 1) points, target = points.cuda(), target.cuda() optimizer.zero_grad() classifier = classifier.train() pred, trans_feat = classifier(points) loss = criterion(pred, target.long(), trans_feat) pred_choice = pred.data.max(1)[1] correct = pred_choice.eq(target.long().data).cpu().sum() mean_correct.append(correct.item() / float(points.size()[0])) loss.backward() optimizer.step() global_step += 1 train_instance_acc = np.mean(mean_correct) log_string('Train Instance Accuracy: %f' % train_instance_acc) with torch.no_grad(): instance_acc, class_acc = test(classifier.eval(), testDataLoader) if (instance_acc >= best_instance_acc): best_instance_acc = instance_acc best_epoch = epoch + 1 if (class_acc >= best_class_acc): best_class_acc = class_acc log_string('Test Instance Accuracy: %f, Class Accuracy: %f'% (instance_acc, class_acc)) log_string('Best Instance Accuracy: %f, Class Accuracy: %f'% (best_instance_acc, best_class_acc)) if (instance_acc >= best_instance_acc): logger.info('Save model...') savepath = str(checkpoints_dir) + '/best_model.pth' log_string('Saving at %s'% savepath) state = { 'epoch': best_epoch, 'instance_acc': instance_acc, 'class_acc': class_acc, 'model_state_dict': classifier.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), } 
torch.save(state, savepath) global_epoch += 1 logger.info('End of training...') if __name__ == '__main__': args = parse_args() main(args)
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
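train_cls.py above narrows GPU visibility by assigning CUDA_VISIBLE_DEVICES from its --gpu argument before any CUDA work starts. A sketch of the same idea, assuming the variable is exported for a child process rather than the current one as the Python script does; the command "nvidia-smi" is only an example of a GPU-aware program.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	gpu := "0" // analogous to the --gpu argument in the record above

	// Export CUDA_VISIBLE_DEVICES only for the child process so the
	// restriction does not leak into the parent environment.
	cmd := exec.Command("nvidia-smi")
	cmd.Env = append(os.Environ(), "CUDA_VISIBLE_DEVICES="+gpu)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "run failed:", err)
	}
}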
main.go
package main

import (
	"bufio"
	"encoding/json"
	"flag"
	"fmt"
	"net/http"
	"os"
	"strings"
	"sync"
)

var wg sync.WaitGroup

func main() {
	var webhookURL, lines string
	flag.StringVar(&webhookURL, "u", "", "Discord Webhook URL")
	flag.Parse()

	webhookENV := os.Getenv("DISCORD_WEBHOOK_URL")
	if webhookENV != "" {
		webhookURL = webhookENV
	} else {
		if webhookURL == "" {
			fmt.Println("Discord Webhook URL not set!")
		}
	}

	if !isStdin() {
		os.Exit(1)
	}

	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		line := scanner.Text()
		fmt.Println(line)
		lines += line
		lines += "\n"
	}

	wg.Add(1)
	go disc(webhookURL, lines)
	wg.Wait()
}

func isStdin() bool {
	info, err := os.Stdin.Stat()
	if err != nil {
		return false
	}
	if info.Mode()&os.ModeNamedPipe == 0 { // to make sure the input comes from the pipe
		return false
	}
	return true
}

type data struct {
	Content string `json:"content"`
}

func disc(url string, line string) {
	data, _ := json.Marshal(data{Content: line})
	http.Post(url, "application/json", strings.NewReader(string(data)))
	wg.Done()
}
[ "\"DISCORD_WEBHOOK_URL\"" ]
[]
[ "DISCORD_WEBHOOK_URL" ]
[]
["DISCORD_WEBHOOK_URL"]
go
1
0
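The main.go record above lets DISCORD_WEBHOOK_URL override the -u flag before anything is posted. A minimal standalone sketch of that lookup-with-fallback pattern; the helper name resolveWebhookURL is illustrative only and, unlike the original, the sketch exits when neither source provides a URL.

package main

import (
	"flag"
	"fmt"
	"os"
)

// resolveWebhookURL prefers the environment variable, then the flag value.
func resolveWebhookURL(flagValue string) (string, error) {
	if env := os.Getenv("DISCORD_WEBHOOK_URL"); env != "" {
		return env, nil
	}
	if flagValue == "" {
		return "", fmt.Errorf("Discord Webhook URL not set")
	}
	return flagValue, nil
}

func main() {
	var webhookURL string
	flag.StringVar(&webhookURL, "u", "", "Discord Webhook URL")
	flag.Parse()

	url, err := resolveWebhookURL(webhookURL)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("using webhook:", url)
}

Checking the environment first keeps credentials out of shell history in CI while still allowing an explicit flag for local runs.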
vendor/9fans.net/go/draw/init_plan9.go
package draw import ( "encoding/binary" "fmt" "image" "io/ioutil" "os" "path" "strconv" "strings" "sync" "syscall" ) // Display locking: // The Exported methods of Display, being entry points for clients, lock the Display structure. // The unexported ones do not. // The methods for Font, Image and Screen also lock the associated display by the same rules. // drawFile exists solely to provide a ReadDraw, which makes Display.Conn compatible with a call in unloadimage, used by the devdraw version. type drawFile struct { *os.File } func (f *drawFile) ReadDraw(buf []byte) (int, error) { return f.Read(buf) } type Display struct { mu sync.Mutex debug bool errch chan<- error bufsize int buf []byte imageid uint32 qmask *Image Image *Image Screen *Screen ScreenImage *Image Windows *Image DPI int firstfont *Font lastfont *Font White *Image // Pre-allocated color. Black *Image // Pre-allocated color. Opaque *Image // Pre-allocated color. Transparent *Image // Pre-allocated color. DefaultFont *Font DefaultSubfont *Subfont conn *drawFile ctl *os.File mousectl *Mousectl keyboardctl *Keyboardctl mtpt string } // An Image represents an image on the server, possibly visible on the display. type Image struct { Display *Display id uint32 Pix Pix // The pixel format for the image. Depth int // The depth of the pixels in bits. Repl bool // Whether the image is replicated (tiles the rectangle). R image.Rectangle // The extent of the image. Clipr image.Rectangle // The clip region. Origin image.Point // Of image in screen, for mouse warping. next *Image Screen *Screen // If non-nil, the associated screen; this is a window. } // A Screen is a collection of windows that are visible on an image. type Screen struct { Display *Display // Display connected to the server. id uint32 Fill *Image // Background image behind the windows. } // Refresh algorithms to execute when a window is resized or uncovered. // Refmesg is almost always the correct one to use. const ( Refbackup = 0 Refnone = 1 Refmesg = 2 ) const deffontname = "*default*" // Init starts and connects to a server and returns a Display structure through // which all graphics will be mediated. The arguments are an error channel on // which to deliver errors (currently unused), the name of the font to use (the // empty string may be used to represent the default font), the window label, // and the window size as a string in the form XxY, as in "1000x500"; the units // are pixels. func Init(errch chan<- error, fontname, label, winsize string) (*Display, error) { if errch == nil { errch = make(chan error, 1) } var err error d := &Display{ errch: errch, } // Lock Display so we maintain the contract within this library. d.mu.Lock() defer d.mu.Unlock() if dbg := os.Getenv("DRAWDEBUG"); dbg != "" { d.debug = true } width, height := 800, 600 if winsize != "" { t := strings.Split(winsize, "x") if len(t) != 2 { return nil, fmt.Errorf("bad winsize, must be $widthx$height") } width, err = strconv.Atoi(t[0]) if err != nil { return nil, fmt.Errorf("bad width in winsize: %s", err) } height, err = strconv.Atoi(t[1]) if err != nil { return nil, fmt.Errorf("bad height in winsize: %s", err) } } wsys := os.Getenv("wsys") if wsys == "" { return nil, fmt.Errorf("$wsys not set") } wsysfd, err := os.OpenFile(wsys, os.O_RDWR, 0666) if err != nil { return nil, err } // note: must not close wsysfd, or fd's get mixed up... d.mtpt = "/n/duit." 
+ path.Base(wsys) err = syscall.Mount(int(wsysfd.Fd()), -1, d.mtpt, 0, fmt.Sprintf("new -r 0 0 %d %d", width, height)) if err != nil { return nil, err } d.ctl, err = os.OpenFile("/dev/draw/new", os.O_RDWR|syscall.O_CLOEXEC, 0666) if err != nil { return nil, err } info, err := d.readctl() if err != nil { return nil, err } id := atoi(info[:1*12]) drawDir := fmt.Sprintf("/dev/draw/%d", id) fd, err := os.OpenFile(drawDir+"/data", os.O_RDWR|syscall.O_CLOEXEC, 0666) if err != nil { return nil, err } d.conn = &drawFile{fd} pix, _ := ParsePix(strings.TrimSpace(string(info[2*12 : 3*12]))) d.Image = &Image{ Display: d, id: 0, Pix: pix, Depth: pix.Depth(), Repl: atoi(info[3*12:]) > 0, R: ator(info[4*12:]), Clipr: ator(info[8*12:]), } d.bufsize = Iounit(int(d.conn.Fd())) if d.bufsize <= 0 { d.bufsize = 8000 } if d.bufsize < 512 { return nil, fmt.Errorf("iounit too small") } d.buf = make([]byte, 0, d.bufsize+5) d.White, err = d.allocImage(image.Rect(0, 0, 1, 1), GREY1, true, White) if err != nil { return nil, fmt.Errorf("can't allocate white: %s", err) } d.Black, err = d.allocImage(image.Rect(0, 0, 1, 1), GREY1, true, Black) if err != nil { return nil, fmt.Errorf("can't allocate black: %s", err) } d.Opaque = d.White d.Transparent = d.Black /* * Set up default font */ df, err := getdefont(d) if err != nil { return nil, err } d.DefaultSubfont = df if fontname == "" { fontname = os.Getenv("font") } /* * Build fonts with caches==depth of screen, for speed. * If conversion were faster, we'd use 0 and save memory. */ var font *Font if fontname == "" { buf := []byte(fmt.Sprintf("%d %d\n0 %d\t%s\n", df.Height, df.Ascent, df.N-1, deffontname)) //fmt.Printf("%q\n", buf) //BUG: Need something better for this installsubfont("*default*", df); font, err = d.buildFont(buf, deffontname) } else { font, err = d.openFont(fontname) // BUG: grey fonts } if err != nil { return nil, err } d.DefaultFont = font err = ioutil.WriteFile(d.mtpt+"/label", []byte(label), 0600) if err != nil { return nil, err } err = gengetwindow(d, d.mtpt+"/winname", Refnone) if err != nil { d.close() return nil, err } d.mousectl = d.initMouse() d.keyboardctl = d.initKeyboard() return d, nil } // Attach (re-)attaches to a display, typically after a resize, updating the // display's associated image, screen, and screen image data structures. func (d *Display) Attach(ref int) error { d.mu.Lock() defer d.mu.Unlock() return d.getwindow(ref) } func (d *Display) getwindow(ref int) error { return gengetwindow(d, d.mtpt+"/winname", ref) } // Attach, or possibly reattach, to window. // If reattaching, maintain value of screen pointer. 
func gengetwindow(d *Display, winname string, ref int) error { var i *Image buf, err := ioutil.ReadFile(winname) if err != nil { return fmt.Errorf("gengetwindow: %s", err) } i, err = d.namedimage(buf) if err != nil { return fmt.Errorf("namedimage %s: %s", buf, err) } if d.ScreenImage != nil { d.ScreenImage.free() d.Screen.free() d.Screen = nil } if i == nil { d.ScreenImage = nil return fmt.Errorf("namedimage returned nil image") } d.Screen, err = i.allocScreen(d.White, false) if err != nil { return err } r := i.R const Borderwidth = 4 r = i.R.Inset(Borderwidth) d.ScreenImage = d.Image d.ScreenImage, err = allocwindow(nil, d.Screen, r, 0, White) if err != nil { return err } err = originwindow(d.ScreenImage, image.Pt(0, 0), r.Min) if err != nil { return err } screen := d.ScreenImage screen.draw(screen.R, d.White, nil, image.ZP) if err := d.flush(true); err != nil { return err } return nil } func (d *Display) readctl() ([]byte, error) { buf := make([]byte, 12*12) n, err := d.ctl.Read(buf) if err == nil && n < 143 { return nil, fmt.Errorf("bad ctl read, expected 143 bytes, saw %d", n) } return buf[:n], err } /* implements message 'n' */ func (d *Display) namedimage(name []byte) (*Image, error) { err := d.flush(false) if err != nil { return nil, err } a := d.bufimage(1 + 4 + 1 + len(name)) d.imageid++ id := d.imageid a[0] = 'n' bplong(a[1:], id) a[5] = byte(len(name)) copy(a[6:], name) err = d.flush(false) if err != nil { return nil, fmt.Errorf("namedimage: %s", err) } ctlbuf, err := d.readctl() if err != nil { return nil, fmt.Errorf("namedimage: %s", err) } pix, _ := ParsePix(string(ctlbuf[2*12 : 3*12])) image := &Image{ Display: d, id: id, Pix: pix, Depth: pix.Depth(), Repl: atoi(ctlbuf[3*12:]) > 0, R: ator(ctlbuf[4*12:]), Clipr: ator(ctlbuf[8*12:]), next: nil, Screen: nil, } return image, nil } // Close closes the Display. func (d *Display) Close() error { d.mu.Lock() defer d.mu.Unlock() return d.close() } func (d *Display) close() error { d.keyboardctl.fd.Close() d.keyboardctl.ctlfd.Close() d.mousectl.mfd.Close() d.mousectl.cfd.Close() ioutil.WriteFile(d.mtpt+"/wctl", []byte("delete"), 0666) d.conn.Close() d.ctl.Close() return nil } // TODO: drawerror func (d *Display) flushBuffer() error { if len(d.buf) == 0 { return nil } _, err := d.conn.Write(d.buf) d.buf = d.buf[:0] if err != nil { fmt.Fprintf(os.Stderr, "doflush: %s\n", err) return err } return nil } // Flush writes any pending data to the screen. func (d *Display) Flush() error { d.mu.Lock() defer d.mu.Unlock() return d.flush(true) } // flush data, maybe make visible func (d *Display) flush(vis bool) error { if vis { d.bufsize++ a := d.bufimage(1) d.bufsize-- a[0] = 'v' } return d.flushBuffer() } func (d *Display) bufimage(n int) []byte { if d == nil || n < 0 || n > d.bufsize { panic("bad count in bufimage") } if len(d.buf)+n > d.bufsize { if err := d.flushBuffer(); err != nil { panic("bufimage flush: " + err.Error()) } } i := len(d.buf) d.buf = d.buf[:i+n] return d.buf[i:] } const DefaultDPI = 133 // TODO: Document. 
func (d *Display) Scale(n int) int { if d == nil || d.DPI <= DefaultDPI { return n } return (n*d.DPI + DefaultDPI/2) / DefaultDPI } func atoi(b []byte) int { i := 0 for i < len(b) && b[i] == ' ' { i++ } n := 0 for ; i < len(b) && '0' <= b[i] && b[i] <= '9'; i++ { n = n*10 + int(b[i]) - '0' } return n } func atop(b []byte) image.Point { return image.Pt(atoi(b), atoi(b[12:])) } func ator(b []byte) image.Rectangle { return image.Rectangle{atop(b), atop(b[2*12:])} } func bplong(b []byte, n uint32) { binary.LittleEndian.PutUint32(b, n) } func bpshort(b []byte, n uint16) { binary.LittleEndian.PutUint16(b, n) } func (d *Display) HiDPI() bool { return d.DPI >= DefaultDPI*3/2 } func (d *Display) ScaleSize(n int) int { if d == nil || d.DPI <= DefaultDPI { return n } return (n*d.DPI + DefaultDPI/2) / DefaultDPI } func originwindow(i *Image, log, scr image.Point) error { d := i.Display err := d.flush(false) if err != nil { return err } b := d.bufimage(1 + 4 + 2*4 + 2*4) b[0] = 'o' bplong(b[1:], i.id) bplong(b[5:], uint32(log.X)) bplong(b[9:], uint32(log.Y)) bplong(b[13:], uint32(scr.X)) bplong(b[17:], uint32(scr.Y)) err = d.flush(false) if err != nil { return err } delta := log.Sub(i.R.Min) i.R = i.R.Add(delta) i.Clipr = i.Clipr.Add(delta) i.Origin = i.Origin.Sub(delta) return nil }
[ "\"DRAWDEBUG\"", "\"wsys\"", "\"font\"" ]
[]
[ "wsys", "DRAWDEBUG", "font" ]
[]
["wsys", "DRAWDEBUG", "font"]
go
3
0
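The init_plan9.go record above applies three different policies to its environment variables: DRAWDEBUG is an optional toggle, wsys is required and its absence is an error, and font falls back to a built-in default. A sketch of those three lookup policies, assuming nothing beyond the standard library; the helper names requiredEnv and envOrDefault are illustrative, not part of the original package.

package main

import (
	"fmt"
	"os"
)

// requiredEnv fails loudly when a variable the program cannot run without is missing.
func requiredEnv(name string) (string, error) {
	val := os.Getenv(name)
	if val == "" {
		return "", fmt.Errorf("$%s not set", name)
	}
	return val, nil
}

// envOrDefault substitutes a fallback when the variable is unset or empty.
func envOrDefault(name, fallback string) string {
	if val := os.Getenv(name); val != "" {
		return val
	}
	return fallback
}

func main() {
	debug := os.Getenv("DRAWDEBUG") != "" // optional toggle

	wsys, err := requiredEnv("wsys") // required, like the draw package's window system mount
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	font := envOrDefault("font", "*default*") // optional with a built-in fallback
	fmt.Println(debug, wsys, font)
}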
integration_tests/vault_test.go
package integration

import (
	"os"
	"testing"

	"github.com/instructure-bridge/truss-cli/truss"
	. "github.com/smartystreets/goconvey/convey"
)

func TestVault(t *testing.T) {
	Convey("Vault", t, func() {
		var auth truss.VaultAuth
		awsrole, ok := os.LookupEnv("TEST_AWS_ROLE")
		if ok {
			vaultrole := os.Getenv("TEST_VAULT_ROLE")
			auth = truss.VaultAuthAWS(vaultrole, awsrole, "us-east-1")
		}
		vault := truss.Vault("", auth)

		Convey("PortForward", func() {
			Convey("runs no errors", func() {
				port, err := vault.PortForward()
				So(err, ShouldBeNil)
				So(port, ShouldNotBeEmpty)

				port2, err := vault.PortForward()
				So(err, ShouldBeNil)
				So(port, ShouldEqual, port2)

				err = vault.ClosePortForward()
				So(err, ShouldBeNil)
			})
		})

		Convey("Run", func() {
			bytes, err := vault.Run([]string{"status"})
			So(err, ShouldBeNil)
			So(bytes, ShouldNotBeEmpty)
		})
	})
}
[ "\"TEST_VAULT_ROLE\"" ]
[]
[ "TEST_VAULT_ROLE" ]
[]
["TEST_VAULT_ROLE"]
go
1
0
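The vault_test.go record above uses os.LookupEnv for TEST_AWS_ROLE, because only LookupEnv can distinguish "unset" from "set to an empty string", and that presence check is what gates the AWS auth path; TEST_VAULT_ROLE is then read with plain os.Getenv. A minimal sketch of that distinction, with everything outside the two variable names being illustrative.

package main

import (
	"fmt"
	"os"
)

func main() {
	awsRole, ok := os.LookupEnv("TEST_AWS_ROLE")
	if !ok {
		fmt.Println("TEST_AWS_ROLE unset: skipping AWS-based auth")
		return
	}

	// Getenv returns "" both for unset and for empty, so it is only
	// consulted once the feature is known to be enabled.
	vaultRole := os.Getenv("TEST_VAULT_ROLE")
	fmt.Printf("aws role %q, vault role %q\n", awsRole, vaultRole)
}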
src/main/java/com/artipie/management/api/CookiesAuthScheme.java
/* * The MIT License (MIT) Copyright (c) 2020-2021 artipie.com * https://github.com/artipie/management-api/LICENSE.txt */ package com.artipie.management.api; import com.artipie.http.auth.AuthScheme; import com.artipie.http.auth.Authentication; import com.artipie.http.rq.RqHeaders; import com.jcabi.log.Logger; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyFactory; import java.security.spec.KeySpec; import java.security.spec.PKCS8EncodedKeySpec; import java.util.HashMap; import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import javax.crypto.Cipher; import org.apache.commons.codec.DecoderException; import org.apache.commons.codec.binary.Hex; /** * API request cookies. * * @since 0.1 */ public final class CookiesAuthScheme implements AuthScheme { /** * Auth scheme name. */ private static final String SCHEME = "Cookie"; @Override public CompletionStage<Result> authenticate( final Iterable<Map.Entry<String, String>> headers, final String line ) { return CompletableFuture.completedFuture( CookiesAuthScheme.session( Optional.ofNullable( CookiesAuthScheme.cookies( new RqHeaders(headers, CookiesAuthScheme.SCHEME) ).get("session") ) ) ); } /** * Map of cookies. * * @param raw Raw strings of cookie headers * @return Cookies map */ private static Map<String, String> cookies(final Iterable<String> raw) { final Map<String, String> map = new HashMap<>(0); for (final String value : raw) { for (final String pair : value.split(";")) { final String[] parts = pair.split("=", 2); final String key = parts[0].trim().toLowerCase(Locale.US); if (parts.length > 1 && !parts[1].isEmpty()) { map.put(key, parts[1].trim()); } else { map.remove(key); } } } return map; } /** * Decode session id to user name. * <p> * Encoded session string is hex of user id encrypted with RSA public key. * See cipher and key spec format for more details. * </p> * * @param encoded Encoded string * @return User id */ private static Result session(final Optional<String> encoded) { final String env = System.getenv("ARTIPIE_SESSION_KEY"); final Optional<Authentication.User> user; if (env == null || encoded.isEmpty()) { user = Optional.empty(); } else { final byte[] key; try { key = Files.readAllBytes(Paths.get(env)); final KeySpec spec = new PKCS8EncodedKeySpec(key); final Cipher rsa = Cipher.getInstance("RSA/ECB/OAEPWithSHA1AndMGF1Padding"); rsa.init(Cipher.DECRYPT_MODE, KeyFactory.getInstance("RSA").generatePrivate(spec)); user = Optional.of( new Authentication.User( new String( rsa.doFinal(Hex.decodeHex(encoded.get().toCharArray())), StandardCharsets.UTF_8 ) ) ); } catch (final IOException | DecoderException | GeneralSecurityException err) { Logger.error( CookiesAuthScheme.class, "Failed to read session cookie: %[exception]s" ); throw new IllegalStateException("Failed to read session cookie", err); } } return new AuthScheme.Result() { @Override public Optional<Authentication.User> user() { return user; } @Override public String challenge() { return CookiesAuthScheme.SCHEME; } }; } }
[ "\"ARTIPIE_SESSION_KEY\"" ]
[]
[ "ARTIPIE_SESSION_KEY" ]
[]
["ARTIPIE_SESSION_KEY"]
java
1
0
sdk/go/arvados/fs_site_test.go
// Copyright (C) The Arvados Authors. All rights reserved. // // SPDX-License-Identifier: Apache-2.0 package arvados import ( "net/http" "os" check "gopkg.in/check.v1" ) const ( // Importing arvadostest would be an import cycle, so these // fixtures are duplicated here [until fs moves to a separate // package]. fixtureActiveToken = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi" fixtureAProjectUUID = "zzzzz-j7d0g-v955i6s2oi1cbso" fixtureFooAndBarFilesInDirUUID = "zzzzz-4zz18-foonbarfilesdir" fixtureFooCollectionName = "zzzzz-4zz18-fy296fx3hot09f7 added sometime" fixtureFooCollectionPDH = "1f4b0bc7583c2a7f9102c395f4ffc5e3+45" fixtureFooCollection = "zzzzz-4zz18-fy296fx3hot09f7" fixtureNonexistentCollection = "zzzzz-4zz18-totallynotexist" ) var _ = check.Suite(&SiteFSSuite{}) type SiteFSSuite struct { client *Client fs CustomFileSystem kc keepClient } func (s *SiteFSSuite) SetUpTest(c *check.C) { s.client = &Client{ APIHost: os.Getenv("ARVADOS_API_HOST"), AuthToken: fixtureActiveToken, Insecure: true, } s.kc = &keepClientStub{ blocks: map[string][]byte{ "3858f62230ac3c915f300c664312c63f": []byte("foobar"), }} s.fs = s.client.SiteFileSystem(s.kc) } func (s *SiteFSSuite) TestHttpFileSystemInterface(c *check.C) { _, ok := s.fs.(http.FileSystem) c.Check(ok, check.Equals, true) } func (s *SiteFSSuite) TestByIDEmpty(c *check.C) { f, err := s.fs.Open("/by_id") c.Assert(err, check.IsNil) fis, err := f.Readdir(-1) c.Check(err, check.IsNil) c.Check(len(fis), check.Equals, 0) } func (s *SiteFSSuite) TestByUUIDAndPDH(c *check.C) { f, err := s.fs.Open("/by_id") c.Assert(err, check.IsNil) fis, err := f.Readdir(-1) c.Check(err, check.IsNil) c.Check(len(fis), check.Equals, 0) err = s.fs.Mkdir("/by_id/"+fixtureFooCollection, 0755) c.Check(err, check.Equals, os.ErrExist) f, err = s.fs.Open("/by_id/" + fixtureNonexistentCollection) c.Assert(err, check.Equals, os.ErrNotExist) for _, path := range []string{ fixtureFooCollection, fixtureFooCollectionPDH, fixtureAProjectUUID + "/" + fixtureFooCollectionName, } { f, err = s.fs.Open("/by_id/" + path) c.Assert(err, check.IsNil) fis, err = f.Readdir(-1) var names []string for _, fi := range fis { names = append(names, fi.Name()) } c.Check(names, check.DeepEquals, []string{"foo"}) } f, err = s.fs.Open("/by_id/" + fixtureAProjectUUID + "/A Subproject/baz_file") c.Assert(err, check.IsNil) fis, err = f.Readdir(-1) var names []string for _, fi := range fis { names = append(names, fi.Name()) } c.Check(names, check.DeepEquals, []string{"baz"}) _, err = s.fs.OpenFile("/by_id/"+fixtureNonexistentCollection, os.O_RDWR|os.O_CREATE, 0755) c.Check(err, check.Equals, ErrInvalidOperation) err = s.fs.Rename("/by_id/"+fixtureFooCollection, "/by_id/beep") c.Check(err, check.Equals, ErrInvalidArgument) err = s.fs.Rename("/by_id/"+fixtureFooCollection+"/foo", "/by_id/beep") c.Check(err, check.Equals, ErrInvalidArgument) _, err = s.fs.Stat("/by_id/beep") c.Check(err, check.Equals, os.ErrNotExist) err = s.fs.Rename("/by_id/"+fixtureFooCollection+"/foo", "/by_id/"+fixtureFooCollection+"/bar") c.Check(err, check.IsNil) err = s.fs.Rename("/by_id", "/beep") c.Check(err, check.Equals, ErrInvalidArgument) }
[ "\"ARVADOS_API_HOST\"" ]
[]
[ "ARVADOS_API_HOST" ]
[]
["ARVADOS_API_HOST"]
go
1
0
auth_token.go
/*
Copyright 2015 Home Office All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "fmt"
    "os"

    "github.com/hashicorp/vault/api"
)

// token authentication plugin
type authTokenPlugin struct {
    // the vault client
    client *api.Client
}

// NewUserTokenPlugin creates a new User Token plugin
func NewUserTokenPlugin(client *api.Client) AuthInterface {
    return &authTokenPlugin{
        client: client,
    }
}

// Create retrieves the token from an environment variable or file
func (r authTokenPlugin) Create(cfg *vaultAuthOptions) (string, error) {
    if cfg.FileName != "" {
        content, err := readConfigFile(cfg.FileName, cfg.FileFormat)
        if err != nil {
            return "", err
        }
        // check: ensure we have a token in the file
        token := content.Token
        if token == "" {
            return "", fmt.Errorf("the auth file: %s does not contain a token", cfg.FileName)
        }
        return token, nil
    }

    // step: check the VAULT_TOKEN
    if val := os.Getenv("VAULT_TOKEN"); val != "" {
        return val, nil
    }

    return "", fmt.Errorf("no token provided")
}
[ "\"VAULT_TOKEN\"" ]
[]
[ "VAULT_TOKEN" ]
[]
["VAULT_TOKEN"]
go
1
0
healthcheck/go/src/check_health.go
package main

import (
    "encoding/json"
    "io/ioutil"
    "os"
)

func main() {
    targetFolder := os.Getenv("SUMMARY_DIR")
    summaryJson, err := ioutil.ReadFile(targetFolder + "/all.json")
    check(err)

    var summary map[string]*HealthSummary
    err = json.Unmarshal([]byte(summaryJson), &summary)
    check(err)

    os.Exit(summary["all"].Status)
}
[ "\"SUMMARY_DIR\"" ]
[]
[ "SUMMARY_DIR" ]
[]
["SUMMARY_DIR"]
go
1
0
orcsome/run.py
import sys import signal import os.path import logging import argparse from . import VERSION, ev from .wm import WM from .actions import Actions from .testwm import TestWM logger = logging.getLogger(__name__) def load_config(wm, config): import orcsome orcsome._wm = wm env = {} sys.path.insert(0, os.path.dirname(config)) try: exec(compile(open(config, "rb").read(), config, 'exec'), env) except: logger.exception('Error on loading %s' % config) sys.exit(1) finally: sys.path.pop(0) def check_config(config): wm = TestWM() wm.mix(Actions) import orcsome orcsome._wm = wm env = {} sys.path.insert(0, os.path.dirname(config)) try: exec(compile(open(config, "rb").read(), config, 'exec'), env) except: logger.exception('Config file check failed %s' % config) return False finally: sys.path.pop(0) return True def run(): parser = argparse.ArgumentParser(prog='%prog ' + VERSION) parser.add_argument('-l', '--log', dest='log', metavar='FILE', help='Path to log file (log to stdout by default)') parser.add_argument('--log-level', metavar='LOGLEVEL', default='INFO', help='log level, default is INFO') # config_dir = os.getenv('XDG_CONFIG_HOME', os.path.expanduser('~/.config')) # default_rcfile = os.path.join(config_dir, 'orcsome', 'rc.py') config_dir = os.path.split(os.path.realpath(__file__))[0] default_rcfile = os.path.join(config_dir, 'rc.py') parser.add_argument('-c', '--config', dest='config', metavar='FILE', default=default_rcfile, help='Path to config file (%(default)s)') args = parser.parse_args() if args.log: handler = logging.FileHandler(args.log) else: handler = logging.StreamHandler() root_logger = logging.getLogger() root_logger.setLevel(args.log_level) handler.setFormatter(logging.Formatter( "%(asctime)s %(name)s %(levelname)s: %(message)s")) root_logger.addHandler(handler) loop = ev.Loop() wm = WM(loop) wm.mix(Actions) def stop(l, w, e): wm.stop(True) loop.break_() sigint = ev.SignalWatcher(stop, signal.SIGINT) sigint.start(loop) def on_restart(): if check_config(args.config): wm.stop() logger.info('Restarting...') load_config(wm, args.config) wm.init() wm.restart_handler = on_restart load_config(wm, args.config) wm.init() loop.run()
[]
[]
[ "XDG_CONFIG_HOME" ]
[]
["XDG_CONFIG_HOME"]
python
1
0
javatests/com/google/turbine/lower/LowerIntegrationTest.java
/* * Copyright 2016 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.turbine.lower; import static com.google.common.truth.Truth.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.stream.Collectors.toList; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.io.ByteStreams; import java.io.IOError; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.List; import java.util.Map; import java.util.jar.JarEntry; import java.util.jar.JarOutputStream; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; @RunWith(Parameterized.class) public class LowerIntegrationTest { @Parameters(name = "{index}: {0}") public static Iterable<Object[]> parameters() { String[] testCases = { "abstractenum.test", "access1.test", "anonymous.test", "asset.test", "outerparam.test", "basic_field.test", "basic_nested.test", "bcp.test", "builder.test", "byte.test", "byte2.test", "circ_cvar.test", "clash.test", "ctorvis.test", "cvar_qualified.test", "cycle.test", "default_fbound.test", "default_rawfbound.test", "default_simple.test", "enum1.test", "enumctor.test", "enumctor2.test", "enumimpl.test", "enumingeneric.test", "enuminner.test", "enumint.test", "enumint2.test", "enumint3.test", "enumint_byte.test", "enumint_objectmethod.test", "enumint_objectmethod2.test", "enumint_objectmethod_raw.test", "enuminthacks.test", "enumstat.test", "erasurebound.test", "existingctor.test", "extend_inner.test", "extends_bound.test", "extends_otherbound.test", "extendsandimplements.test", "extrainnerclass.test", "fbound.test", "firstcomparator.test", "fuse.test", "genericarrayfield.test", "genericexn.test", "genericexn2.test", "genericret.test", "hierarchy.test", "ibound.test", "icu.test", "icu2.test", "importinner.test", "innerctor.test", "innerenum.test", "innerint.test", "innerstaticgeneric.test", "interfacemem.test", "interfaces.test", "lexical.test", "lexical2.test", "lexical4.test", "list.test", "loopthroughb.test", "mapentry.test", "member.test", "mods.test", "morefields.test", "moremethods.test", "multifield.test", "nested.test", "nested2.test", "one.test", "outer.test", "packageprivateprotectedinner.test", "param_bound.test", "privateinner.test", "proto.test", "proto2.test", "qual.test", "raw.test", "raw2.test", "rawfbound.test", "rek.test", "samepkg.test", "self.test", "semi.test", "simple.test", "simplemethod.test", "string.test", "superabstract.test", "supplierfunction.test", "tbound.test", "typaram.test", "tyvarfield.test", "useextend.test", "vanillaexception.test", "varargs.test", "wild.test", "bytenoncanon.test", "canon.test", "genericnoncanon.test", "genericnoncanon1.test", "genericnoncanon10.test", "genericnoncanon2.test", "genericnoncanon3.test", 
"genericnoncanon4.test", "genericnoncanon5.test", "genericnoncanon6.test", "genericnoncanon8.test", "genericnoncanon9.test", "genericnoncanon_byte.test", "genericnoncanon_method3.test", "noncanon.test", "rawcanon.test", "wildboundcanon.test", "wildcanon.test", "annoconstvis.test", "const_byte.test", "const_char.test", "const_field.test", "const_types.test", "const_underscore.test", "constlevel.test", "constpack.test", "importconst.test", "const.test", "const_all.test", "const_arith.test", "const_conditional.test", "const_moreexpr.test", "const_multi.test", "field_anno.test", "annotation_bool_default.test", "annotation_class_default.test", "annotation_declaration.test", "annotation_enum_default.test", "annotations_default.test", "annouse.test", "annouse10.test", "annouse11.test", "annouse12.test", "annouse13.test", "annouse14.test", "annouse15.test", "annouse16.test", "annouse17.test", "annouse2.test", "annouse3.test", "annouse4.test", "annouse5.test", "annouse6.test", "annouse7.test", "annouse8.test", "annouse9.test", "annovis.test", "complex_param_anno.test", "enummemberanno.test", "innerannodecl.test", "source_anno_retention.test", "anno_nested.test", "nested_member_import.test", "nested_member_import_noncanon.test", "unary.test", "hex_int.test", "const_conv.test", "bmethod.test", "prim_class.test", "wild2.test", "wild3.test", "const_hiding.test", "interface_field.test", "concat.test", "static_type_import.test", "non_const.test", "bounds.test", "cast_tail.test", "marker.test", "interface_method.test", "raw_canon.test", "float_exponent.test", "boxed_const.test", "package_info.test", "import_wild_order.test", "canon_recursive.test", // TODO(cushon): crashes ASM, see: // https://gitlab.ow2.org/asm/asm/issues/317776 // "canon_array.test", "java_lang_object.test", "visible_package.test", "visible_private.test", "visible_same_package.test", "private_member.test", "visible_nested.test", "visible_qualified.test", "ascii_sub.test", "bytecode_boolean_const.test", "tyvar_bound.test", "type_anno_hello.test", "type_anno_array_dims.test", "nonconst_unary_expression.test", "type_anno_ambiguous.test", "type_anno_ambiguous_param.test", "unicode.test", "annotation_scope.test", "visible_package_private_toplevel.test", "receiver_param.test", "static_member_type_import.test", "type_anno_qual.test", "array_class_literal.test", "underscore_literal.test", "c_array.test", "type_anno_retention.test", "member_import_clash.test", "anno_repeated.test", "long_expression.test", "const_nonfinal.test", "enum_abstract.test", "deficient_types_classfile.test", "ctor_anno.test", "anno_const_coerce.test", "const_octal_underscore.test", "const_boxed.test", "interface_member_public.test", "javadoc_deprecated.test", "strictfp.test", "type_anno_raw.test", "inner_static.test", "innerclassanno.test", "type_anno_parameter_index.test", "anno_const_scope.test", "type_anno_ambiguous_qualified.test", "type_anno_array_bound.test", "type_anno_return.test", "type_anno_order.test", "canon_class_header.test", "type_anno_receiver.test", "enum_final.test", "dollar.test", "typaram_lookup.test", "typaram_lookup_enclosing.test", "B33513475.test", "B33513475b.test", "B33513475c.test", "noncanon_static_wild.test", "B8075274.test", "B8148131.test", "B8056066.test", "B8056066b.test", "source_bootclasspath_order.test", "anno_self_const.test", "type_anno_cstyle_array_dims.test", "packagedecl.test", "static_member_type_import_recursive.test", "B70953542.test", // TODO(cushon): support for source level 9 in integration tests // "B74332665.test", 
"memberimport.test", "type_anno_c_array.test", // https://bugs.openjdk.java.net/browse/JDK-8054064 ? "shadow_inherited.test", "static_final_boxed.test", "anno_void.test", }; List<Object[]> tests = ImmutableList.copyOf(testCases).stream().map(x -> new Object[] {x}).collect(toList()); String testShardIndex = System.getenv("TEST_SHARD_INDEX"); String testTotalShards = System.getenv("TEST_TOTAL_SHARDS"); if (testShardIndex == null || testTotalShards == null) { return tests; } String shardFile = System.getenv("TEST_SHARD_STATUS_FILE"); if (shardFile != null) { try { Files.write(Paths.get(shardFile), new byte[0]); } catch (IOException e) { throw new IOError(e); } } int index = Integer.parseInt(testShardIndex); int shards = Integer.parseInt(testTotalShards); return Lists.partition(tests, (tests.size() + shards - 1) / shards).get(index); } final String test; public LowerIntegrationTest(String test) { this.test = test; } @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void test() throws Exception { IntegrationTestSupport.TestInput input = IntegrationTestSupport.TestInput.parse( new String( ByteStreams.toByteArray(getClass().getResourceAsStream("testdata/" + test)), UTF_8)); ImmutableList<Path> classpathJar = ImmutableList.of(); if (!input.classes.isEmpty()) { Map<String, byte[]> classpath = IntegrationTestSupport.runJavac(input.classes, ImmutableList.of()); Path lib = temporaryFolder.newFile("lib.jar").toPath(); try (JarOutputStream jos = new JarOutputStream(Files.newOutputStream(lib))) { for (Map.Entry<String, byte[]> entry : classpath.entrySet()) { jos.putNextEntry(new JarEntry(entry.getKey() + ".class")); jos.write(entry.getValue()); } } classpathJar = ImmutableList.of(lib); } Map<String, byte[]> expected = IntegrationTestSupport.runJavac(input.sources, classpathJar); Map<String, byte[]> actual = IntegrationTestSupport.runTurbine(input.sources, classpathJar); assertThat(IntegrationTestSupport.dump(IntegrationTestSupport.sortMembers(actual))) .isEqualTo(IntegrationTestSupport.dump(IntegrationTestSupport.canonicalize(expected))); } }
[ "\"TEST_SHARD_INDEX\"", "\"TEST_TOTAL_SHARDS\"", "\"TEST_SHARD_STATUS_FILE\"" ]
[]
[ "TEST_TOTAL_SHARDS", "TEST_SHARD_INDEX", "TEST_SHARD_STATUS_FILE" ]
[]
["TEST_TOTAL_SHARDS", "TEST_SHARD_INDEX", "TEST_SHARD_STATUS_FILE"]
java
3
0
examples/Kafka/blueprint.py
# Calm DSL for Kafka (2.5.0) on AHV import os from calm.dsl.builtins import * # no_qa # Get env variables # read_env() reads from .env file present in blueprint top-level # directory and returns a dict of blueprint env variables and os env variables. # If it does not exist, it returns a dict of os env present in os.environ. # Custom env file location can also be given with relpath param. # relpath will look for file relative to blueprint top-level directory. # Examples: # read_env() # read_env(relpath=".env2") # read_env(relpath="env/dev") ENV = read_env() CENTOS_USER = ENV.get("CENTOS_USER", "centos") CENTOS_IMAGE_SOURCE = ENV.get( "CENTOS_IMAGE_SOURCE", "http://download.nutanix.com/calm/CentOS-7-x86_64-1810.qcow2" ) CENTOS_SSH_PRIVATE_KEY_NAME = ENV.get("CENTOS_SSH_PRIVATE_KEY_NAME", "centos") CENTOS_SSH_PUBLIC_KEY_NAME = ENV.get("CENTOS_SSH_PUBLIC_KEY_NAME", "centos_pub") AHV_NIC_NAME = ENV.get("AHV_NIC_NAME", "vlan.0") AHV_MEM = ENV.get("AHV_MEM", "4") KAFKA_URL = ENV.get( "KAFKA_URL", "http://www-us.apache.org/dist/kafka/2.5.0/kafka_2.12-2.5.0.tgz" ) ZOOKEEPER_DATA_DIR = ENV.get("ZOOKEEPER_DATA_DIR", "/home/centos/zookeepeer/data/") KAFKA_LOG_DIRS = ENV.get("KAFKA_LOG_DIRS", "/var/log/kafka-logs") NUMBER_OF_PARTITION = ENV.get("NUMBER_OF_PARTITION", "2") NUMBER_OF_NODES = ENV.get("NUMBER_OF_NODES", "3") # SSH Credentials # read_local_file() reads file from .local folder. # If it does not exist, it reads from [LOCAL_DIR] location given in ~/.calm/init.ini. CENTOS_KEY = read_local_file(CENTOS_SSH_PRIVATE_KEY_NAME) CENTOS_PUBLIC_KEY = read_local_file(CENTOS_SSH_PUBLIC_KEY_NAME) CENTOS_CRED = basic_cred( CENTOS_USER, CENTOS_KEY, name="Centos", type="KEY", default=True, ) # OS Image details for VM CENTOS_PACKAGE = vm_disk_package( name="centos_disk", config={"image": {"source": CENTOS_IMAGE_SOURCE}}, ) class Kafka(Service): @action def __start__(): CalmTask.Exec.ssh(name="Start kafka", filename="scripts/Startkafka.sh") class KafkaAhvVmResources(AhvVmResources): memory = int(AHV_MEM) vCPUs = 2 cores_per_vCPU = 1 disks = [ AhvVmDisk.Disk.Scsi.cloneFromVMDiskPackage(CENTOS_PACKAGE, bootable=True), ] nics = [AhvVmNic.DirectNic.ingress(AHV_NIC_NAME)] guest_customization = AhvVmGC.CloudInit( config={ "users": [ { "name": CENTOS_USER, "ssh-authorized-keys": [CENTOS_PUBLIC_KEY], "sudo": ["ALL=(ALL) NOPASSWD:ALL"], } ] } ) class KafkaAhvVm(AhvVm): resources = KafkaAhvVmResources class KafkaAhvSubstrate(Substrate): provider_spec = KafkaAhvVm class KafkaPackage(Package): services = [ref(Kafka)] @action def __install__(): CalmTask.Exec.ssh(name="Install Java", filename="scripts/InstallJava.sh") CalmTask.Exec.ssh(name="Install Kafka", filename="scripts/InstallKafka.sh") CalmTask.Exec.ssh(name="Configure Kafka", filename="scripts/ConfigureKafka.sh") class KafkaAhvDeployment(Deployment): min_replicas = NUMBER_OF_NODES max_replicas = NUMBER_OF_NODES packages = [ref(KafkaPackage)] substrate = ref(KafkaAhvSubstrate) class Ahv(Profile): deployments = [KafkaAhvDeployment] KAFKA_URL = CalmVariable.Simple(KAFKA_URL) ZOOKEEPER_DATA_DIR = CalmVariable.Simple(ZOOKEEPER_DATA_DIR) KAFKA_LOG_DIRS = CalmVariable.Simple(KAFKA_LOG_DIRS) NUMBER_OF_PARTITIONS = CalmVariable.Simple(NUMBER_OF_PARTITION) class KafkaBlueprint(Blueprint): """Three node Kafka Cluster""" services = [Kafka] packages = [KafkaPackage, CENTOS_PACKAGE] substrates = [KafkaAhvSubstrate] profiles = [Ahv] credentials = [CENTOS_CRED]
[]
[]
[]
[]
[]
python
0
0
src/wattbuild.py
import itertools import json import logging import os import platform import shutil import subprocess import textwrap from argparse import ArgumentParser from pathlib import Path from subprocess import PIPE def main() -> None: parser = ArgumentParser() parser.add_argument('--toolchain') parser.add_argument('--proc-macro2-rev', nargs='?', default=None) parser.add_argument('build_dependencies', nargs='+') args = parser.parse_args() logger = logging.getLogger() env = os.environ.copy() if args.toolchain: rustup_exe = shutil.which('rustup') if rustup_exe is None: raise Exception('`rustup` not found') env['CARGO'] = subprocess.run( [rustup_exe, 'which', 'cargo', '--toolchain', args.toolchain], stdout=PIPE, check=True, ).stdout.decode() cargo_command = [rustup_exe, 'run', args.toolchain, 'cargo'] else: if Path(os.environ['CARGO']).stem != 'cargo': cargo_exe = str(Path(os.environ['CARGO']).with_stem('cargo')) if not Path(cargo_exe).exists(): which_cargo = shutil.which('cargo') if which_cargo is None: raise Exception('`cargo` not found') cargo_exe = which_cargo logger.warning(f'`{os.environ["CARGO"]}` → `{cargo_exe}`') env['CARGO'] = cargo_exe cargo_command = [env['CARGO']] workdir = cache_dir() / 'wattbuild' workdir.mkdir(parents=True, exist_ok=True) if args.proc_macro2_rev is None: rev = '' else: rev = f', rev = "{args.proc_macro2_rev}"' manifest = textwrap.dedent( f'''\ [workspace] [patch.crates-io] proc-macro2 = {{ git = "https://github.com/dtolnay/watt"{rev} }} [package] name = "wattbuild-build" version = "0.0.0" edition = "2018" [build-dependencies] ''' ) for i, value in enumerate(args.build_dependencies): manifest += f'_{i} = {value}\n' with open(workdir / 'Cargo.toml', 'w') as file: file.write(manifest) (workdir / 'src').mkdir(exist_ok=True) with open(workdir / 'src' / 'lib.rs', 'w') as file: file.write('') subprocess.run([*cargo_command, 'update'], cwd=workdir, env=env, check=True) metadata = json.loads(subprocess.run( [*cargo_command, 'metadata', '--format-version', '1'], stdout=PIPE, cwd=workdir, env=env, check=True, ).stdout.decode()) node = next(node for node in metadata['resolve']['nodes'] if node['id'] == metadata['resolve']['root']) build_dependencies = [package for package in metadata['packages'] if package['id'] in node['dependencies']] subprocess.run( [*cargo_command, 'build', '--release', *itertools.chain.from_iterable( ['-p', f'{package["name"]}:{package["version"]}'] for package in build_dependencies ), '--target', 'wasm32-unknown-unknown'], stdout=PIPE, cwd=workdir, env=env, check=True, ) for path in Path(metadata['target_directory'], 'wasm32-unknown-unknown', 'release').glob('*.wasm'): shutil.copy(path, os.environ['OUT_DIR']) def cache_dir() -> Path: system = platform.uname().system home = Path(os.path.expanduser('~')) if system == 'Windows': if 'APPDATA' in os.environ: return Path(os.environ['APPDATA'], 'Local') return home / 'AppData' / 'Local' if system == 'Darwin': return home / 'Library' / 'Caches' if 'XDG_CACHE_DIR' in os.environ: return Path(os.environ['XDG_CACHE_DIR']) return home / '.cache' if __name__ == '__main__': main()
[]
[]
[ "APPDATA", "XDG_CACHE_DIR", "OUT_DIR", "CARGO" ]
[]
["APPDATA", "XDG_CACHE_DIR", "OUT_DIR", "CARGO"]
python
4
0
api/server.go
package api

import (
    "fmt"
    "log"
    "os"

    "github.com/elton/go-jwt-api/api/controllers"
    "github.com/joho/godotenv"
)

var server = controllers.Server{}

func Run() {
    err := godotenv.Load()
    if err != nil {
        log.Fatalf("Error getting env, not coming through %v", err)
    } else {
        fmt.Println("We are getting the env values.")
    }

    server.Initialize(os.Getenv("DB_DRIVER"), os.Getenv("DB_USER"), os.Getenv("DB_PASSWORD"), os.Getenv("DB_HOST"), os.Getenv("DB_PORT"), os.Getenv("DB_NAME"))
    server.Run(":8080")
}
[ "\"DB_DRIVER\"", "\"DB_USER\"", "\"DB_PASSWORD\"", "\"DB_HOST\"", "\"DB_PORT\"", "\"DB_NAME\"" ]
[]
[ "DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "DB_DRIVER", "DB_USER" ]
[]
["DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "DB_DRIVER", "DB_USER"]
go
6
0
providers/azure/aci.go
package azure import ( "bytes" "context" "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net" "net/http" "os" "reflect" "strings" "sync" "time" "github.com/gorilla/websocket" client "github.com/virtual-kubelet/azure-aci/client" "github.com/virtual-kubelet/azure-aci/client/aci" "github.com/virtual-kubelet/azure-aci/client/network" "github.com/virtual-kubelet/virtual-kubelet/log" "github.com/virtual-kubelet/virtual-kubelet/manager" "github.com/virtual-kubelet/virtual-kubelet/providers" "github.com/virtual-kubelet/virtual-kubelet/trace" v1 "k8s.io/api/core/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" ) const ( // The service account secret mount path. serviceAccountSecretMountPath = "/var/run/secrets/kubernetes.io/serviceaccount" virtualKubeletDNSNameLabel = "virtualkubelet.io/dnsnamelabel" subnetsAction = "Microsoft.Network/virtualNetworks/subnets/action" subnetDelegationService = "Microsoft.ContainerInstance/containerGroups" ) // DNS configuration settings const ( maxDNSNameservers = 3 maxDNSSearchPaths = 6 maxDNSSearchListChars = 256 ) const ( gpuResourceName v1.ResourceName = "nvidia.com/gpu" gpuTypeAnnotation = "virtual-kubelet.io/gpu-type" ) // ACIProvider implements the virtual-kubelet provider interface and communicates with Azure's ACI APIs. type ACIProvider struct { aciClient *aci.Client resourceManager *manager.ResourceManager resourceGroup string region string nodeName string operatingSystem string cpu string memory string pods string gpu string gpuSKUs []aci.GPUSKU internalIP string daemonEndpointPort int32 diagnostics *aci.ContainerGroupDiagnostics subnetName string subnetCIDR string vnetName string vnetResourceGroup string networkProfile string kubeProxyExtension *aci.Extension kubeDNSIP string extraUserAgent string metricsSync sync.Mutex metricsSyncTime time.Time lastMetric *stats.Summary } // AuthConfig is the secret returned from an ImageRegistryCredential type AuthConfig struct { Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` Auth string `json:"auth,omitempty"` Email string `json:"email,omitempty"` ServerAddress string `json:"serveraddress,omitempty"` IdentityToken string `json:"identitytoken,omitempty"` RegistryToken string `json:"registrytoken,omitempty"` } // See https://azure.microsoft.com/en-us/status/ for valid regions. var validAciRegions = []string{ "australiaeast", "canadacentral", "centralindia", "centralus", "eastasia", "eastus", "eastus2", "eastus2euap", "japaneast", "northcentralus", "northeurope", "southcentralus", "southeastasia", "southindia", "uksouth", "westcentralus", "westus", "westus2", "westeurope", } // isValidACIRegion checks to make sure we're using a valid ACI region func isValidACIRegion(region string) bool { regionLower := strings.ToLower(region) regionTrimmed := strings.Replace(regionLower, " ", "", -1) for _, validRegion := range validAciRegions { if regionTrimmed == validRegion { return true } } return false } // NewACIProvider creates a new ACIProvider. 
func NewACIProvider(config string, rm *manager.ResourceManager, nodeName, operatingSystem string, internalIP string, daemonEndpointPort int32) (*ACIProvider, error) { var p ACIProvider var err error p.resourceManager = rm if config != "" { f, err := os.Open(config) if err != nil { return nil, err } defer f.Close() if err := p.loadConfig(f); err != nil { return nil, err } } var azAuth *client.Authentication if authFilepath := os.Getenv("AZURE_AUTH_LOCATION"); authFilepath != "" { auth, err := client.NewAuthenticationFromFile(authFilepath) if err != nil { return nil, err } azAuth = auth } if acsFilepath := os.Getenv("ACS_CREDENTIAL_LOCATION"); acsFilepath != "" { acsCredential, err := NewAcsCredential(acsFilepath) if err != nil { return nil, err } if acsCredential != nil { if acsCredential.Cloud != client.PublicCloud.Name { return nil, fmt.Errorf("ACI only supports Public Azure. '%v' is not supported", acsCredential.Cloud) } azAuth = client.NewAuthentication( acsCredential.Cloud, acsCredential.ClientID, acsCredential.ClientSecret, acsCredential.SubscriptionID, acsCredential.TenantID) p.resourceGroup = acsCredential.ResourceGroup p.region = acsCredential.Region p.vnetName = acsCredential.VNetName p.vnetResourceGroup = acsCredential.VNetResourceGroup if p.vnetResourceGroup == "" { p.vnetResourceGroup = p.resourceGroup } } } if clientID := os.Getenv("AZURE_CLIENT_ID"); clientID != "" { azAuth.ClientID = clientID } if clientSecret := os.Getenv("AZURE_CLIENT_SECRET"); clientSecret != "" { azAuth.ClientSecret = clientSecret } if tenantID := os.Getenv("AZURE_TENANT_ID"); tenantID != "" { azAuth.TenantID = tenantID } if subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID"); subscriptionID != "" { azAuth.SubscriptionID = subscriptionID } p.extraUserAgent = os.Getenv("ACI_EXTRA_USER_AGENT") p.aciClient, err = aci.NewClient(azAuth, p.extraUserAgent) if err != nil { return nil, err } // If the log analytics file has been specified, load workspace credentials from the file if logAnalyticsAuthFile := os.Getenv("LOG_ANALYTICS_AUTH_LOCATION"); logAnalyticsAuthFile != "" { p.diagnostics, err = aci.NewContainerGroupDiagnosticsFromFile(logAnalyticsAuthFile) if err != nil { return nil, err } } // If we have both the log analytics workspace id and key, add them to the provider // Environment variables overwrite the values provided in the file if logAnalyticsID := os.Getenv("LOG_ANALYTICS_ID"); logAnalyticsID != "" { if logAnalyticsKey := os.Getenv("LOG_ANALYTICS_KEY"); logAnalyticsKey != "" { p.diagnostics, err = aci.NewContainerGroupDiagnostics(logAnalyticsID, logAnalyticsKey) if err != nil { return nil, err } } } if clusterResourceID := os.Getenv("CLUSTER_RESOURCE_ID"); clusterResourceID != "" { if p.diagnostics != nil && p.diagnostics.LogAnalytics != nil { p.diagnostics.LogAnalytics.LogType = aci.LogAnlyticsLogTypeContainerInsights p.diagnostics.LogAnalytics.Metadata = map[string]string{ aci.LogAnalyticsMetadataKeyClusterResourceID: clusterResourceID, aci.LogAnalyticsMetadataKeyNodeName: nodeName, } } } if rg := os.Getenv("ACI_RESOURCE_GROUP"); rg != "" { p.resourceGroup = rg } if p.resourceGroup == "" { return nil, errors.New("Resource group can not be empty please set ACI_RESOURCE_GROUP") } if r := os.Getenv("ACI_REGION"); r != "" { p.region = r } if p.region == "" { return nil, errors.New("Region can not be empty please set ACI_REGION") } if r := p.region; !isValidACIRegion(r) { unsupportedRegionMessage := fmt.Sprintf("Region %s is invalid. 
Current supported regions are: %s", r, strings.Join(validAciRegions, ", ")) return nil, errors.New(unsupportedRegionMessage) } if err := p.setupCapacity(context.TODO()); err != nil { return nil, err } p.operatingSystem = operatingSystem p.nodeName = nodeName p.internalIP = internalIP p.daemonEndpointPort = daemonEndpointPort if subnetName := os.Getenv("ACI_SUBNET_NAME"); p.vnetName != "" && subnetName != "" { p.subnetName = subnetName } if subnetCIDR := os.Getenv("ACI_SUBNET_CIDR"); subnetCIDR != "" { if p.subnetName == "" { return nil, fmt.Errorf("subnet CIDR defined but no subnet name, subnet name is required to set a subnet CIDR") } if _, _, err := net.ParseCIDR(subnetCIDR); err != nil { return nil, fmt.Errorf("error parsing provided subnet range: %v", err) } p.subnetCIDR = subnetCIDR } if p.subnetName != "" { if err := p.setupNetworkProfile(azAuth); err != nil { return nil, fmt.Errorf("error setting up network profile: %v", err) } masterURI := os.Getenv("MASTER_URI") if masterURI == "" { masterURI = "10.0.0.1" } clusterCIDR := os.Getenv("CLUSTER_CIDR") if clusterCIDR == "" { clusterCIDR = "10.240.0.0/16" } p.kubeProxyExtension, err = getKubeProxyExtension(serviceAccountSecretMountPath, masterURI, clusterCIDR) if err != nil { return nil, fmt.Errorf("error creating kube proxy extension: %v", err) } p.kubeDNSIP = "10.0.0.10" if kubeDNSIP := os.Getenv("KUBE_DNS_IP"); kubeDNSIP != "" { p.kubeDNSIP = kubeDNSIP } } return &p, err } func (p *ACIProvider) setupCapacity(ctx context.Context) error { ctx, span := trace.StartSpan(ctx, "setupCapacity") defer span.End() logger := log.G(ctx).WithField("method", "setupCapacity") // Set sane defaults for Capacity in case config is not supplied p.cpu = "800" p.memory = "4Ti" p.pods = "800" if cpuQuota := os.Getenv("ACI_QUOTA_CPU"); cpuQuota != "" { p.cpu = cpuQuota } if memoryQuota := os.Getenv("ACI_QUOTA_MEMORY"); memoryQuota != "" { p.memory = memoryQuota } if podsQuota := os.Getenv("ACI_QUOTA_POD"); podsQuota != "" { p.pods = podsQuota } metadata, err := p.aciClient.GetResourceProviderMetadata(ctx) if err != nil { msg := "Unable to fetch the ACI metadata" logger.WithError(err).Error(msg) return err } if metadata == nil || metadata.GPURegionalSKUs == nil { logger.Warn("ACI GPU capacity is not enabled. GPU capacity will be disabled") return nil } for _, regionalSKU := range metadata.GPURegionalSKUs { if strings.EqualFold(regionalSKU.Location, p.region) && len(regionalSKU.SKUs) != 0 { p.gpu = "100" if gpu := os.Getenv("ACI_QUOTA_GPU"); gpu != "" { p.gpu = gpu } p.gpuSKUs = regionalSKU.SKUs } } return nil } func (p *ACIProvider) setupNetworkProfile(auth *client.Authentication) error { c, err := network.NewClient(auth, p.extraUserAgent) if err != nil { return fmt.Errorf("error creating azure networking client: %v", err) } createSubnet := true subnet, err := c.GetSubnet(p.vnetResourceGroup, p.vnetName, p.subnetName) if err != nil && !network.IsNotFound(err) { return fmt.Errorf("error while looking up subnet: %v", err) } if network.IsNotFound(err) && p.subnetCIDR == "" { return fmt.Errorf("subnet '%s' is not found in vnet '%s' in resource group '%s' and subnet CIDR is not specified", p.subnetName, p.vnetName, p.vnetResourceGroup) } if err == nil { if p.subnetCIDR == "" { p.subnetCIDR = *subnet.SubnetPropertiesFormat.AddressPrefix } if p.subnetCIDR != *subnet.SubnetPropertiesFormat.AddressPrefix { return fmt.Errorf("found subnet '%s' using different CIDR: '%s'. 
desired: '%s'", p.subnetName, *subnet.SubnetPropertiesFormat.AddressPrefix, p.subnetCIDR) } if subnet.SubnetPropertiesFormat.RouteTable != nil { return fmt.Errorf("unable to delegate subnet '%s' to Azure Container Instance since it references the route table '%s'.", p.subnetName, *subnet.SubnetPropertiesFormat.RouteTable.ID) } if subnet.SubnetPropertiesFormat.ServiceAssociationLinks != nil { for _, l := range *subnet.SubnetPropertiesFormat.ServiceAssociationLinks { if l.ServiceAssociationLinkPropertiesFormat != nil && *l.ServiceAssociationLinkPropertiesFormat.LinkedResourceType == subnetDelegationService { createSubnet = false break } return fmt.Errorf("unable to delegate subnet '%s' to Azure Container Instance as it is used by other Azure resource: '%v'.", p.subnetName, l) } } else { for _, d := range *subnet.SubnetPropertiesFormat.Delegations { if d.ServiceDelegationPropertiesFormat != nil && *d.ServiceDelegationPropertiesFormat.ServiceName == subnetDelegationService { createSubnet = false break } } } } if createSubnet { subnet = network.NewSubnetWithContainerInstanceDelegation(p.subnetName, p.subnetCIDR) subnet, err = c.CreateOrUpdateSubnet(p.vnetResourceGroup, p.vnetName, subnet) if err != nil { return fmt.Errorf("error creating subnet: %v", err) } } networkProfileName := getNetworkProfileName(*subnet.ID) profile, err := c.GetProfile(p.resourceGroup, networkProfileName) if err != nil && !network.IsNotFound(err) { return fmt.Errorf("error while looking up network profile: %v", err) } if err == nil { for _, config := range *profile.ProfilePropertiesFormat.ContainerNetworkInterfaceConfigurations { for _, ipConfig := range *config.ContainerNetworkInterfaceConfigurationPropertiesFormat.IPConfigurations { if *ipConfig.IPConfigurationProfilePropertiesFormat.Subnet.ID == *subnet.ID { p.networkProfile = *profile.ID return nil } } } } // at this point, profile should be nil profile = network.NewNetworkProfile(networkProfileName, p.region, *subnet.ID) profile, err = c.CreateOrUpdateProfile(p.resourceGroup, profile) if err != nil { return err } p.networkProfile = *profile.ID return nil } func getNetworkProfileName(subnetID string) string { h := sha256.New() h.Write([]byte(strings.ToUpper(subnetID))) hashBytes := h.Sum(nil) return fmt.Sprintf("vk-%s", hex.EncodeToString(hashBytes)) } func getKubeProxyExtension(secretPath, masterURI, clusterCIDR string) (*aci.Extension, error) { ca, err := ioutil.ReadFile(secretPath + "/ca.crt") if err != nil { return nil, fmt.Errorf("failed to read ca.crt file: %v", err) } var token []byte token, err = ioutil.ReadFile(secretPath + "/token") if err != nil { return nil, fmt.Errorf("failed to read token file: %v", err) } name := "virtual-kubelet" config := clientcmdv1.Config{ APIVersion: "v1", Kind: "Config", Clusters: []clientcmdv1.NamedCluster{ clientcmdv1.NamedCluster{ Name: name, Cluster: clientcmdv1.Cluster{ Server: masterURI, CertificateAuthorityData: ca, }, }, }, AuthInfos: []clientcmdv1.NamedAuthInfo{ clientcmdv1.NamedAuthInfo{ Name: name, AuthInfo: clientcmdv1.AuthInfo{ Token: string(token), }, }, }, Contexts: []clientcmdv1.NamedContext{ clientcmdv1.NamedContext{ Name: name, Context: clientcmdv1.Context{ Cluster: name, AuthInfo: name, }, }, }, CurrentContext: name, } b := new(bytes.Buffer) if err := json.NewEncoder(b).Encode(config); err != nil { return nil, fmt.Errorf("failed to encode the kubeconfig: %v", err) } extension := aci.Extension{ Name: "kube-proxy", Properties: &aci.ExtensionProperties{ Type: aci.ExtensionTypeKubeProxy, Version: 
aci.ExtensionVersion1_0, Settings: map[string]string{ aci.KubeProxyExtensionSettingClusterCIDR: clusterCIDR, aci.KubeProxyExtensionSettingKubeVersion: aci.KubeProxyExtensionKubeVersion, }, ProtectedSettings: map[string]string{ aci.KubeProxyExtensionSettingKubeConfig: base64.StdEncoding.EncodeToString(b.Bytes()), }, }, } return &extension, nil } func addAzureAttributes(ctx context.Context, span trace.Span, p *ACIProvider) context.Context { return span.WithFields(ctx, log.Fields{ "azure.resourceGroup": p.resourceGroup, "azure.region": p.region, }) } // CreatePod accepts a Pod definition and creates // an ACI deployment func (p *ACIProvider) CreatePod(ctx context.Context, pod *v1.Pod) error { ctx, span := trace.StartSpan(ctx, "aci.CreatePod") defer span.End() ctx = addAzureAttributes(ctx, span, p) var containerGroup aci.ContainerGroup containerGroup.Location = p.region containerGroup.RestartPolicy = aci.ContainerGroupRestartPolicy(pod.Spec.RestartPolicy) containerGroup.ContainerGroupProperties.OsType = aci.OperatingSystemTypes(p.OperatingSystem()) // get containers containers, err := p.getContainers(pod) if err != nil { return err } // get registry creds creds, err := p.getImagePullSecrets(pod) if err != nil { return err } // get volumes volumes, err := p.getVolumes(pod) if err != nil { return err } // assign all the things containerGroup.ContainerGroupProperties.Containers = containers containerGroup.ContainerGroupProperties.Volumes = volumes containerGroup.ContainerGroupProperties.ImageRegistryCredentials = creds containerGroup.ContainerGroupProperties.Diagnostics = p.getDiagnostics(pod) filterServiceAccountSecretVolume(p.operatingSystem, &containerGroup) // create ipaddress if containerPort is used count := 0 for _, container := range containers { count = count + len(container.Ports) } ports := make([]aci.Port, 0, count) for _, container := range containers { for _, containerPort := range container.Ports { ports = append(ports, aci.Port{ Port: containerPort.Port, Protocol: aci.ContainerGroupNetworkProtocol("TCP"), }) } } if len(ports) > 0 && p.subnetName == "" { containerGroup.ContainerGroupProperties.IPAddress = &aci.IPAddress{ Ports: ports, Type: "Public", } if dnsNameLabel := pod.Annotations[virtualKubeletDNSNameLabel]; dnsNameLabel != "" { containerGroup.ContainerGroupProperties.IPAddress.DNSNameLabel = dnsNameLabel } } podUID := string(pod.UID) podCreationTimestamp := pod.CreationTimestamp.String() containerGroup.Tags = map[string]string{ "PodName": pod.Name, "ClusterName": pod.ClusterName, "NodeName": pod.Spec.NodeName, "Namespace": pod.Namespace, "UID": podUID, "CreationTimestamp": podCreationTimestamp, } p.amendVnetResources(&containerGroup, pod) _, err = p.aciClient.CreateContainerGroup( ctx, p.resourceGroup, containerGroupName(pod), containerGroup, ) return err } func (p *ACIProvider) amendVnetResources(containerGroup *aci.ContainerGroup, pod *v1.Pod) { if p.networkProfile == "" { return } containerGroup.NetworkProfile = &aci.NetworkProfileDefinition{ID: p.networkProfile} containerGroup.ContainerGroupProperties.Extensions = []*aci.Extension{p.kubeProxyExtension} containerGroup.ContainerGroupProperties.DNSConfig = p.getDNSConfig(pod.Spec.DNSPolicy, pod.Spec.DNSConfig) } func (p *ACIProvider) getDNSConfig(dnsPolicy v1.DNSPolicy, dnsConfig *v1.PodDNSConfig) *aci.DNSConfig { nameServers := make([]string, 0) if dnsPolicy == v1.DNSClusterFirst || dnsPolicy == v1.DNSClusterFirstWithHostNet { nameServers = append(nameServers, p.kubeDNSIP) } searchDomains := []string{} options := 
[]string{} if dnsConfig != nil { nameServers = omitDuplicates(append(nameServers, dnsConfig.Nameservers...)) searchDomains = omitDuplicates(dnsConfig.Searches) for _, option := range dnsConfig.Options { op := option.Name if option.Value != nil && *(option.Value) != "" { op = op + ":" + *(option.Value) } options = append(options, op) } } if len(nameServers) == 0 { return nil } result := aci.DNSConfig{ NameServers: formDNSNameserversFitsLimits(nameServers), SearchDomains: formDNSSearchFitsLimits(searchDomains), Options: strings.Join(options, " "), } return &result } func omitDuplicates(strs []string) []string { uniqueStrs := make(map[string]bool) var ret []string for _, str := range strs { if !uniqueStrs[str] { ret = append(ret, str) uniqueStrs[str] = true } } return ret } func formDNSNameserversFitsLimits(nameservers []string) []string { if len(nameservers) > maxDNSNameservers { nameservers = nameservers[:maxDNSNameservers] msg := fmt.Sprintf("Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: %s", strings.Join(nameservers, ";")) log.G(context.TODO()).WithField("method", "formDNSNameserversFitsLimits").Warn(msg) } return nameservers } func formDNSSearchFitsLimits(searches []string) string { limitsExceeded := false if len(searches) > maxDNSSearchPaths { searches = searches[:maxDNSSearchPaths] limitsExceeded = true } if resolvSearchLineStrLen := len(strings.Join(searches, " ")); resolvSearchLineStrLen > maxDNSSearchListChars { cutDomainsNum := 0 cutDomainsLen := 0 for i := len(searches) - 1; i >= 0; i-- { cutDomainsLen += len(searches[i]) + 1 cutDomainsNum++ if (resolvSearchLineStrLen - cutDomainsLen) <= maxDNSSearchListChars { break } } searches = searches[:(len(searches) - cutDomainsNum)] limitsExceeded = true } if limitsExceeded { msg := fmt.Sprintf("Search Line limits were exceeded, some search paths have been omitted, the applied search line is: %s", strings.Join(searches, ";")) log.G(context.TODO()).WithField("method", "formDNSSearchFitsLimits").Warn(msg) } return strings.Join(searches, " ") } func (p *ACIProvider) getDiagnostics(pod *v1.Pod) *aci.ContainerGroupDiagnostics { if p.diagnostics != nil && p.diagnostics.LogAnalytics != nil && p.diagnostics.LogAnalytics.LogType == aci.LogAnlyticsLogTypeContainerInsights { d := *p.diagnostics d.LogAnalytics.Metadata[aci.LogAnalyticsMetadataKeyPodUUID] = string(pod.ObjectMeta.UID) return &d } return p.diagnostics } func containerGroupName(pod *v1.Pod) string { return fmt.Sprintf("%s-%s", pod.Namespace, pod.Name) } // UpdatePod is a noop, ACI currently does not support live updates of a pod. func (p *ACIProvider) UpdatePod(ctx context.Context, pod *v1.Pod) error { return nil } // DeletePod deletes the specified pod out of ACI. func (p *ACIProvider) DeletePod(ctx context.Context, pod *v1.Pod) error { ctx, span := trace.StartSpan(ctx, "aci.DeletePod") defer span.End() ctx = addAzureAttributes(ctx, span, p) err := p.aciClient.DeleteContainerGroup(ctx, p.resourceGroup, fmt.Sprintf("%s-%s", pod.Namespace, pod.Name)) return wrapError(err) } // GetPod returns a pod by name that is running inside ACI // returns nil if a pod by that name is not found. 
func (p *ACIProvider) GetPod(ctx context.Context, namespace, name string) (*v1.Pod, error) { ctx, span := trace.StartSpan(ctx, "aci.GetPod") defer span.End() ctx = addAzureAttributes(ctx, span, p) cg, status, err := p.aciClient.GetContainerGroup(ctx, p.resourceGroup, fmt.Sprintf("%s-%s", namespace, name)) if err != nil { if status != nil && *status == http.StatusNotFound { return nil, nil } return nil, err } if cg.Tags["NodeName"] != p.nodeName { return nil, nil } return containerGroupToPod(cg) } // GetContainerLogs returns the logs of a pod by name that is running inside ACI. func (p *ACIProvider) GetContainerLogs(ctx context.Context, namespace, podName, containerName string, tail int) (string, error) { ctx, span := trace.StartSpan(ctx, "aci.GetContainerLogs") defer span.End() ctx = addAzureAttributes(ctx, span, p) logContent := "" cg, _, err := p.aciClient.GetContainerGroup(ctx, p.resourceGroup, fmt.Sprintf("%s-%s", namespace, podName)) if err != nil { return logContent, err } if cg.Tags["NodeName"] != p.nodeName { return logContent, nil } // get logs from cg retry := 10 var retries int for retries = 0; retries < retry; retries++ { cLogs, err := p.aciClient.GetContainerLogs(ctx, p.resourceGroup, cg.Name, containerName, tail) if err != nil { log.G(ctx).WithField("method", "GetContainerLogs").WithError(err).Debug("Error getting container logs, retrying") time.Sleep(5000 * time.Millisecond) } else { logContent = cLogs.Content break } } return logContent, err } // GetPodFullName as defined in the provider context func (p *ACIProvider) GetPodFullName(namespace string, pod string) string { return fmt.Sprintf("%s-%s", namespace, pod) } // RunInContainer executes a command in a container in the pod, copying data // between in/out/err and the container's stdin/stdout/stderr. func (p *ACIProvider) RunInContainer(ctx context.Context, namespace, name, container string, cmd []string, attach providers.AttachIO) error { out := attach.Stdout() if out != nil { defer out.Close() } cg, _, err := p.aciClient.GetContainerGroup(ctx, p.resourceGroup, p.GetPodFullName(namespace, name)) if err != nil { return err } // Set default terminal size size := providers.TermSize{ Height: 60, Width: 120, } resize := attach.Resize() if resize != nil { select { case size = <-resize: case <-ctx.Done(): return ctx.Err() } } ts := aci.TerminalSizeRequest{Height: int(size.Height), Width: int(size.Width)} xcrsp, err := p.aciClient.LaunchExec(p.resourceGroup, cg.Name, container, cmd[0], ts) if err != nil { return err } wsURI := xcrsp.WebSocketURI password := xcrsp.Password c, _, _ := websocket.DefaultDialer.Dial(wsURI, nil) c.WriteMessage(websocket.TextMessage, []byte(password)) // Websocket password needs to be sent before WS terminal is active // Cleanup on exit defer c.Close() in := attach.Stdin() if in != nil { go func() { for { select { case <-ctx.Done(): return default: } var msg = make([]byte, 512) n, err := in.Read(msg) if err == io.EOF { // Handle EOF } if err != nil { // Handle errors return } if n > 0 { // Only call WriteMessage if there is data to send c.WriteMessage(websocket.BinaryMessage, msg[:n]) } } }() } if out != nil { for { select { case <-ctx.Done(): break default: } _, cr, err := c.NextReader() if err != nil { // Handle errors break } io.Copy(out, cr) } } return ctx.Err() } // GetPodStatus returns the status of a pod by name that is running inside ACI // returns nil if a pod by that name is not found. 
func (p *ACIProvider) GetPodStatus(ctx context.Context, namespace, name string) (*v1.PodStatus, error) { ctx, span := trace.StartSpan(ctx, "aci.GetPodStatus") defer span.End() ctx = addAzureAttributes(ctx, span, p) pod, err := p.GetPod(ctx, namespace, name) if err != nil { return nil, err } if pod == nil { return nil, nil } return &pod.Status, nil } // GetPods returns a list of all pods known to be running within ACI. func (p *ACIProvider) GetPods(ctx context.Context) ([]*v1.Pod, error) { ctx, span := trace.StartSpan(ctx, "aci.GetPods") defer span.End() ctx = addAzureAttributes(ctx, span, p) cgs, err := p.aciClient.ListContainerGroups(ctx, p.resourceGroup) if err != nil { return nil, err } pods := make([]*v1.Pod, 0, len(cgs.Value)) for _, cg := range cgs.Value { c := cg if cg.Tags["NodeName"] != p.nodeName { continue } p, err := containerGroupToPod(&c) if err != nil { log.G(ctx).WithFields(log.Fields{ "name": c.Name, "id": c.ID, }).WithError(err).Error("error converting container group to pod") continue } pods = append(pods, p) } return pods, nil } // Capacity returns a resource list containing the capacity limits set for ACI. func (p *ACIProvider) Capacity(ctx context.Context) v1.ResourceList { resourceList := v1.ResourceList{ v1.ResourceCPU: resource.MustParse(p.cpu), v1.ResourceMemory: resource.MustParse(p.memory), v1.ResourcePods: resource.MustParse(p.pods), } if p.gpu != "" { resourceList[gpuResourceName] = resource.MustParse(p.gpu) } return resourceList } // NodeConditions returns a list of conditions (Ready, OutOfDisk, etc), for updates to the node status // within Kubernetes. func (p *ACIProvider) NodeConditions(ctx context.Context) []v1.NodeCondition { // TODO: Make these dynamic and augment with custom ACI specific conditions of interest return []v1.NodeCondition{ { Type: "Ready", Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletReady", Message: "kubelet is ready.", }, { Type: "OutOfDisk", Status: v1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletHasSufficientDisk", Message: "kubelet has sufficient disk space available", }, { Type: "MemoryPressure", Status: v1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletHasSufficientMemory", Message: "kubelet has sufficient memory available", }, { Type: "DiskPressure", Status: v1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletHasNoDiskPressure", Message: "kubelet has no disk pressure", }, { Type: "NetworkUnavailable", Status: v1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "RouteCreated", Message: "RouteController created a route", }, } } // NodeAddresses returns a list of addresses for the node status // within Kubernetes. func (p *ACIProvider) NodeAddresses(ctx context.Context) []v1.NodeAddress { // TODO: Make these dynamic and augment with custom ACI specific conditions of interest return []v1.NodeAddress{ { Type: "InternalIP", Address: p.internalIP, }, } } // NodeDaemonEndpoints returns NodeDaemonEndpoints for the node status // within Kubernetes. func (p *ACIProvider) NodeDaemonEndpoints(ctx context.Context) *v1.NodeDaemonEndpoints { return &v1.NodeDaemonEndpoints{ KubeletEndpoint: v1.DaemonEndpoint{ Port: p.daemonEndpointPort, }, } } // OperatingSystem returns the operating system that was provided by the config. 
func (p *ACIProvider) OperatingSystem() string { return p.operatingSystem } func (p *ACIProvider) getImagePullSecrets(pod *v1.Pod) ([]aci.ImageRegistryCredential, error) { ips := make([]aci.ImageRegistryCredential, 0, len(pod.Spec.ImagePullSecrets)) for _, ref := range pod.Spec.ImagePullSecrets { secret, err := p.resourceManager.GetSecret(ref.Name, pod.Namespace) if err != nil { return ips, err } if secret == nil { return nil, fmt.Errorf("error getting image pull secret") } // TODO: Check if secret type is v1.SecretTypeDockercfg and use DockerConfigKey instead of hardcoded value // TODO: Check if secret type is v1.SecretTypeDockerConfigJson and use DockerConfigJsonKey to determine if it's in json format // TODO: Return error if it's not one of these two types switch secret.Type { case v1.SecretTypeDockercfg: ips, err = readDockerCfgSecret(secret, ips) case v1.SecretTypeDockerConfigJson: ips, err = readDockerConfigJSONSecret(secret, ips) default: return nil, fmt.Errorf("image pull secret type is not one of kubernetes.io/dockercfg or kubernetes.io/dockerconfigjson") } if err != nil { return ips, err } } return ips, nil } func makeRegistryCredential(server string, authConfig AuthConfig) (*aci.ImageRegistryCredential, error) { username := authConfig.Username password := authConfig.Password if username == "" { if authConfig.Auth == "" { return nil, fmt.Errorf("no username present in auth config for server: %s", server) } decoded, err := base64.StdEncoding.DecodeString(authConfig.Auth) if err != nil { return nil, fmt.Errorf("error decoding the auth for server: %s Error: %v", server, err) } parts := strings.Split(string(decoded), ":") if len(parts) != 2 { return nil, fmt.Errorf("malformed auth for server: %s", server) } username = parts[0] password = parts[1] } cred := aci.ImageRegistryCredential{ Server: server, Username: username, Password: password, } return &cred, nil } func readDockerCfgSecret(secret *v1.Secret, ips []aci.ImageRegistryCredential) ([]aci.ImageRegistryCredential, error) { var err error var authConfigs map[string]AuthConfig repoData, ok := secret.Data[string(v1.DockerConfigKey)] if !ok { return ips, fmt.Errorf("no dockercfg present in secret") } err = json.Unmarshal(repoData, &authConfigs) if err != nil { return ips, err } for server := range authConfigs { cred, err := makeRegistryCredential(server, authConfigs[server]) if err != nil { return ips, err } ips = append(ips, *cred) } return ips, err } func readDockerConfigJSONSecret(secret *v1.Secret, ips []aci.ImageRegistryCredential) ([]aci.ImageRegistryCredential, error) { var err error repoData, ok := secret.Data[string(v1.DockerConfigJsonKey)] if !ok { return ips, fmt.Errorf("no dockerconfigjson present in secret") } var authConfigs map[string]map[string]AuthConfig err = json.Unmarshal(repoData, &authConfigs) if err != nil { return ips, err } auths, ok := authConfigs["auths"] if !ok { return ips, fmt.Errorf("malformed dockerconfigjson in secret") } for server := range auths { cred, err := makeRegistryCredential(server, auths[server]) if err != nil { return ips, err } ips = append(ips, *cred) } return ips, err } func (p *ACIProvider) getContainers(pod *v1.Pod) ([]aci.Container, error) { containers := make([]aci.Container, 0, len(pod.Spec.Containers)) for _, container := range pod.Spec.Containers { c := aci.Container{ Name: container.Name, ContainerProperties: aci.ContainerProperties{ Image: container.Image, Command: append(container.Command, container.Args...), Ports: make([]aci.ContainerPort, 0, len(container.Ports)), }, } for 
_, p := range container.Ports { c.Ports = append(c.Ports, aci.ContainerPort{ Port: p.ContainerPort, Protocol: getProtocol(p.Protocol), }) } c.VolumeMounts = make([]aci.VolumeMount, 0, len(container.VolumeMounts)) for _, v := range container.VolumeMounts { c.VolumeMounts = append(c.VolumeMounts, aci.VolumeMount{ Name: v.Name, MountPath: v.MountPath, ReadOnly: v.ReadOnly, }) } c.EnvironmentVariables = make([]aci.EnvironmentVariable, 0, len(container.Env)) for _, e := range container.Env { if e.Value != "" { envVar := getACIEnvVar(e) c.EnvironmentVariables = append(c.EnvironmentVariables, envVar) } } // NOTE(robbiezhang): ACI CPU request must be times of 10m cpuRequest := 1.00 if _, ok := container.Resources.Requests[v1.ResourceCPU]; ok { cpuRequest = float64(container.Resources.Requests.Cpu().MilliValue()/10.00) / 100.00 if cpuRequest < 0.01 { cpuRequest = 0.01 } } // NOTE(robbiezhang): ACI memory request must be times of 0.1 GB memoryRequest := 1.50 if _, ok := container.Resources.Requests[v1.ResourceMemory]; ok { memoryRequest = float64(container.Resources.Requests.Memory().Value()/100000000.00) / 10.00 if memoryRequest < 0.10 { memoryRequest = 0.10 } } c.Resources = aci.ResourceRequirements{ Requests: &aci.ComputeResources{ CPU: cpuRequest, MemoryInGB: memoryRequest, }, } if container.Resources.Limits != nil { cpuLimit := cpuRequest if _, ok := container.Resources.Limits[v1.ResourceCPU]; ok { cpuLimit = float64(container.Resources.Limits.Cpu().MilliValue()) / 1000.00 } memoryLimit := memoryRequest if _, ok := container.Resources.Limits[v1.ResourceMemory]; ok { memoryLimit = float64(container.Resources.Limits.Memory().Value()) / 1000000000.00 } c.Resources.Limits = &aci.ComputeResources{ CPU: cpuLimit, MemoryInGB: memoryLimit, } if gpu, ok := container.Resources.Limits[gpuResourceName]; ok { sku, err := p.getGPUSKU(pod) if err != nil { return nil, err } if gpu.Value() == 0 { return nil, errors.New("GPU must be a integer number") } gpuResource := &aci.GPUResource{ Count: int32(gpu.Value()), SKU: sku, } c.Resources.Requests.GPU = gpuResource c.Resources.Limits.GPU = gpuResource } } if container.LivenessProbe != nil { probe, err := getProbe(container.LivenessProbe, container.Ports) if err != nil { return nil, err } c.LivenessProbe = probe } if container.ReadinessProbe != nil { probe, err := getProbe(container.ReadinessProbe, container.Ports) if err != nil { return nil, err } c.ReadinessProbe = probe } containers = append(containers, c) } return containers, nil } func (p *ACIProvider) getGPUSKU(pod *v1.Pod) (aci.GPUSKU, error) { if len(p.gpuSKUs) == 0 { return "", fmt.Errorf("The pod requires GPU resource, but ACI doesn't provide GPU enabled container group in region %s", p.region) } if desiredSKU, ok := pod.Annotations[gpuTypeAnnotation]; ok { for _, supportedSKU := range p.gpuSKUs { if strings.EqualFold(string(desiredSKU), string(supportedSKU)) { return supportedSKU, nil } } return "", fmt.Errorf("The pod requires GPU SKU %s, but ACI only supports SKUs %v in region %s", desiredSKU, p.region, p.gpuSKUs) } return p.gpuSKUs[0], nil } func getProbe(probe *v1.Probe, ports []v1.ContainerPort) (*aci.ContainerProbe, error) { if probe.Handler.Exec != nil && probe.Handler.HTTPGet != nil { return nil, fmt.Errorf("probe may not specify more than one of \"exec\" and \"httpGet\"") } if probe.Handler.Exec == nil && probe.Handler.HTTPGet == nil { return nil, fmt.Errorf("probe must specify one of \"exec\" and \"httpGet\"") } // Probes have can have a Exec or HTTP Get Handler. 
// Create those if they exist, then add to the // ContainerProbe struct var exec *aci.ContainerExecProbe if probe.Handler.Exec != nil { exec = &aci.ContainerExecProbe{ Command: probe.Handler.Exec.Command, } } var httpGET *aci.ContainerHTTPGetProbe if probe.Handler.HTTPGet != nil { var portValue int port := probe.Handler.HTTPGet.Port switch port.Type { case intstr.Int: portValue = port.IntValue() case intstr.String: portName := port.String() for _, p := range ports { if portName == p.Name { portValue = int(p.ContainerPort) break } } if portValue == 0 { return nil, fmt.Errorf("unable to find named port: %s", portName) } } httpGET = &aci.ContainerHTTPGetProbe{ Port: portValue, Path: probe.Handler.HTTPGet.Path, Scheme: string(probe.Handler.HTTPGet.Scheme), } } return &aci.ContainerProbe{ Exec: exec, HTTPGet: httpGET, InitialDelaySeconds: probe.InitialDelaySeconds, Period: probe.PeriodSeconds, FailureThreshold: probe.FailureThreshold, SuccessThreshold: probe.SuccessThreshold, TimeoutSeconds: probe.TimeoutSeconds, }, nil } func (p *ACIProvider) getVolumes(pod *v1.Pod) ([]aci.Volume, error) { volumes := make([]aci.Volume, 0, len(pod.Spec.Volumes)) for _, v := range pod.Spec.Volumes { // Handle the case for the AzureFile volume. if v.AzureFile != nil { secret, err := p.resourceManager.GetSecret(v.AzureFile.SecretName, pod.Namespace) if err != nil { return volumes, err } if secret == nil { return nil, fmt.Errorf("Getting secret for AzureFile volume returned an empty secret") } volumes = append(volumes, aci.Volume{ Name: v.Name, AzureFile: &aci.AzureFileVolume{ ShareName: v.AzureFile.ShareName, ReadOnly: v.AzureFile.ReadOnly, StorageAccountName: string(secret.Data["azurestorageaccountname"]), StorageAccountKey: string(secret.Data["azurestorageaccountkey"]), }, }) continue } // Handle the case for the EmptyDir. if v.EmptyDir != nil { volumes = append(volumes, aci.Volume{ Name: v.Name, EmptyDir: map[string]interface{}{}, }) continue } // Handle the case for GitRepo volume. if v.GitRepo != nil { volumes = append(volumes, aci.Volume{ Name: v.Name, GitRepo: &aci.GitRepoVolume{ Directory: v.GitRepo.Directory, Repository: v.GitRepo.Repository, Revision: v.GitRepo.Revision, }, }) continue } // Handle the case for Secret volume. if v.Secret != nil { paths := make(map[string]string) secret, err := p.resourceManager.GetSecret(v.Secret.SecretName, pod.Namespace) if v.Secret.Optional != nil && !*v.Secret.Optional && k8serr.IsNotFound(err) { return nil, fmt.Errorf("Secret %s is required by Pod %s and does not exist", v.Secret.SecretName, pod.Name) } if secret == nil { continue } for k, v := range secret.Data { paths[k] = base64.StdEncoding.EncodeToString(v) } if len(paths) != 0 { volumes = append(volumes, aci.Volume{ Name: v.Name, Secret: paths, }) } continue } // Handle the case for ConfigMap volume. 
if v.ConfigMap != nil { paths := make(map[string]string) configMap, err := p.resourceManager.GetConfigMap(v.ConfigMap.Name, pod.Namespace) if v.ConfigMap.Optional != nil && !*v.ConfigMap.Optional && k8serr.IsNotFound(err) { return nil, fmt.Errorf("ConfigMap %s is required by Pod %s and does not exist", v.ConfigMap.Name, pod.Name) } if configMap == nil { continue } for k, v := range configMap.BinaryData { paths[k] = base64.StdEncoding.EncodeToString(v) } if len(paths) != 0 { volumes = append(volumes, aci.Volume{ Name: v.Name, Secret: paths, }) } continue } // If we've made it this far we have found a volume type that isn't supported return nil, fmt.Errorf("Pod %s requires volume %s which is of an unsupported type", pod.Name, v.Name) } return volumes, nil } func getProtocol(pro v1.Protocol) aci.ContainerNetworkProtocol { switch pro { case v1.ProtocolUDP: return aci.ContainerNetworkProtocolUDP default: return aci.ContainerNetworkProtocolTCP } } func containerGroupToPod(cg *aci.ContainerGroup) (*v1.Pod, error) { var podCreationTimestamp metav1.Time if cg.Tags["CreationTimestamp"] != "" { t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", cg.Tags["CreationTimestamp"]) if err != nil { return nil, err } podCreationTimestamp = metav1.NewTime(t) } containerStartTime := metav1.NewTime(time.Time(cg.Containers[0].ContainerProperties.InstanceView.CurrentState.StartTime)) // Use the Provisioning State if it's not Succeeded, // otherwise use the state of the instance. aciState := cg.ContainerGroupProperties.ProvisioningState if aciState == "Succeeded" { aciState = cg.ContainerGroupProperties.InstanceView.State } containers := make([]v1.Container, 0, len(cg.Containers)) containerStatuses := make([]v1.ContainerStatus, 0, len(cg.Containers)) for _, c := range cg.Containers { container := v1.Container{ Name: c.Name, Image: c.Image, Command: c.Command, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", c.Resources.Requests.CPU)), v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%gG", c.Resources.Requests.MemoryInGB)), }, }, } if c.Resources.Requests.GPU != nil { container.Resources.Requests[gpuResourceName] = resource.MustParse(fmt.Sprintf("%d", c.Resources.Requests.GPU.Count)) } if c.Resources.Limits != nil { container.Resources.Limits = v1.ResourceList{ v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", c.Resources.Limits.CPU)), v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%gG", c.Resources.Limits.MemoryInGB)), } if c.Resources.Limits.GPU != nil { container.Resources.Limits[gpuResourceName] = resource.MustParse(fmt.Sprintf("%d", c.Resources.Requests.GPU.Count)) } } containers = append(containers, container) containerStatus := v1.ContainerStatus{ Name: c.Name, State: aciContainerStateToContainerState(c.InstanceView.CurrentState), LastTerminationState: aciContainerStateToContainerState(c.InstanceView.PreviousState), Ready: aciStateToPodPhase(c.InstanceView.CurrentState.State) == v1.PodRunning, RestartCount: c.InstanceView.RestartCount, Image: c.Image, ImageID: "", ContainerID: getContainerID(cg.ID, c.Name), } // Add to containerStatuses containerStatuses = append(containerStatuses, containerStatus) } ip := "" if cg.IPAddress != nil { ip = cg.IPAddress.IP } p := v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ Name: cg.Tags["PodName"], Namespace: cg.Tags["Namespace"], ClusterName: cg.Tags["ClusterName"], UID: types.UID(cg.Tags["UID"]), CreationTimestamp: 
podCreationTimestamp, }, Spec: v1.PodSpec{ NodeName: cg.Tags["NodeName"], Volumes: []v1.Volume{}, Containers: containers, }, Status: v1.PodStatus{ Phase: aciStateToPodPhase(aciState), Conditions: aciStateToPodConditions(aciState, podCreationTimestamp), Message: "", Reason: "", HostIP: "", PodIP: ip, StartTime: &containerStartTime, ContainerStatuses: containerStatuses, }, } return &p, nil } func getContainerID(cgID, containerName string) string { if cgID == "" { return "" } containerResourceID := fmt.Sprintf("%s/containers/%s", cgID, containerName) h := sha256.New() h.Write([]byte(strings.ToUpper(containerResourceID))) hashBytes := h.Sum(nil) return fmt.Sprintf("aci://%s", hex.EncodeToString(hashBytes)) } func aciStateToPodPhase(state string) v1.PodPhase { switch state { case "Running": return v1.PodRunning case "Succeeded": return v1.PodSucceeded case "Failed": return v1.PodFailed case "Canceled": return v1.PodFailed case "Creating": return v1.PodPending case "Repairing": return v1.PodPending case "Pending": return v1.PodPending case "Accepted": return v1.PodPending } return v1.PodUnknown } func aciStateToPodConditions(state string, transitiontime metav1.Time) []v1.PodCondition { switch state { case "Running", "Succeeded": return []v1.PodCondition{ v1.PodCondition{ Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: transitiontime, }, v1.PodCondition{ Type: v1.PodInitialized, Status: v1.ConditionTrue, LastTransitionTime: transitiontime, }, v1.PodCondition{ Type: v1.PodScheduled, Status: v1.ConditionTrue, LastTransitionTime: transitiontime, }, } } return []v1.PodCondition{} } func aciContainerStateToContainerState(cs aci.ContainerState) v1.ContainerState { startTime := metav1.NewTime(time.Time(cs.StartTime)) // Handle the case where the container is running. if cs.State == "Running" || cs.State == "Succeeded" { return v1.ContainerState{ Running: &v1.ContainerStateRunning{ StartedAt: startTime, }, } } // Handle the case where the container failed. if cs.State == "Failed" || cs.State == "Canceled" { return v1.ContainerState{ Terminated: &v1.ContainerStateTerminated{ ExitCode: cs.ExitCode, Reason: cs.State, Message: cs.DetailStatus, StartedAt: startTime, FinishedAt: metav1.NewTime(time.Time(cs.FinishTime)), }, } } state := cs.State if state == "" { state = "Creating" } // Handle the case where the container is pending. // Which should be all other aci states. return v1.ContainerState{ Waiting: &v1.ContainerStateWaiting{ Reason: state, Message: cs.DetailStatus, }, } } // Filters service account secret volume for Windows. // Service account secret volume gets automatically turned on if not specified otherwise. // ACI doesn't support secret volume for Windows, so we need to filter it. 
func filterServiceAccountSecretVolume(osType string, containerGroup *aci.ContainerGroup) { if strings.EqualFold(osType, "Windows") { serviceAccountSecretVolumeName := make(map[string]bool) for index, container := range containerGroup.ContainerGroupProperties.Containers { volumeMounts := make([]aci.VolumeMount, 0, len(container.VolumeMounts)) for _, volumeMount := range container.VolumeMounts { if !strings.EqualFold(serviceAccountSecretMountPath, volumeMount.MountPath) { volumeMounts = append(volumeMounts, volumeMount) } else { serviceAccountSecretVolumeName[volumeMount.Name] = true } } containerGroup.ContainerGroupProperties.Containers[index].VolumeMounts = volumeMounts } if len(serviceAccountSecretVolumeName) == 0 { return } l := log.G(context.TODO()).WithField("containerGroup", containerGroup.Name) l.Infof("Ignoring service account secret volumes '%v' for Windows", reflect.ValueOf(serviceAccountSecretVolumeName).MapKeys()) volumes := make([]aci.Volume, 0, len(containerGroup.ContainerGroupProperties.Volumes)) for _, volume := range containerGroup.ContainerGroupProperties.Volumes { if _, ok := serviceAccountSecretVolumeName[volume.Name]; !ok { volumes = append(volumes, volume) } } containerGroup.ContainerGroupProperties.Volumes = volumes } } func getACIEnvVar(e v1.EnvVar) aci.EnvironmentVariable { var envVar aci.EnvironmentVariable // If the variable is a secret, use SecureValue if e.ValueFrom != nil && e.ValueFrom.SecretKeyRef != nil { envVar = aci.EnvironmentVariable{ Name: e.Name, SecureValue: e.Value, } } else { envVar = aci.EnvironmentVariable{ Name: e.Name, Value: e.Value, } } return envVar }
[ "\"AZURE_AUTH_LOCATION\"", "\"ACS_CREDENTIAL_LOCATION\"", "\"AZURE_CLIENT_ID\"", "\"AZURE_CLIENT_SECRET\"", "\"AZURE_TENANT_ID\"", "\"AZURE_SUBSCRIPTION_ID\"", "\"ACI_EXTRA_USER_AGENT\"", "\"LOG_ANALYTICS_AUTH_LOCATION\"", "\"LOG_ANALYTICS_ID\"", "\"LOG_ANALYTICS_KEY\"", "\"CLUSTER_RESOURCE_ID\"", "\"ACI_RESOURCE_GROUP\"", "\"ACI_REGION\"", "\"ACI_SUBNET_NAME\"", "\"ACI_SUBNET_CIDR\"", "\"MASTER_URI\"", "\"CLUSTER_CIDR\"", "\"KUBE_DNS_IP\"", "\"ACI_QUOTA_CPU\"", "\"ACI_QUOTA_MEMORY\"", "\"ACI_QUOTA_POD\"", "\"ACI_QUOTA_GPU\"" ]
[]
[ "ACI_QUOTA_GPU", "CLUSTER_CIDR", "ACI_QUOTA_POD", "KUBE_DNS_IP", "AZURE_SUBSCRIPTION_ID", "ACI_QUOTA_MEMORY", "ACI_SUBNET_CIDR", "LOG_ANALYTICS_KEY", "LOG_ANALYTICS_AUTH_LOCATION", "ACI_REGION", "CLUSTER_RESOURCE_ID", "ACI_SUBNET_NAME", "AZURE_CLIENT_SECRET", "AZURE_AUTH_LOCATION", "LOG_ANALYTICS_ID", "ACI_EXTRA_USER_AGENT", "ACS_CREDENTIAL_LOCATION", "MASTER_URI", "ACI_RESOURCE_GROUP", "AZURE_CLIENT_ID", "AZURE_TENANT_ID", "ACI_QUOTA_CPU" ]
[]
["ACI_QUOTA_GPU", "CLUSTER_CIDR", "ACI_QUOTA_POD", "KUBE_DNS_IP", "AZURE_SUBSCRIPTION_ID", "ACI_QUOTA_MEMORY", "ACI_SUBNET_CIDR", "LOG_ANALYTICS_KEY", "LOG_ANALYTICS_AUTH_LOCATION", "ACI_REGION", "CLUSTER_RESOURCE_ID", "ACI_SUBNET_NAME", "AZURE_CLIENT_SECRET", "AZURE_AUTH_LOCATION", "LOG_ANALYTICS_ID", "ACI_EXTRA_USER_AGENT", "ACS_CREDENTIAL_LOCATION", "MASTER_URI", "ACI_RESOURCE_GROUP", "AZURE_CLIENT_ID", "AZURE_TENANT_ID", "ACI_QUOTA_CPU"]
go
22
0
internal/cluster/clustersetup/activity_create_pipeline_namespace_test.go
// Copyright © 2019 Banzai Cloud // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package clustersetup import ( "context" "os" "testing" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "go.uber.org/cadence/activity" "go.uber.org/cadence/testsuite" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "sigs.k8s.io/testing_frameworks/integration" "github.com/banzaicloud/pipeline/pkg/k8sclient" ) // nolint: gochecknoglobals var createPipelineNamespaceTestActivity = CreatePipelineNamespaceActivity{} func testCreatePipelineNamespaceActivityExecute(ctx context.Context, input CreatePipelineNamespaceActivityInput) error { return createPipelineNamespaceTestActivity.Execute(ctx, input) } // nolint: gochecknoinits func init() { activity.RegisterWithOptions(testCreatePipelineNamespaceActivityExecute, activity.RegisterOptions{Name: CreatePipelineNamespaceActivityName}) } type CreatePipelineNamespaceActivityTestSuite struct { suite.Suite testsuite.WorkflowTestSuite env *testsuite.TestActivityEnvironment controlPlane *integration.ControlPlane client kubernetes.Interface } func testCreatePipelineNamespaceActivity(t *testing.T) { if os.Getenv("TEST_ASSET_KUBE_APISERVER") == "" || os.Getenv("TEST_ASSET_ETCD") == "" { t.Skip("control plane binaries are missing") } suite.Run(t, new(CreatePipelineNamespaceActivityTestSuite)) } func (s *CreatePipelineNamespaceActivityTestSuite) SetupSuite() { s.controlPlane = &integration.ControlPlane{} err := s.controlPlane.Start() s.Require().NoError(err) } func (s *CreatePipelineNamespaceActivityTestSuite) TearDownSuite() { _ = s.controlPlane.Stop() } func (s *CreatePipelineNamespaceActivityTestSuite) SetupTest() { s.env = s.NewTestActivityEnvironment() config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( &clientcmd.ClientConfigLoadingRules{}, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.controlPlane.APIURL().String()}}, ).ClientConfig() s.Require().NoError(err) client, err := k8sclient.NewClientFromConfig(config) s.Require().NoError(err) s.client = client } func (s *CreatePipelineNamespaceActivityTestSuite) Test_Execute() { clientFactory := new(MockClientFactory) clientFactory.On("FromSecret", mock.Anything, "secret").Return(s.client, nil) const pipelineNamespace = "pipeline-system" createPipelineNamespaceTestActivity = NewCreatePipelineNamespaceActivity(pipelineNamespace, clientFactory) _, err := s.env.ExecuteActivity( CreatePipelineNamespaceActivityName, CreatePipelineNamespaceActivityInput{ ConfigSecretID: "secret", }, ) s.Require().NoError(err) namespace, err := s.client.CoreV1().Namespaces().Get(pipelineNamespace, metav1.GetOptions{}) s.Require().NoError(err) s.Assert().Equal(pipelineNamespace, namespace.Name) s.Assert().Equal( map[string]string{ "scan": "noscan", "name": pipelineNamespace, "owner": "pipeline", }, namespace.Labels, ) clientFactory.AssertExpectations(s.T()) 
} func (s *CreatePipelineNamespaceActivityTestSuite) Test_Execute_AlreadyExists() { clientFactory := new(MockClientFactory) clientFactory.On("FromSecret", mock.Anything, "secret").Return(s.client, nil) const pipelineNamespace = "pipeline-system2" createPipelineNamespaceTestActivity = NewCreatePipelineNamespaceActivity(pipelineNamespace, clientFactory) existingNamespace, err := s.client.CoreV1().Namespaces().Create(&corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: pipelineNamespace, Labels: map[string]string{ "some": "label", }, }, }) s.Require().NoError(err) _, err = s.env.ExecuteActivity( CreatePipelineNamespaceActivityName, CreatePipelineNamespaceActivityInput{ ConfigSecretID: "secret", }, ) s.Require().NoError(err) namespace, err := s.client.CoreV1().Namespaces().Get(pipelineNamespace, metav1.GetOptions{}) s.Require().NoError(err) s.Assert().Equal(existingNamespace, namespace) clientFactory.AssertExpectations(s.T()) }
[ "\"TEST_ASSET_KUBE_APISERVER\"", "\"TEST_ASSET_ETCD\"" ]
[]
[ "TEST_ASSET_KUBE_APISERVER", "TEST_ASSET_ETCD" ]
[]
["TEST_ASSET_KUBE_APISERVER", "TEST_ASSET_ETCD"]
go
2
0
blockobserver/utils/pinger.go
package utils import ( "fmt" "os" "time" "github.com/mit-dci/pooldetective/logging" "github.com/sparrc/go-ping" ) func Pinger(host string, stop chan bool) { for { select { case <-stop: return default: } p, err := ping.NewPinger(host) if err != nil { logging.Errorf("Could not start pinger: %s", err.Error()) return } p.SetPrivileged(true) p.Count = 10 p.Run() stats := p.Statistics() target := fmt.Sprintf("%s/%s", os.Getenv("HUB_URL"), "observePeerPing") err = PostJson(target, map[string]interface{}{"observedFrom": os.Getenv("OBSERVER_NAME"), "timestamp": time.Now().Unix(), "host": host, "results": stats}, false, nil) if err != nil { logging.Warnf("Unable to post observation: %s\n", err.Error()) } time.Sleep(time.Minute * 30) } }
[ "\"HUB_URL\"", "\"OBSERVER_NAME\"" ]
[]
[ "HUB_URL", "OBSERVER_NAME" ]
[]
["HUB_URL", "OBSERVER_NAME"]
go
2
0
examples/service/video/composition/delete/composition_delete_example.go
package main import ( "log" "os" "github.com/RJPearson94/twilio-sdk-go" v1 "github.com/RJPearson94/twilio-sdk-go/service/video/v1" "github.com/RJPearson94/twilio-sdk-go/session/credentials" ) var videoClient *v1.Video func init() { creds, err := credentials.New(credentials.Account{ Sid: os.Getenv("TWILIO_ACCOUNT_SID"), AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"), }) if err != nil { log.Panicf("%s", err.Error()) } videoClient = twilio.NewWithCredentials(creds).Video.V1 } func main() { err := videoClient. Composition("CJXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"). Delete() if err != nil { log.Panicf("%s", err.Error()) } log.Printf("Composition successfully deleted") }
[ "\"TWILIO_ACCOUNT_SID\"", "\"TWILIO_AUTH_TOKEN\"" ]
[]
[ "TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID" ]
[]
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
go
2
0
link_test.go
// +build linux package netlink import ( "bytes" "fmt" "net" "os" "os/exec" "syscall" "testing" "time" "github.com/ndupreez/netlink/nl" "github.com/vishvananda/netns" "golang.org/x/sys/unix" ) const ( testTxQLen int = 100 defaultTxQLen int = 1000 testTxQueues int = 4 testRxQueues int = 8 ) func testLinkAddDel(t *testing.T, link Link) { links, err := LinkList() if err != nil { t.Fatal(err) } if err := LinkAdd(link); err != nil { t.Fatal(err) } base := link.Attrs() result, err := LinkByName(base.Name) if err != nil { t.Fatal(err) } rBase := result.Attrs() if base.Index != 0 { if base.Index != rBase.Index { t.Fatalf("index is %d, should be %d", rBase.Index, base.Index) } } if base.Group > 0 { if base.Group != rBase.Group { t.Fatalf("group is %d, should be %d", rBase.Group, base.Group) } } if vlan, ok := link.(*Vlan); ok { other, ok := result.(*Vlan) if !ok { t.Fatal("Result of create is not a vlan") } if vlan.VlanId != other.VlanId { t.Fatal("Link.VlanId id doesn't match") } } if veth, ok := result.(*Veth); ok { if rBase.TxQLen != base.TxQLen { t.Fatalf("qlen is %d, should be %d", rBase.TxQLen, base.TxQLen) } if rBase.NumTxQueues != base.NumTxQueues { t.Fatalf("txQueues is %d, should be %d", rBase.NumTxQueues, base.NumTxQueues) } if rBase.NumRxQueues != base.NumRxQueues { t.Fatalf("rxQueues is %d, should be %d", rBase.NumRxQueues, base.NumRxQueues) } if rBase.MTU != base.MTU { t.Fatalf("MTU is %d, should be %d", rBase.MTU, base.MTU) } if original, ok := link.(*Veth); ok { if original.PeerName != "" { var peer *Veth other, err := LinkByName(original.PeerName) if err != nil { t.Fatalf("Peer %s not created", veth.PeerName) } if peer, ok = other.(*Veth); !ok { t.Fatalf("Peer %s is incorrect type", veth.PeerName) } if peer.TxQLen != testTxQLen { t.Fatalf("TxQLen of peer is %d, should be %d", peer.TxQLen, testTxQLen) } if peer.NumTxQueues != testTxQueues { t.Fatalf("NumTxQueues of peer is %d, should be %d", peer.NumTxQueues, testTxQueues) } if peer.NumRxQueues != testRxQueues { t.Fatalf("NumRxQueues of peer is %d, should be %d", peer.NumRxQueues, testRxQueues) } if !bytes.Equal(peer.Attrs().HardwareAddr, original.PeerHardwareAddr) { t.Fatalf("Peer MAC addr is %s, should be %s", peer.Attrs().HardwareAddr, original.PeerHardwareAddr) } } } } else { // recent kernels set the parent index for veths in the response if rBase.ParentIndex == 0 && base.ParentIndex != 0 { t.Fatalf("Created link doesn't have parent %d but it should", base.ParentIndex) } else if rBase.ParentIndex != 0 && base.ParentIndex == 0 { t.Fatalf("Created link has parent %d but it shouldn't", rBase.ParentIndex) } else if rBase.ParentIndex != 0 && base.ParentIndex != 0 { if rBase.ParentIndex != base.ParentIndex { t.Fatalf("Link.ParentIndex doesn't match %d != %d", rBase.ParentIndex, base.ParentIndex) } } } if _, ok := link.(*Wireguard); ok { _, ok := result.(*Wireguard) if !ok { t.Fatal("Result of create is not a wireguard") } } if vxlan, ok := link.(*Vxlan); ok { other, ok := result.(*Vxlan) if !ok { t.Fatal("Result of create is not a vxlan") } compareVxlan(t, vxlan, other) } if ipv, ok := link.(*IPVlan); ok { other, ok := result.(*IPVlan) if !ok { t.Fatal("Result of create is not a ipvlan") } if ipv.Mode != other.Mode { t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, ipv.Mode) } if ipv.Flag != other.Flag { t.Fatalf("Got unexpected flag: %d, expected: %d", other.Flag, ipv.Flag) } } if macv, ok := link.(*Macvlan); ok { other, ok := result.(*Macvlan) if !ok { t.Fatal("Result of create is not a macvlan") } if macv.Mode != 
other.Mode { t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, macv.Mode) } } if macv, ok := link.(*Macvtap); ok { other, ok := result.(*Macvtap) if !ok { t.Fatal("Result of create is not a macvtap") } if macv.Mode != other.Mode { t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, macv.Mode) } } if _, ok := link.(*Vti); ok { _, ok := result.(*Vti) if !ok { t.Fatal("Result of create is not a vti") } } if bond, ok := link.(*Bond); ok { other, ok := result.(*Bond) if !ok { t.Fatal("Result of create is not a bond") } if bond.Mode != other.Mode { t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, bond.Mode) } if bond.ArpIpTargets != nil { if other.ArpIpTargets == nil { t.Fatalf("Got unexpected ArpIpTargets: nil") } if len(bond.ArpIpTargets) != len(other.ArpIpTargets) { t.Fatalf("Got unexpected ArpIpTargets len: %d, expected: %d", len(other.ArpIpTargets), len(bond.ArpIpTargets)) } for i := range bond.ArpIpTargets { if !bond.ArpIpTargets[i].Equal(other.ArpIpTargets[i]) { t.Fatalf("Got unexpected ArpIpTargets: %s, expected: %s", other.ArpIpTargets[i], bond.ArpIpTargets[i]) } } } // Mode specific checks if os.Getenv("TRAVIS_BUILD_DIR") != "" { t.Log("Kernel in travis is too old for this check") } else { switch mode := bondModeToString[bond.Mode]; mode { case "802.3ad": if bond.AdSelect != other.AdSelect { t.Fatalf("Got unexpected AdSelect: %d, expected: %d", other.AdSelect, bond.AdSelect) } if bond.AdActorSysPrio != other.AdActorSysPrio { t.Fatalf("Got unexpected AdActorSysPrio: %d, expected: %d", other.AdActorSysPrio, bond.AdActorSysPrio) } if bond.AdUserPortKey != other.AdUserPortKey { t.Fatalf("Got unexpected AdUserPortKey: %d, expected: %d", other.AdUserPortKey, bond.AdUserPortKey) } if bytes.Compare(bond.AdActorSystem, other.AdActorSystem) != 0 { t.Fatalf("Got unexpected AdActorSystem: %d, expected: %d", other.AdActorSystem, bond.AdActorSystem) } case "balance-tlb": if bond.TlbDynamicLb != other.TlbDynamicLb { t.Fatalf("Got unexpected TlbDynamicLb: %d, expected: %d", other.TlbDynamicLb, bond.TlbDynamicLb) } } } } if _, ok := link.(*Iptun); ok { _, ok := result.(*Iptun) if !ok { t.Fatal("Result of create is not a iptun") } } if _, ok := link.(*Ip6tnl); ok { _, ok := result.(*Ip6tnl) if !ok { t.Fatal("Result of create is not a ip6tnl") } } if _, ok := link.(*Sittun); ok { _, ok := result.(*Sittun) if !ok { t.Fatal("Result of create is not a sittun") } } if geneve, ok := link.(*Geneve); ok { other, ok := result.(*Geneve) if !ok { t.Fatal("Result of create is not a Geneve") } compareGeneve(t, geneve, other) } if gretap, ok := link.(*Gretap); ok { other, ok := result.(*Gretap) if !ok { t.Fatal("Result of create is not a Gretap") } compareGretap(t, gretap, other) } if gretun, ok := link.(*Gretun); ok { other, ok := result.(*Gretun) if !ok { t.Fatal("Result of create is not a Gretun") } compareGretun(t, gretun, other) } if xfrmi, ok := link.(*Xfrmi); ok { other, ok := result.(*Xfrmi) if !ok { t.Fatal("Result of create is not a xfrmi") } compareXfrmi(t, xfrmi, other) } if tuntap, ok := link.(*Tuntap); ok { other, ok := result.(*Tuntap) if !ok { t.Fatal("Result of create is not a tuntap") } compareTuntap(t, tuntap, other) } if err = LinkDel(link); err != nil { t.Fatal(err) } links, err = LinkList() if err != nil { t.Fatal(err) } for _, l := range links { if l.Attrs().Name == link.Attrs().Name { t.Fatal("Link not removed properly") } } } func compareGeneve(t *testing.T, expected, actual *Geneve) { if actual.ID != expected.ID { t.Fatalf("Geneve.ID doesn't match: %d %d", 
actual.ID, expected.ID) } // set the Dport to 6081 (the linux default) if it wasn't specified at creation if expected.Dport == 0 { expected.Dport = 6081 } if actual.Dport != expected.Dport { t.Fatal("Geneve.Dport doesn't match") } if actual.Ttl != expected.Ttl { t.Fatal("Geneve.Ttl doesn't match") } if actual.Tos != expected.Tos { t.Fatal("Geneve.Tos doesn't match") } if !actual.Remote.Equal(expected.Remote) { t.Fatalf("Geneve.Remote is not equal: %s!=%s", actual.Remote, expected.Remote) } // TODO: we should implement the rest of the geneve methods } func compareGretap(t *testing.T, expected, actual *Gretap) { if actual.IKey != expected.IKey { t.Fatal("Gretap.IKey doesn't match") } if actual.OKey != expected.OKey { t.Fatal("Gretap.OKey doesn't match") } if actual.EncapSport != expected.EncapSport { t.Fatal("Gretap.EncapSport doesn't match") } if actual.EncapDport != expected.EncapDport { t.Fatal("Gretap.EncapDport doesn't match") } if expected.Local != nil && !actual.Local.Equal(expected.Local) { t.Fatal("Gretap.Local doesn't match") } if expected.Remote != nil && !actual.Remote.Equal(expected.Remote) { t.Fatal("Gretap.Remote doesn't match") } if actual.IFlags != expected.IFlags { t.Fatal("Gretap.IFlags doesn't match") } if actual.OFlags != expected.OFlags { t.Fatal("Gretap.OFlags doesn't match") } if actual.PMtuDisc != expected.PMtuDisc { t.Fatal("Gretap.PMtuDisc doesn't match") } if actual.Ttl != expected.Ttl { t.Fatal("Gretap.Ttl doesn't match") } if actual.Tos != expected.Tos { t.Fatal("Gretap.Tos doesn't match") } if actual.EncapType != expected.EncapType { t.Fatal("Gretap.EncapType doesn't match") } if actual.EncapFlags != expected.EncapFlags { t.Fatal("Gretap.EncapFlags doesn't match") } if actual.Link != expected.Link { t.Fatal("Gretap.Link doesn't match") } /* * NOTE: setting the FlowBased flag doesn't seem to work, but by lack of * a proper way to debug this, this test is disabled for now if actual.FlowBased != expected.FlowBased { t.Fatal("Gretap.FlowBased doesn't match") } */ } func compareGretun(t *testing.T, expected, actual *Gretun) { if actual.Link != expected.Link { t.Fatal("Gretun.Link doesn't match") } if actual.IFlags != expected.IFlags { t.Fatal("Gretun.IFlags doesn't match") } if actual.OFlags != expected.OFlags { t.Fatal("Gretun.OFlags doesn't match") } if actual.IKey != expected.IKey { t.Fatal("Gretun.IKey doesn't match") } if actual.OKey != expected.OKey { t.Fatal("Gretun.OKey doesn't match") } if expected.Local != nil && !actual.Local.Equal(expected.Local) { t.Fatal("Gretun.Local doesn't match") } if expected.Remote != nil && !actual.Remote.Equal(expected.Remote) { t.Fatal("Gretun.Remote doesn't match") } if actual.Ttl != expected.Ttl { t.Fatal("Gretun.Ttl doesn't match") } if actual.Tos != expected.Tos { t.Fatal("Gretun.Tos doesn't match") } if actual.PMtuDisc != expected.PMtuDisc { t.Fatal("Gretun.PMtuDisc doesn't match") } if actual.EncapType != expected.EncapType { t.Fatal("Gretun.EncapType doesn't match") } if actual.EncapFlags != expected.EncapFlags { t.Fatal("Gretun.EncapFlags doesn't match") } if actual.EncapSport != expected.EncapSport { t.Fatal("Gretun.EncapSport doesn't match") } if actual.EncapDport != expected.EncapDport { t.Fatal("Gretun.EncapDport doesn't match") } } func compareVxlan(t *testing.T, expected, actual *Vxlan) { if actual.VxlanId != expected.VxlanId { t.Fatal("Vxlan.VxlanId doesn't match") } if expected.SrcAddr != nil && !actual.SrcAddr.Equal(expected.SrcAddr) { t.Fatal("Vxlan.SrcAddr doesn't match") } if expected.Group != nil && 
!actual.Group.Equal(expected.Group) { t.Fatal("Vxlan.Group doesn't match") } if expected.TTL != -1 && actual.TTL != expected.TTL { t.Fatal("Vxlan.TTL doesn't match") } if expected.TOS != -1 && actual.TOS != expected.TOS { t.Fatal("Vxlan.TOS doesn't match") } if actual.Learning != expected.Learning { t.Fatal("Vxlan.Learning doesn't match") } if actual.Proxy != expected.Proxy { t.Fatal("Vxlan.Proxy doesn't match") } if actual.RSC != expected.RSC { t.Fatal("Vxlan.RSC doesn't match") } if actual.L2miss != expected.L2miss { t.Fatal("Vxlan.L2miss doesn't match") } if actual.L3miss != expected.L3miss { t.Fatal("Vxlan.L3miss doesn't match") } if actual.GBP != expected.GBP { t.Fatal("Vxlan.GBP doesn't match") } if actual.FlowBased != expected.FlowBased { t.Fatal("Vxlan.FlowBased doesn't match") } if actual.UDP6ZeroCSumTx != expected.UDP6ZeroCSumTx { t.Fatal("Vxlan.UDP6ZeroCSumTx doesn't match") } if actual.UDP6ZeroCSumRx != expected.UDP6ZeroCSumRx { t.Fatal("Vxlan.UDP6ZeroCSumRx doesn't match") } if expected.NoAge { if !actual.NoAge { t.Fatal("Vxlan.NoAge doesn't match") } } else if expected.Age > 0 && actual.Age != expected.Age { t.Fatal("Vxlan.Age doesn't match") } if expected.Limit > 0 && actual.Limit != expected.Limit { t.Fatal("Vxlan.Limit doesn't match") } if expected.Port > 0 && actual.Port != expected.Port { t.Fatal("Vxlan.Port doesn't match") } if expected.PortLow > 0 || expected.PortHigh > 0 { if actual.PortLow != expected.PortLow { t.Fatal("Vxlan.PortLow doesn't match") } if actual.PortHigh != expected.PortHigh { t.Fatal("Vxlan.PortHigh doesn't match") } } } func compareXfrmi(t *testing.T, expected, actual *Xfrmi) { if expected.Ifid != actual.Ifid { t.Fatal("Xfrmi.Ifid doesn't match") } } func compareTuntap(t *testing.T, expected, actual *Tuntap) { if expected.Mode != actual.Mode { t.Fatalf("Tuntap.Mode doesn't match: expected : %+v, got %+v", expected.Mode, actual.Mode) } if expected.Owner != actual.Owner { t.Fatal("Tuntap.Owner doesn't match") } if expected.Group != actual.Group { t.Fatal("Tuntap.Group doesn't match") } if expected.NonPersist != actual.NonPersist { t.Fatal("Tuntap.Group doesn't match") } } func TestLinkAddDelWithIndex(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Dummy{LinkAttrs{Index: 1000, Name: "foo"}}) } func TestLinkAddDelDummy(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Dummy{LinkAttrs{Name: "foo"}}) } func TestLinkAddDelDummyWithGroup(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Dummy{LinkAttrs{Name: "foo", Group: 42}}) } func TestLinkModify(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() linkName := "foo" originalMTU := 1500 updatedMTU := 1442 link := &Dummy{LinkAttrs{Name: linkName, MTU: originalMTU}} base := link.Attrs() if err := LinkAdd(link); err != nil { t.Fatal(err) } link.MTU = updatedMTU if err := pkgHandle.LinkModify(link); err != nil { t.Fatal(err) } result, err := LinkByName(linkName) if err != nil { t.Fatal(err) } rBase := result.Attrs() if rBase.MTU != updatedMTU { t.Fatalf("MTU is %d, should be %d", rBase.MTU, base.MTU) } } func TestLinkAddDelIfb(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Ifb{LinkAttrs{Name: "foo"}}) } func TestLinkAddDelBridge(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Bridge{LinkAttrs: LinkAttrs{Name: "foo", MTU: 1400}}) } func TestLinkAddDelGeneve(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() 
testLinkAddDel(t, &Geneve{ LinkAttrs: LinkAttrs{Name: "foo4", EncapType: "geneve"}, ID: 0x1000, Remote: net.IPv4(127, 0, 0, 1)}) testLinkAddDel(t, &Geneve{ LinkAttrs: LinkAttrs{Name: "foo6", EncapType: "geneve"}, ID: 0x1000, Remote: net.ParseIP("2001:db8:ef33::2")}) } func TestGeneveCompareToIP(t *testing.T) { ns, tearDown := setUpNamedNetlinkTest(t) defer tearDown() expected := &Geneve{ ID: 0x764332, // 23 bits Remote: net.ParseIP("1.2.3.4"), Dport: 6081, } // Create interface cmd := exec.Command("ip", "netns", "exec", ns, "ip", "link", "add", "gen0", "type", "geneve", "vni", fmt.Sprint(expected.ID), "remote", expected.Remote.String(), // TODO: unit tests are currently done on ubuntu 16, and the version of iproute2 there doesn't support dstport // We can still do most of the testing by verifying that we do read the default port // "dstport", fmt.Sprint(expected.Dport), ) out := &bytes.Buffer{} cmd.Stdout = out cmd.Stderr = out if rc := cmd.Run(); rc != nil { t.Fatal("failed creating link:", rc, out.String()) } link, err := LinkByName("gen0") if err != nil { t.Fatal("Failed getting link: ", err) } actual, ok := link.(*Geneve) if !ok { t.Fatalf("resulted interface is not geneve: %T", link) } compareGeneve(t, expected, actual) } func TestLinkAddDelGretap(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Gretap{ LinkAttrs: LinkAttrs{Name: "foo4"}, IKey: 0x101, OKey: 0x101, PMtuDisc: 1, Local: net.IPv4(127, 0, 0, 1), Remote: net.IPv4(127, 0, 0, 1)}) testLinkAddDel(t, &Gretap{ LinkAttrs: LinkAttrs{Name: "foo6"}, IKey: 0x101, OKey: 0x101, Local: net.ParseIP("2001:db8:abcd::1"), Remote: net.ParseIP("2001:db8:ef33::2")}) } func TestLinkAddDelGretun(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Gretun{ LinkAttrs: LinkAttrs{Name: "foo4"}, Local: net.IPv4(127, 0, 0, 1), Remote: net.IPv4(127, 0, 0, 1)}) testLinkAddDel(t, &Gretun{ LinkAttrs: LinkAttrs{Name: "foo6"}, Local: net.ParseIP("2001:db8:abcd::1"), Remote: net.ParseIP("2001:db8:ef33::2")}) } func TestLinkAddDelGretunPointToMultiPoint(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Gretun{ LinkAttrs: LinkAttrs{Name: "foo"}, Local: net.IPv4(127, 0, 0, 1), IKey: 1234, OKey: 1234}) testLinkAddDel(t, &Gretun{ LinkAttrs: LinkAttrs{Name: "foo6"}, Local: net.ParseIP("2001:db8:1234::4"), IKey: 5678, OKey: 7890}) } func TestLinkAddDelGretapFlowBased(t *testing.T) { minKernelRequired(t, 4, 3) tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Gretap{ LinkAttrs: LinkAttrs{Name: "foo"}, FlowBased: true}) } func TestLinkAddDelVlan(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() parent := &Dummy{LinkAttrs{Name: "foo"}} if err := LinkAdd(parent); err != nil { t.Fatal(err) } testLinkAddDel(t, &Vlan{LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, 900, VLAN_PROTOCOL_8021Q}) if err := LinkDel(parent); err != nil { t.Fatal(err) } } func TestLinkAddDelMacvlan(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() parent := &Dummy{LinkAttrs{Name: "foo"}} if err := LinkAdd(parent); err != nil { t.Fatal(err) } testLinkAddDel(t, &Macvlan{ LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, Mode: MACVLAN_MODE_PRIVATE, }) testLinkAddDel(t, &Macvlan{ LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, Mode: MACVLAN_MODE_BRIDGE, }) testLinkAddDel(t, &Macvlan{ LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, Mode: MACVLAN_MODE_VEPA, }) if err := 
LinkDel(parent); err != nil { t.Fatal(err) } } func TestLinkAddDelMacvtap(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() parent := &Dummy{LinkAttrs{Name: "foo"}} if err := LinkAdd(parent); err != nil { t.Fatal(err) } testLinkAddDel(t, &Macvtap{ Macvlan: Macvlan{ LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, Mode: MACVLAN_MODE_PRIVATE, }, }) testLinkAddDel(t, &Macvtap{ Macvlan: Macvlan{ LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, Mode: MACVLAN_MODE_BRIDGE, }, }) testLinkAddDel(t, &Macvtap{ Macvlan: Macvlan{ LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, Mode: MACVLAN_MODE_VEPA, }, }) if err := LinkDel(parent); err != nil { t.Fatal(err) } } func TestLinkAddDelVeth(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() peerMAC, _ := net.ParseMAC("00:12:34:56:78:02") veth := &Veth{ LinkAttrs: LinkAttrs{ Name: "foo", TxQLen: testTxQLen, MTU: 1400, NumTxQueues: testTxQueues, NumRxQueues: testRxQueues, }, PeerName: "bar", PeerHardwareAddr: peerMAC, } testLinkAddDel(t, veth) } func TestLinkAddDelBond(t *testing.T) { minKernelRequired(t, 3, 13) tearDown := setUpNetlinkTest(t) defer tearDown() modes := []string{"802.3ad", "balance-tlb"} for _, mode := range modes { bond := NewLinkBond(LinkAttrs{Name: "foo"}) bond.Mode = StringToBondModeMap[mode] switch mode { case "802.3ad": bond.AdSelect = BondAdSelect(BOND_AD_SELECT_BANDWIDTH) bond.AdActorSysPrio = 1 bond.AdUserPortKey = 1 bond.AdActorSystem, _ = net.ParseMAC("06:aa:bb:cc:dd:ee") bond.ArpIpTargets = []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("1.1.1.2")} case "balance-tlb": bond.TlbDynamicLb = 1 bond.ArpIpTargets = []net.IP{net.ParseIP("1.1.1.2"), net.ParseIP("1.1.1.1")} } testLinkAddDel(t, bond) } } func TestLinkAddVethWithDefaultTxQLen(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() la := NewLinkAttrs() la.Name = "foo" veth := &Veth{LinkAttrs: la, PeerName: "bar"} if err := LinkAdd(veth); err != nil { t.Fatal(err) } link, err := LinkByName("foo") if err != nil { t.Fatal(err) } if veth, ok := link.(*Veth); !ok { t.Fatalf("unexpected link type: %T", link) } else { if veth.TxQLen != defaultTxQLen { t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen) } } peer, err := LinkByName("bar") if err != nil { t.Fatal(err) } if veth, ok := peer.(*Veth); !ok { t.Fatalf("unexpected link type: %T", link) } else { if veth.TxQLen != defaultTxQLen { t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen) } } } func TestLinkAddVethWithZeroTxQLen(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() la := NewLinkAttrs() la.Name = "foo" la.TxQLen = 0 veth := &Veth{LinkAttrs: la, PeerName: "bar"} if err := LinkAdd(veth); err != nil { t.Fatal(err) } link, err := LinkByName("foo") if err != nil { t.Fatal(err) } if veth, ok := link.(*Veth); !ok { t.Fatalf("unexpected link type: %T", link) } else { if veth.TxQLen != 0 { t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0) } } peer, err := LinkByName("bar") if err != nil { t.Fatal(err) } if veth, ok := peer.(*Veth); !ok { t.Fatalf("unexpected link type: %T", link) } else { if veth.TxQLen != 0 { t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0) } } } func TestLinkAddDelDummyWithGSO(t *testing.T) { const ( gsoMaxSegs = 16 gsoMaxSize = 1 << 14 ) minKernelRequired(t, 4, 16) tearDown := setUpNetlinkTest(t) defer tearDown() dummy := &Dummy{LinkAttrs: LinkAttrs{Name: "foo", GSOMaxSize: gsoMaxSize, GSOMaxSegs: gsoMaxSegs}} if err := LinkAdd(dummy); err != nil { 
t.Fatal(err) } link, err := LinkByName("foo") if err != nil { t.Fatal(err) } dummy, ok := link.(*Dummy) if !ok { t.Fatalf("unexpected link type: %T", link) } if dummy.GSOMaxSize != gsoMaxSize { t.Fatalf("GSOMaxSize is %d, should be %d", dummy.GSOMaxSize, gsoMaxSize) } if dummy.GSOMaxSegs != gsoMaxSegs { t.Fatalf("GSOMaxSeg is %d, should be %d", dummy.GSOMaxSegs, gsoMaxSegs) } } func TestLinkAddDummyWithTxQLen(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() la := NewLinkAttrs() la.Name = "foo" la.TxQLen = 1500 dummy := &Dummy{LinkAttrs: la} if err := LinkAdd(dummy); err != nil { t.Fatal(err) } link, err := LinkByName("foo") if err != nil { t.Fatal(err) } if dummy, ok := link.(*Dummy); !ok { t.Fatalf("unexpected link type: %T", link) } else { if dummy.TxQLen != 1500 { t.Fatalf("TxQLen is %d, should be %d", dummy.TxQLen, 1500) } } } func TestLinkAddDelBridgeMaster(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() master := &Bridge{LinkAttrs: LinkAttrs{Name: "foo"}} if err := LinkAdd(master); err != nil { t.Fatal(err) } testLinkAddDel(t, &Dummy{LinkAttrs{Name: "bar", MasterIndex: master.Attrs().Index}}) if err := LinkDel(master); err != nil { t.Fatal(err) } } func testLinkSetUnsetResetMaster(t *testing.T, master, newmaster Link) { slave := &Dummy{LinkAttrs{Name: "baz"}} if err := LinkAdd(slave); err != nil { t.Fatal(err) } nonexistsmaster := &Bridge{LinkAttrs: LinkAttrs{Name: "foobar"}} if err := LinkSetMaster(slave, nonexistsmaster); err == nil { t.Fatal("error expected") } if err := LinkSetMaster(slave, master); err != nil { t.Fatal(err) } link, err := LinkByName("baz") if err != nil { t.Fatal(err) } if link.Attrs().MasterIndex != master.Attrs().Index { t.Fatal("Master not set properly") } if err := LinkSetMaster(slave, newmaster); err != nil { t.Fatal(err) } link, err = LinkByName("baz") if err != nil { t.Fatal(err) } if link.Attrs().MasterIndex != newmaster.Attrs().Index { t.Fatal("Master not reset properly") } if err := LinkSetNoMaster(slave); err != nil { t.Fatal(err) } link, err = LinkByName("baz") if err != nil { t.Fatal(err) } if link.Attrs().MasterIndex != 0 { t.Fatal("Master not unset properly") } if err := LinkDel(slave); err != nil { t.Fatal(err) } } func TestLinkSetUnsetResetMaster(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() master := &Bridge{LinkAttrs: LinkAttrs{Name: "foo"}} if err := LinkAdd(master); err != nil { t.Fatal(err) } newmaster := &Bridge{LinkAttrs: LinkAttrs{Name: "bar"}} if err := LinkAdd(newmaster); err != nil { t.Fatal(err) } testLinkSetUnsetResetMaster(t, master, newmaster) if err := LinkDel(newmaster); err != nil { t.Fatal(err) } if err := LinkDel(master); err != nil { t.Fatal(err) } } func TestLinkSetUnsetResetMasterBond(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() master := NewLinkBond(LinkAttrs{Name: "foo"}) master.Mode = BOND_MODE_BALANCE_RR if err := LinkAdd(master); err != nil { t.Fatal(err) } newmaster := NewLinkBond(LinkAttrs{Name: "bar"}) newmaster.Mode = BOND_MODE_BALANCE_RR if err := LinkAdd(newmaster); err != nil { t.Fatal(err) } testLinkSetUnsetResetMaster(t, master, newmaster) if err := LinkDel(newmaster); err != nil { t.Fatal(err) } if err := LinkDel(master); err != nil { t.Fatal(err) } } func TestLinkSetNs(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() basens, err := netns.Get() if err != nil { t.Fatal("Failed to get basens") } defer basens.Close() newns, err := netns.New() if err != nil { t.Fatal("Failed to create newns") } defer newns.Close() link := 
&Veth{LinkAttrs{Name: "foo"}, "bar", nil, nil} if err := LinkAdd(link); err != nil { t.Fatal(err) } peer, err := LinkByName("bar") if err != nil { t.Fatal(err) } LinkSetNsFd(peer, int(basens)) if err != nil { t.Fatal("Failed to set newns for link") } _, err = LinkByName("bar") if err == nil { t.Fatal("Link bar is still in newns") } err = netns.Set(basens) if err != nil { t.Fatal("Failed to set basens") } peer, err = LinkByName("bar") if err != nil { t.Fatal("Link is not in basens") } if err := LinkDel(peer); err != nil { t.Fatal(err) } err = netns.Set(newns) if err != nil { t.Fatal("Failed to set newns") } _, err = LinkByName("foo") if err == nil { t.Fatal("Other half of veth pair not deleted") } } func TestLinkAddDelWireguard(t *testing.T) { minKernelRequired(t, 5, 6) tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Wireguard{LinkAttrs: LinkAttrs{Name: "wg0"}}) } func TestVethPeerNs(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() basens, err := netns.Get() if err != nil { t.Fatal("Failed to get basens") } defer basens.Close() newns, err := netns.New() if err != nil { t.Fatal("Failed to create newns") } defer newns.Close() link := &Veth{LinkAttrs{Name: "foo"}, "bar", nil, NsFd(basens)} if err := LinkAdd(link); err != nil { t.Fatal(err) } _, err = LinkByName("bar") if err == nil { t.Fatal("Link bar is in newns") } err = netns.Set(basens) if err != nil { t.Fatal("Failed to set basens") } _, err = LinkByName("bar") if err != nil { t.Fatal("Link bar is not in basens") } err = netns.Set(newns) if err != nil { t.Fatal("Failed to set newns") } _, err = LinkByName("foo") if err != nil { t.Fatal("Link foo is not in newns") } } func TestVethPeerNs2(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() basens, err := netns.Get() if err != nil { t.Fatal("Failed to get basens") } defer basens.Close() onens, err := netns.New() if err != nil { t.Fatal("Failed to create newns") } defer onens.Close() twons, err := netns.New() if err != nil { t.Fatal("Failed to create twons") } defer twons.Close() link := &Veth{LinkAttrs{Name: "foo", Namespace: NsFd(onens)}, "bar", nil, NsFd(basens)} if err := LinkAdd(link); err != nil { t.Fatal(err) } _, err = LinkByName("foo") if err == nil { t.Fatal("Link foo is in twons") } _, err = LinkByName("bar") if err == nil { t.Fatal("Link bar is in twons") } err = netns.Set(basens) if err != nil { t.Fatal("Failed to set basens") } _, err = LinkByName("bar") if err != nil { t.Fatal("Link bar is not in basens") } err = netns.Set(onens) if err != nil { t.Fatal("Failed to set onens") } _, err = LinkByName("foo") if err != nil { t.Fatal("Link foo is not in onens") } } func TestLinkAddDelVxlan(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() parent := &Dummy{ LinkAttrs{Name: "foo"}, } if err := LinkAdd(parent); err != nil { t.Fatal(err) } vxlan := Vxlan{ LinkAttrs: LinkAttrs{ Name: "bar", }, VxlanId: 10, VtepDevIndex: parent.Index, Learning: true, L2miss: true, L3miss: true, } testLinkAddDel(t, &vxlan) if err := LinkDel(parent); err != nil { t.Fatal(err) } } func TestLinkAddDelVxlanUdpCSum6(t *testing.T) { minKernelRequired(t, 3, 16) tearDown := setUpNetlinkTest(t) defer tearDown() parent := &Dummy{ LinkAttrs{Name: "foo"}, } if err := LinkAdd(parent); err != nil { t.Fatal(err) } vxlan := Vxlan{ LinkAttrs: LinkAttrs{ Name: "bar", }, VxlanId: 10, VtepDevIndex: parent.Index, Learning: true, L2miss: true, L3miss: true, UDP6ZeroCSumTx: true, UDP6ZeroCSumRx: true, } testLinkAddDel(t, &vxlan) if err := LinkDel(parent); err != nil 
{ t.Fatal(err) } } func TestLinkAddDelVxlanGbp(t *testing.T) { minKernelRequired(t, 4, 0) tearDown := setUpNetlinkTest(t) defer tearDown() parent := &Dummy{ LinkAttrs{Name: "foo"}, } if err := LinkAdd(parent); err != nil { t.Fatal(err) } vxlan := Vxlan{ LinkAttrs: LinkAttrs{ Name: "bar", }, VxlanId: 10, VtepDevIndex: parent.Index, Learning: true, L2miss: true, L3miss: true, UDP6ZeroCSumTx: true, UDP6ZeroCSumRx: true, GBP: true, } testLinkAddDel(t, &vxlan) if err := LinkDel(parent); err != nil { t.Fatal(err) } } func TestLinkAddDelVxlanFlowBased(t *testing.T) { minKernelRequired(t, 4, 3) tearDown := setUpNetlinkTest(t) defer tearDown() vxlan := Vxlan{ LinkAttrs: LinkAttrs{ Name: "foo", }, Learning: false, FlowBased: true, } testLinkAddDel(t, &vxlan) } func TestLinkAddDelIPVlanL2(t *testing.T) { minKernelRequired(t, 4, 2) tearDown := setUpNetlinkTest(t) defer tearDown() parent := &Dummy{LinkAttrs{Name: "foo"}} if err := LinkAdd(parent); err != nil { t.Fatal(err) } ipv := IPVlan{ LinkAttrs: LinkAttrs{ Name: "bar", ParentIndex: parent.Index, }, Mode: IPVLAN_MODE_L2, } testLinkAddDel(t, &ipv) } func TestLinkAddDelIPVlanL3(t *testing.T) { minKernelRequired(t, 4, 2) tearDown := setUpNetlinkTest(t) defer tearDown() parent := &Dummy{LinkAttrs{Name: "foo"}} if err := LinkAdd(parent); err != nil { t.Fatal(err) } ipv := IPVlan{ LinkAttrs: LinkAttrs{ Name: "bar", ParentIndex: parent.Index, }, Mode: IPVLAN_MODE_L3, } testLinkAddDel(t, &ipv) } func TestLinkAddDelIPVlanVepa(t *testing.T) { minKernelRequired(t, 4, 15) tearDown := setUpNetlinkTest(t) defer tearDown() parent := &Dummy{LinkAttrs{Name: "foo"}} if err := LinkAdd(parent); err != nil { t.Fatal(err) } ipv := IPVlan{ LinkAttrs: LinkAttrs{ Name: "bar", ParentIndex: parent.Index, }, Mode: IPVLAN_MODE_L3, Flag: IPVLAN_FLAG_VEPA, } testLinkAddDel(t, &ipv) } func TestLinkAddDelIPVlanNoParent(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() ipv := IPVlan{ LinkAttrs: LinkAttrs{ Name: "bar", }, Mode: IPVLAN_MODE_L3, } err := LinkAdd(&ipv) if err == nil { t.Fatal("Add should fail if ipvlan creating without ParentIndex") } if err.Error() != "Can't create ipvlan link without ParentIndex" { t.Fatalf("Error should be about missing ParentIndex, got %q", err) } } func TestLinkByIndex(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() dummy := &Dummy{LinkAttrs{Name: "dummy"}} if err := LinkAdd(dummy); err != nil { t.Fatal(err) } found, err := LinkByIndex(dummy.Index) if err != nil { t.Fatal(err) } if found.Attrs().Index != dummy.Attrs().Index { t.Fatalf("Indices don't match: %v != %v", found.Attrs().Index, dummy.Attrs().Index) } LinkDel(dummy) // test not found _, err = LinkByIndex(dummy.Attrs().Index) if err == nil { t.Fatalf("LinkByIndex(%v) found deleted link", err) } } func TestLinkSet(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() iface := &Dummy{LinkAttrs{Name: "foo"}} if err := LinkAdd(iface); err != nil { t.Fatal(err) } link, err := LinkByName("foo") if err != nil { t.Fatal(err) } err = LinkSetName(link, "bar") if err != nil { t.Fatalf("Could not change interface name: %v", err) } link, err = LinkByName("bar") if err != nil { t.Fatalf("Interface name not changed: %v", err) } err = LinkSetMTU(link, 1400) if err != nil { t.Fatalf("Could not set MTU: %v", err) } link, err = LinkByName("bar") if err != nil { t.Fatal(err) } if link.Attrs().MTU != 1400 { t.Fatal("MTU not changed") } err = LinkSetTxQLen(link, 500) if err != nil { t.Fatalf("Could not set txqlen: %v", err) } link, err = LinkByName("bar") if err != nil 
{ t.Fatal(err) } if link.Attrs().TxQLen != 500 { t.Fatal("txqlen not changed") } addr, err := net.ParseMAC("00:12:34:56:78:AB") if err != nil { t.Fatal(err) } err = LinkSetHardwareAddr(link, addr) if err != nil { t.Fatal(err) } link, err = LinkByName("bar") if err != nil { t.Fatal(err) } if !bytes.Equal(link.Attrs().HardwareAddr, addr) { t.Fatalf("hardware address not changed") } err = LinkSetAlias(link, "barAlias") if err != nil { t.Fatalf("Could not set alias: %v", err) } link, err = LinkByName("bar") if err != nil { t.Fatal(err) } if link.Attrs().Alias != "barAlias" { t.Fatalf("alias not changed") } link, err = LinkByAlias("barAlias") if err != nil { t.Fatal(err) } err = LinkSetGroup(link, 42) if err != nil { t.Fatalf("Could not set group: %v", err) } link, err = LinkByName("bar") if err != nil { t.Fatal(err) } if link.Attrs().Group != 42 { t.Fatal("Link group not changed") } } func TestLinkSetARP(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() iface := &Veth{LinkAttrs: LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1500}, PeerName: "banana"} if err := LinkAdd(iface); err != nil { t.Fatal(err) } link, err := LinkByName("foo") if err != nil { t.Fatal(err) } err = LinkSetARPOff(link) if err != nil { t.Fatal(err) } link, err = LinkByName("foo") if err != nil { t.Fatal(err) } if link.Attrs().RawFlags&unix.IFF_NOARP != uint32(unix.IFF_NOARP) { t.Fatalf("NOARP was not set") } err = LinkSetARPOn(link) if err != nil { t.Fatal(err) } link, err = LinkByName("foo") if err != nil { t.Fatal(err) } if link.Attrs().RawFlags&unix.IFF_NOARP != 0 { t.Fatalf("NOARP is still set") } } func expectLinkUpdate(ch <-chan LinkUpdate, ifaceName string, up bool) bool { for { timeout := time.After(time.Minute) select { case update := <-ch: if ifaceName == update.Link.Attrs().Name && (update.IfInfomsg.Flags&unix.IFF_UP != 0) == up { return true } case <-timeout: return false } } } func TestLinkSubscribe(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() ch := make(chan LinkUpdate) done := make(chan struct{}) defer close(done) if err := LinkSubscribe(ch, done); err != nil { t.Fatal(err) } link := &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar", nil, nil} if err := LinkAdd(link); err != nil { t.Fatal(err) } if !expectLinkUpdate(ch, "foo", false) { t.Fatal("Add update not received as expected") } if err := LinkSetUp(link); err != nil { t.Fatal(err) } if !expectLinkUpdate(ch, "foo", true) { t.Fatal("Link Up update not received as expected") } if err := LinkDel(link); err != nil { t.Fatal(err) } if !expectLinkUpdate(ch, "foo", false) { t.Fatal("Del update not received as expected") } } func TestLinkSubscribeWithOptions(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() ch := make(chan LinkUpdate) done := make(chan struct{}) defer close(done) var lastError error defer func() { if lastError != nil { t.Fatalf("Fatal error received during subscription: %v", lastError) } }() if err := LinkSubscribeWithOptions(ch, done, LinkSubscribeOptions{ ErrorCallback: func(err error) { lastError = err }, }); err != nil { t.Fatal(err) } link := &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar", nil, nil} if err := LinkAdd(link); err != nil { t.Fatal(err) } if !expectLinkUpdate(ch, "foo", false) { t.Fatal("Add update not received as expected") } } func TestLinkSubscribeAt(t *testing.T) { skipUnlessRoot(t) // Create an handle on a custom netns newNs, err := netns.New() if err != nil { t.Fatal(err) } defer newNs.Close() nh, err := NewHandleAt(newNs) if err != 
nil { t.Fatal(err) } defer nh.Delete() // Subscribe for Link events on the custom netns ch := make(chan LinkUpdate) done := make(chan struct{}) defer close(done) if err := LinkSubscribeAt(newNs, ch, done); err != nil { t.Fatal(err) } link := &Veth{LinkAttrs{Name: "test", TxQLen: testTxQLen, MTU: 1400}, "bar", nil, nil} if err := nh.LinkAdd(link); err != nil { t.Fatal(err) } if !expectLinkUpdate(ch, "test", false) { t.Fatal("Add update not received as expected") } if err := nh.LinkSetUp(link); err != nil { t.Fatal(err) } if !expectLinkUpdate(ch, "test", true) { t.Fatal("Link Up update not received as expected") } if err := nh.LinkDel(link); err != nil { t.Fatal(err) } if !expectLinkUpdate(ch, "test", false) { t.Fatal("Del update not received as expected") } } func TestLinkSubscribeListExisting(t *testing.T) { skipUnlessRoot(t) // Create an handle on a custom netns newNs, err := netns.New() if err != nil { t.Fatal(err) } defer newNs.Close() nh, err := NewHandleAt(newNs) if err != nil { t.Fatal(err) } defer nh.Delete() link := &Veth{LinkAttrs{Name: "test", TxQLen: testTxQLen, MTU: 1400}, "bar", nil, nil} if err := nh.LinkAdd(link); err != nil { t.Fatal(err) } // Subscribe for Link events on the custom netns ch := make(chan LinkUpdate) done := make(chan struct{}) defer close(done) if err := LinkSubscribeWithOptions(ch, done, LinkSubscribeOptions{ Namespace: &newNs, ListExisting: true}, ); err != nil { t.Fatal(err) } if !expectLinkUpdate(ch, "test", false) { t.Fatal("Add update not received as expected") } if err := nh.LinkSetUp(link); err != nil { t.Fatal(err) } if !expectLinkUpdate(ch, "test", true) { t.Fatal("Link Up update not received as expected") } if err := nh.LinkDel(link); err != nil { t.Fatal(err) } if !expectLinkUpdate(ch, "test", false) { t.Fatal("Del update not received as expected") } } func TestLinkStats(t *testing.T) { defer setUpNetlinkTest(t)() // Create a veth pair and verify the cross-stats once both // ends are brought up and some ICMPv6 packets are exchanged v0 := "v0" v1 := "v1" vethLink := &Veth{LinkAttrs: LinkAttrs{Name: v0}, PeerName: v1} if err := LinkAdd(vethLink); err != nil { t.Fatal(err) } veth0, err := LinkByName(v0) if err != nil { t.Fatal(err) } if err := LinkSetUp(veth0); err != nil { t.Fatal(err) } veth1, err := LinkByName(v1) if err != nil { t.Fatal(err) } if err := LinkSetUp(veth1); err != nil { t.Fatal(err) } time.Sleep(2 * time.Second) // verify statistics veth0, err = LinkByName(v0) if err != nil { t.Fatal(err) } veth1, err = LinkByName(v1) if err != nil { t.Fatal(err) } v0Stats := veth0.Attrs().Statistics v1Stats := veth1.Attrs().Statistics if v0Stats.RxPackets != v1Stats.TxPackets || v0Stats.TxPackets != v1Stats.RxPackets || v0Stats.RxBytes != v1Stats.TxBytes || v0Stats.TxBytes != v1Stats.RxBytes { t.Fatalf("veth ends counters differ:\n%v\n%v", v0Stats, v1Stats) } } func TestLinkXdp(t *testing.T) { links, err := LinkList() if err != nil { t.Fatal(err) } var testXdpLink Link for _, link := range links { if link.Attrs().Xdp != nil && !link.Attrs().Xdp.Attached { testXdpLink = link break } } if testXdpLink == nil { t.Skipf("No link supporting XDP found") } fd, err := loadSimpleBpf(BPF_PROG_TYPE_XDP, 2 /*XDP_PASS*/) if err != nil { t.Skipf("Loading bpf program failed: %s", err) } if err := LinkSetXdpFd(testXdpLink, fd); err != nil { t.Fatal(err) } if err := LinkSetXdpFdWithFlags(testXdpLink, fd, nl.XDP_FLAGS_UPDATE_IF_NOEXIST); err != unix.EBUSY { t.Fatal(err) } if err := LinkSetXdpFd(testXdpLink, -1); err != nil { t.Fatal(err) } } func 
TestLinkAddDelIptun(t *testing.T) { minKernelRequired(t, 4, 9) tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Iptun{ LinkAttrs: LinkAttrs{Name: "iptunfoo"}, PMtuDisc: 1, Local: net.IPv4(127, 0, 0, 1), Remote: net.IPv4(127, 0, 0, 1)}) } func TestLinkAddDelIp6tnl(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Ip6tnl{ LinkAttrs: LinkAttrs{Name: "ip6tnltest"}, Local: net.ParseIP("2001:db8::100"), Remote: net.ParseIP("2001:db8::200"), }) } func TestLinkAddDelSittun(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Sittun{ LinkAttrs: LinkAttrs{Name: "sittunfoo"}, PMtuDisc: 1, Local: net.IPv4(127, 0, 0, 1), Remote: net.IPv4(127, 0, 0, 1)}) } func TestLinkAddDelVti(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() testLinkAddDel(t, &Vti{ LinkAttrs: LinkAttrs{Name: "vtifoo"}, IKey: 0x101, OKey: 0x101, Local: net.IPv4(127, 0, 0, 1), Remote: net.IPv4(127, 0, 0, 1)}) testLinkAddDel(t, &Vti{ LinkAttrs: LinkAttrs{Name: "vtibar"}, IKey: 0x101, OKey: 0x101, Local: net.IPv6loopback, Remote: net.IPv6loopback}) } func TestBridgeCreationWithMulticastSnooping(t *testing.T) { minKernelRequired(t, 4, 4) tearDown := setUpNetlinkTest(t) defer tearDown() bridgeWithDefaultMcastSnoopName := "foo" bridgeWithDefaultMcastSnoop := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithDefaultMcastSnoopName}} if err := LinkAdd(bridgeWithDefaultMcastSnoop); err != nil { t.Fatal(err) } expectMcastSnooping(t, bridgeWithDefaultMcastSnoopName, true) if err := LinkDel(bridgeWithDefaultMcastSnoop); err != nil { t.Fatal(err) } mcastSnoop := true bridgeWithMcastSnoopOnName := "bar" bridgeWithMcastSnoopOn := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithMcastSnoopOnName}, MulticastSnooping: &mcastSnoop} if err := LinkAdd(bridgeWithMcastSnoopOn); err != nil { t.Fatal(err) } expectMcastSnooping(t, bridgeWithMcastSnoopOnName, true) if err := LinkDel(bridgeWithMcastSnoopOn); err != nil { t.Fatal(err) } mcastSnoop = false bridgeWithMcastSnoopOffName := "foobar" bridgeWithMcastSnoopOff := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithMcastSnoopOffName}, MulticastSnooping: &mcastSnoop} if err := LinkAdd(bridgeWithMcastSnoopOff); err != nil { t.Fatal(err) } expectMcastSnooping(t, bridgeWithMcastSnoopOffName, false) if err := LinkDel(bridgeWithMcastSnoopOff); err != nil { t.Fatal(err) } } func TestBridgeSetMcastSnoop(t *testing.T) { minKernelRequired(t, 4, 4) tearDown := setUpNetlinkTest(t) defer tearDown() bridgeName := "foo" bridge := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeName}} if err := LinkAdd(bridge); err != nil { t.Fatal(err) } expectMcastSnooping(t, bridgeName, true) if err := BridgeSetMcastSnoop(bridge, false); err != nil { t.Fatal(err) } expectMcastSnooping(t, bridgeName, false) if err := BridgeSetMcastSnoop(bridge, true); err != nil { t.Fatal(err) } expectMcastSnooping(t, bridgeName, true) if err := LinkDel(bridge); err != nil { t.Fatal(err) } } func expectMcastSnooping(t *testing.T, linkName string, expected bool) { bridge, err := LinkByName(linkName) if err != nil { t.Fatal(err) } if actual := *bridge.(*Bridge).MulticastSnooping; actual != expected { t.Fatalf("expected %t got %t", expected, actual) } } func TestBridgeSetVlanFiltering(t *testing.T) { minKernelRequired(t, 4, 4) tearDown := setUpNetlinkTest(t) defer tearDown() bridgeName := "foo" bridge := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeName}} if err := LinkAdd(bridge); err != nil { t.Fatal(err) } expectVlanFiltering(t, bridgeName, false) if err := 
BridgeSetVlanFiltering(bridge, true); err != nil { t.Fatal(err) } expectVlanFiltering(t, bridgeName, true) if err := BridgeSetVlanFiltering(bridge, false); err != nil { t.Fatal(err) } expectVlanFiltering(t, bridgeName, false) if err := LinkDel(bridge); err != nil { t.Fatal(err) } } func expectVlanFiltering(t *testing.T, linkName string, expected bool) { bridge, err := LinkByName(linkName) if err != nil { t.Fatal(err) } if actual := *bridge.(*Bridge).VlanFiltering; actual != expected { t.Fatalf("expected %t got %t", expected, actual) } } func TestBridgeCreationWithAgeingTime(t *testing.T) { minKernelRequired(t, 3, 18) tearDown := setUpNetlinkTest(t) defer tearDown() bridgeWithSpecifiedAgeingTimeName := "foo" ageingTime := uint32(20000) bridgeWithSpecifiedAgeingTime := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithSpecifiedAgeingTimeName}, AgeingTime: &ageingTime} if err := LinkAdd(bridgeWithSpecifiedAgeingTime); err != nil { t.Fatal(err) } retrievedBridge, err := LinkByName(bridgeWithSpecifiedAgeingTimeName) if err != nil { t.Fatal(err) } actualAgeingTime := *retrievedBridge.(*Bridge).AgeingTime if actualAgeingTime != ageingTime { t.Fatalf("expected %d got %d", ageingTime, actualAgeingTime) } if err := LinkDel(bridgeWithSpecifiedAgeingTime); err != nil { t.Fatal(err) } bridgeWithDefaultAgeingTimeName := "bar" bridgeWithDefaultAgeingTime := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithDefaultAgeingTimeName}} if err := LinkAdd(bridgeWithDefaultAgeingTime); err != nil { t.Fatal(err) } retrievedBridge, err = LinkByName(bridgeWithDefaultAgeingTimeName) if err != nil { t.Fatal(err) } actualAgeingTime = *retrievedBridge.(*Bridge).AgeingTime if actualAgeingTime != 30000 { t.Fatalf("expected %d got %d", 30000, actualAgeingTime) } if err := LinkDel(bridgeWithDefaultAgeingTime); err != nil { t.Fatal(err) } } func TestBridgeCreationWithHelloTime(t *testing.T) { minKernelRequired(t, 3, 18) tearDown := setUpNetlinkTest(t) defer tearDown() bridgeWithSpecifiedHelloTimeName := "foo" helloTime := uint32(300) bridgeWithSpecifiedHelloTime := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithSpecifiedHelloTimeName}, HelloTime: &helloTime} if err := LinkAdd(bridgeWithSpecifiedHelloTime); err != nil { t.Fatal(err) } retrievedBridge, err := LinkByName(bridgeWithSpecifiedHelloTimeName) if err != nil { t.Fatal(err) } actualHelloTime := *retrievedBridge.(*Bridge).HelloTime if actualHelloTime != helloTime { t.Fatalf("expected %d got %d", helloTime, actualHelloTime) } if err := LinkDel(bridgeWithSpecifiedHelloTime); err != nil { t.Fatal(err) } bridgeWithDefaultHelloTimeName := "bar" bridgeWithDefaultHelloTime := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithDefaultHelloTimeName}} if err := LinkAdd(bridgeWithDefaultHelloTime); err != nil { t.Fatal(err) } retrievedBridge, err = LinkByName(bridgeWithDefaultHelloTimeName) if err != nil { t.Fatal(err) } actualHelloTime = *retrievedBridge.(*Bridge).HelloTime if actualHelloTime != 200 { t.Fatalf("expected %d got %d", 200, actualHelloTime) } if err := LinkDel(bridgeWithDefaultHelloTime); err != nil { t.Fatal(err) } } func TestBridgeCreationWithVlanFiltering(t *testing.T) { minKernelRequired(t, 3, 18) tearDown := setUpNetlinkTest(t) defer tearDown() bridgeWithVlanFilteringEnabledName := "foo" vlanFiltering := true bridgeWithVlanFilteringEnabled := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithVlanFilteringEnabledName}, VlanFiltering: &vlanFiltering} if err := LinkAdd(bridgeWithVlanFilteringEnabled); err != nil { t.Fatal(err) } retrievedBridge, err := 
LinkByName(bridgeWithVlanFilteringEnabledName) if err != nil { t.Fatal(err) } retrievedVlanFilteringState := *retrievedBridge.(*Bridge).VlanFiltering if retrievedVlanFilteringState != vlanFiltering { t.Fatalf("expected %t got %t", vlanFiltering, retrievedVlanFilteringState) } if err := LinkDel(bridgeWithVlanFilteringEnabled); err != nil { t.Fatal(err) } bridgeWithDefaultVlanFilteringName := "bar" bridgeWIthDefaultVlanFiltering := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithDefaultVlanFilteringName}} if err := LinkAdd(bridgeWIthDefaultVlanFiltering); err != nil { t.Fatal(err) } retrievedBridge, err = LinkByName(bridgeWithDefaultVlanFilteringName) if err != nil { t.Fatal(err) } retrievedVlanFilteringState = *retrievedBridge.(*Bridge).VlanFiltering if retrievedVlanFilteringState != false { t.Fatalf("expected %t got %t", false, retrievedVlanFilteringState) } if err := LinkDel(bridgeWIthDefaultVlanFiltering); err != nil { t.Fatal(err) } } func TestLinkSubscribeWithProtinfo(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() master := &Bridge{LinkAttrs: LinkAttrs{Name: "foo"}} if err := LinkAdd(master); err != nil { t.Fatal(err) } slave := &Veth{ LinkAttrs: LinkAttrs{ Name: "bar", TxQLen: testTxQLen, MTU: 1400, MasterIndex: master.Attrs().Index, }, PeerName: "bar-peer", } if err := LinkAdd(slave); err != nil { t.Fatal(err) } ch := make(chan LinkUpdate) done := make(chan struct{}) defer close(done) if err := LinkSubscribe(ch, done); err != nil { t.Fatal(err) } if err := LinkSetHairpin(slave, true); err != nil { t.Fatal(err) } select { case update := <-ch: if !(update.Attrs().Name == "bar" && update.Attrs().Protinfo != nil && update.Attrs().Protinfo.Hairpin) { t.Fatal("Hairpin update not received as expected") } case <-time.After(time.Minute): t.Fatal("Hairpin update timed out") } if err := LinkDel(slave); err != nil { t.Fatal(err) } if err := LinkDel(master); err != nil { t.Fatal(err) } } func testGTPLink(t *testing.T) *GTP { conn1, err := net.ListenUDP("udp", &net.UDPAddr{ IP: net.ParseIP("0.0.0.0"), Port: 3386, }) if err != nil { t.Fatal(err) } conn2, err := net.ListenUDP("udp", &net.UDPAddr{ IP: net.ParseIP("0.0.0.0"), Port: 2152, }) if err != nil { t.Fatal(err) } fd1, _ := conn1.File() fd2, _ := conn2.File() return &GTP{ LinkAttrs: LinkAttrs{ Name: "gtp0", }, FD0: int(fd1.Fd()), FD1: int(fd2.Fd()), } } func TestLinkAddDelGTP(t *testing.T) { tearDown := setUpNetlinkTestWithKModule(t, "gtp") defer tearDown() gtp := testGTPLink(t) testLinkAddDel(t, gtp) } func TestLinkAddDelXfrmi(t *testing.T) { minKernelRequired(t, 4, 19) defer setUpNetlinkTest(t)() lo, _ := LinkByName("lo") testLinkAddDel(t, &Xfrmi{ LinkAttrs: LinkAttrs{Name: "xfrm123", ParentIndex: lo.Attrs().Index}, Ifid: 123}) } func TestLinkAddDelXfrmiNoId(t *testing.T) { minKernelRequired(t, 4, 19) defer setUpNetlinkTest(t)() lo, _ := LinkByName("lo") testLinkAddDel(t, &Xfrmi{ LinkAttrs: LinkAttrs{Name: "xfrm0", ParentIndex: lo.Attrs().Index}}) } func TestLinkByNameWhenLinkIsNotFound(t *testing.T) { _, err := LinkByName("iammissing") if err == nil { t.Fatal("Link not expected to found") } _, ok := err.(LinkNotFoundError) if !ok { t.Errorf("Error returned expected to of LinkNotFoundError type: %v", err) } } func TestLinkByAliasWhenLinkIsNotFound(t *testing.T) { _, err := LinkByAlias("iammissing") if err == nil { t.Fatal("Link not expected to found") } _, ok := err.(LinkNotFoundError) if !ok { t.Errorf("Error returned expected to of LinkNotFoundError type: %v", err) } } func TestLinkAddDelTuntap(t *testing.T) { tearDown := 
setUpNetlinkTest(t) defer tearDown() // Mount sysfs so that sysfs gets the namespace tag of the current network namespace // This is necessary so that /sys shows the network interfaces of the current namespace. if err := syscall.Mount("sysfs", "/sys", "sysfs", syscall.MS_RDONLY, ""); err != nil { t.Fatal("Cannot mount sysfs") } defer func() { if err := syscall.Unmount("/sys", 0); err != nil { t.Fatal("Cannot umount /sys") } }() testLinkAddDel(t, &Tuntap{ LinkAttrs: LinkAttrs{Name: "foo"}, Mode: TUNTAP_MODE_TAP}) } func TestLinkAddDelTuntapMq(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() if err := syscall.Mount("sysfs", "/sys", "sysfs", syscall.MS_RDONLY, ""); err != nil { t.Fatal("Cannot mount sysfs") } defer func() { if err := syscall.Unmount("/sys", 0); err != nil { t.Fatal("Cannot umount /sys") } }() testLinkAddDel(t, &Tuntap{ LinkAttrs: LinkAttrs{Name: "foo"}, Mode: TUNTAP_MODE_TAP, Queues: 4}) testLinkAddDel(t, &Tuntap{ LinkAttrs: LinkAttrs{Name: "foo"}, Mode: TUNTAP_MODE_TAP, Queues: 4, Flags: TUNTAP_MULTI_QUEUE_DEFAULTS | TUNTAP_VNET_HDR}) } func TestLinkAddDelTuntapOwnerGroup(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() if err := syscall.Mount("sysfs", "/sys", "sysfs", syscall.MS_RDONLY, ""); err != nil { t.Fatal("Cannot mount sysfs") } defer func() { if err := syscall.Unmount("/sys", 0); err != nil { t.Fatal("Cannot umount /sys") } }() testLinkAddDel(t, &Tuntap{ LinkAttrs: LinkAttrs{Name: "foo"}, Mode: TUNTAP_MODE_TAP, Owner: 0, Group: 0, }) } func TestVethPeerIndex(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() const ( vethPeer1 = "vethOne" vethPeer2 = "vethTwo" ) link := &Veth{ LinkAttrs: LinkAttrs{ Name: vethPeer1, MTU: 1500, Flags: net.FlagUp, }, PeerName: vethPeer2, } if err := LinkAdd(link); err != nil { t.Fatal(err) } linkOne, err := LinkByName("vethOne") if err != nil { t.Fatal(err) } linkTwo, err := LinkByName("vethTwo") if err != nil { t.Fatal(err) } peerIndexOne, err := VethPeerIndex(&Veth{LinkAttrs: *linkOne.Attrs()}) if err != nil { t.Fatal(err) } peerIndexTwo, err := VethPeerIndex(&Veth{LinkAttrs: *linkTwo.Attrs()}) if err != nil { t.Fatal(err) } if peerIndexOne != linkTwo.Attrs().Index { t.Errorf("VethPeerIndex(%s) mismatch %d != %d", linkOne.Attrs().Name, peerIndexOne, linkTwo.Attrs().Index) } if peerIndexTwo != linkOne.Attrs().Index { t.Errorf("VethPeerIndex(%s) mismatch %d != %d", linkTwo.Attrs().Name, peerIndexTwo, linkOne.Attrs().Index) } } func TestLinkSlaveBond(t *testing.T) { minKernelRequired(t, 3, 13) tearDown := setUpNetlinkTest(t) defer tearDown() const ( bondName = "foo" slaveName = "fooFoo" ) bond := NewLinkBond(LinkAttrs{Name: bondName}) bond.Mode = BOND_MODE_BALANCE_RR if err := LinkAdd(bond); err != nil { t.Fatal(err) } defer LinkDel(bond) slaveDummy := &Dummy{LinkAttrs{Name: slaveName}} if err := LinkAdd(slaveDummy); err != nil { t.Fatal(err) } defer LinkDel(slaveDummy) if err := LinkSetBondSlave(slaveDummy, bond); err != nil { t.Fatal(err) } slaveLink, err := LinkByName(slaveName) if err != nil { t.Fatal(err) } slave := slaveLink.Attrs().Slave if slave == nil { t.Errorf("for %s expected slave is not nil.", slaveName) } if slaveType := slave.SlaveType(); slaveType != "bond" { t.Errorf("for %s expected slave type is 'bond', but '%s'", slaveName, slaveType) } } func TestLinkSetBondSlaveQueueId(t *testing.T) { minKernelRequired(t, 3, 13) tearDown := setUpNetlinkTest(t) defer tearDown() const ( bondName = "foo" slave1Name = "fooFoo" ) bond := NewLinkBond(LinkAttrs{Name: bondName}) if err := 
LinkAdd(bond); err != nil { t.Fatal(err) } defer LinkDel(bond) slave := &Dummy{LinkAttrs{Name: slave1Name}} if err := LinkAdd(slave); err != nil { t.Fatal(err) } defer LinkDel(slave) if err := LinkSetBondSlave(slave, bond); err != nil { t.Fatal(err) } if err := pkgHandle.LinkSetBondSlaveQueueId(slave, 1); err != nil { t.Fatal(err) } } func TestLinkSetBondSlave(t *testing.T) { minKernelRequired(t, 3, 13) tearDown := setUpNetlinkTest(t) defer tearDown() const ( bondName = "foo" slaveOneName = "fooFoo" slaveTwoName = "fooBar" ) bond := NewLinkBond(LinkAttrs{Name: bondName}) bond.Mode = StringToBondModeMap["802.3ad"] bond.AdSelect = BondAdSelect(BOND_AD_SELECT_BANDWIDTH) bond.AdActorSysPrio = 1 bond.AdUserPortKey = 1 bond.AdActorSystem, _ = net.ParseMAC("06:aa:bb:cc:dd:ee") if err := LinkAdd(bond); err != nil { t.Fatal(err) } bondLink, err := LinkByName(bondName) if err != nil { t.Fatal(err) } defer LinkDel(bondLink) if err := LinkAdd(&Dummy{LinkAttrs{Name: slaveOneName}}); err != nil { t.Fatal(err) } slaveOneLink, err := LinkByName(slaveOneName) if err != nil { t.Fatal(err) } defer LinkDel(slaveOneLink) if err := LinkAdd(&Dummy{LinkAttrs{Name: slaveTwoName}}); err != nil { t.Fatal(err) } slaveTwoLink, err := LinkByName(slaveTwoName) if err != nil { t.Fatal(err) } defer LinkDel(slaveTwoLink) if err := LinkSetBondSlave(slaveOneLink, &Bond{LinkAttrs: *bondLink.Attrs()}); err != nil { t.Fatal(err) } if err := LinkSetBondSlave(slaveTwoLink, &Bond{LinkAttrs: *bondLink.Attrs()}); err != nil { t.Fatal(err) } // Update info about interfaces slaveOneLink, err = LinkByName(slaveOneName) if err != nil { t.Fatal(err) } slaveTwoLink, err = LinkByName(slaveTwoName) if err != nil { t.Fatal(err) } if slaveOneLink.Attrs().MasterIndex != bondLink.Attrs().Index { t.Errorf("For %s expected %s to be master", slaveOneLink.Attrs().Name, bondLink.Attrs().Name) } if slaveTwoLink.Attrs().MasterIndex != bondLink.Attrs().Index { t.Errorf("For %s expected %s to be master", slaveTwoLink.Attrs().Name, bondLink.Attrs().Name) } } func TestLinkSetAllmulticast(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() iface := &Veth{LinkAttrs: LinkAttrs{Name: "foo"}, PeerName: "bar"} if err := LinkAdd(iface); err != nil { t.Fatal(err) } link, err := LinkByName("foo") if err != nil { t.Fatal(err) } if err := LinkSetUp(link); err != nil { t.Fatal(err) } link, err = LinkByName("foo") if err != nil { t.Fatal(err) } rawFlagsStart := link.Attrs().RawFlags if err := LinkSetAllmulticastOn(link); err != nil { t.Fatal(err) } link, err = LinkByName("foo") if err != nil { t.Fatal(err) } if link.Attrs().RawFlags&unix.IFF_ALLMULTI != uint32(unix.IFF_ALLMULTI) { t.Fatal("IFF_ALLMULTI was not set") } if err := LinkSetAllmulticastOff(link); err != nil { t.Fatal(err) } link, err = LinkByName("foo") if err != nil { t.Fatal(err) } if link.Attrs().RawFlags&unix.IFF_ALLMULTI != 0 { t.Fatal("IFF_ALLMULTI is still set") } rawFlagsEnd := link.Attrs().RawFlags if rawFlagsStart != rawFlagsEnd { t.Fatalf("RawFlags start value:%d differs from end value:%d", rawFlagsStart, rawFlagsEnd) } } func TestLinkSetMacvlanMode(t *testing.T) { tearDown := setUpNetlinkTest(t) defer tearDown() const ( parentName = "foo" macvlanName = "fooFoo" macvtapName = "fooBar" ) parent := &Dummy{LinkAttrs{Name: parentName}} if err := LinkAdd(parent); err != nil { t.Fatal(err) } defer LinkDel(parent) testMacvlanMode := func(link Link, mode MacvlanMode) { if err := LinkSetMacvlanMode(link, mode); err != nil { t.Fatal(err) } name := link.Attrs().Name result, err := 
LinkByName(name) if err != nil { t.Fatal(err) } var actual MacvlanMode switch l := result.(type) { case *Macvlan: actual = l.Mode case *Macvtap: actual = l.Macvlan.Mode } if actual != mode { t.Fatalf("expected %v got %v for %+v", mode, actual, link) } } macvlan := &Macvlan{ LinkAttrs: LinkAttrs{Name: macvlanName, ParentIndex: parent.Attrs().Index}, Mode: MACVLAN_MODE_BRIDGE, } if err := LinkAdd(macvlan); err != nil { t.Fatal(err) } defer LinkDel(macvlan) testMacvlanMode(macvlan, MACVLAN_MODE_VEPA) testMacvlanMode(macvlan, MACVLAN_MODE_PRIVATE) testMacvlanMode(macvlan, MACVLAN_MODE_SOURCE) testMacvlanMode(macvlan, MACVLAN_MODE_BRIDGE) macvtap := &Macvtap{ Macvlan: Macvlan{ LinkAttrs: LinkAttrs{Name: macvtapName, ParentIndex: parent.Attrs().Index}, Mode: MACVLAN_MODE_BRIDGE, }, } if err := LinkAdd(macvtap); err != nil { t.Fatal(err) } defer LinkDel(macvtap) testMacvlanMode(macvtap, MACVLAN_MODE_VEPA) testMacvlanMode(macvtap, MACVLAN_MODE_PRIVATE) testMacvlanMode(macvtap, MACVLAN_MODE_SOURCE) testMacvlanMode(macvtap, MACVLAN_MODE_BRIDGE) }
[ "\"TRAVIS_BUILD_DIR\"" ]
[]
[ "TRAVIS_BUILD_DIR" ]
[]
["TRAVIS_BUILD_DIR"]
go
1
0
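The record above is a suite of round-trip tests for netlink link manipulation: each test adds a link, re-reads it with LinkByName, mutates it, and re-reads it again before asserting. A minimal standalone sketch of that same pattern follows. It is an assumption-laden illustration, not part of the original record: the import path is taken to be vishvananda/netlink (the excerpt does not show it), the process is assumed to run as root, and the interface names demo0/demo1 are free placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink" // assumed import path for the package under test
)

func main() {
	// Create a veth pair, mirroring the tests: add, re-read by name,
	// change state, then re-read again to see what the kernel reports.
	veth := &netlink.Veth{
		LinkAttrs: netlink.LinkAttrs{Name: "demo0", MTU: 1400},
		PeerName:  "demo1",
	}
	if err := netlink.LinkAdd(veth); err != nil {
		log.Fatal(err)
	}
	defer netlink.LinkDel(veth)

	link, err := netlink.LinkByName("demo0")
	if err != nil {
		log.Fatal(err)
	}
	if err := netlink.LinkSetUp(link); err != nil {
		log.Fatal(err)
	}

	// Attrs() is a snapshot taken when the link was last read, so fetch the
	// link again after the setter, exactly as the tests do.
	link, err = netlink.LinkByName("demo0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operstate:", link.Attrs().OperState, "flags:", link.Attrs().Flags)
}

Re-fetching after every setter is the reason the tests call LinkByName so often: the returned attributes are not live views of kernel state.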
pkg/maps/mapping.go
// Copyright (c) 2022 The Parca Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package maps import ( "errors" "github.com/google/pprof/profile" ) var ErrNotFound = errors.New("not found") type Mapping struct { fileCache *PIDMappingFileCache pidMappings map[uint32][]*profile.Mapping pids []uint32 } func NewMapping(fileCache *PIDMappingFileCache) *Mapping { return &Mapping{ fileCache: fileCache, pidMappings: map[uint32][]*profile.Mapping{}, pids: []uint32{}, } } func (m *Mapping) PIDAddrMapping(pid uint32, addr uint64) (*profile.Mapping, error) { maps, ok := m.pidMappings[pid] if !ok { var err error maps, err = m.fileCache.MappingForPID(pid) if err != nil { return nil, err } m.pidMappings[pid] = maps m.pids = append(m.pids, pid) } return mappingForAddr(maps, addr), nil } type ProcessMapping struct { PID uint32 Mapping *profile.Mapping } func (m *Mapping) AllMappings() ([]*profile.Mapping, []ProcessMapping) { res := []*profile.Mapping{} mappedFiles := []ProcessMapping{} i := uint64(1) // Mapping IDs need to start with 1 in pprof. for _, pid := range m.pids { maps := m.pidMappings[pid] for _, mapping := range maps { if mapping.BuildID != "" { mappedFiles = append(mappedFiles, ProcessMapping{ PID: pid, Mapping: mapping, }) } // TODO(brancz): Do we need to handle potentially duplicate // vdso/vsyscall mappings? mapping.ID = i res = append(res, mapping) i++ } } return res, mappedFiles } func mappingForAddr(mapping []*profile.Mapping, addr uint64) *profile.Mapping { for _, m := range mapping { if m.Start <= addr && m.Limit >= addr { return m } } return nil }
[]
[]
[]
[]
[]
go
null
null
null
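pkg/maps/mapping.go above caches per-PID mappings and resolves sampled addresses to pprof mappings. The sketch below shows the intended call sequence under stated assumptions: the import paths are guessed from the copyright header and are hypothetical, and construction of the *PIDMappingFileCache is not part of the excerpt, so the cache is taken as a parameter rather than built here.

package example

import (
	"fmt"

	"github.com/google/pprof/profile"
	"github.com/parca-dev/parca-agent/pkg/maps" // assumed import path for the package above
)

// resolveSample sketches the intended usage: one Mapping per profile,
// addresses resolved through PIDAddrMapping, and AllMappings called once
// when assembling the final pprof profile.
func resolveSample(cache *maps.PIDMappingFileCache, pid uint32, addr uint64) ([]*profile.Mapping, error) {
	m := maps.NewMapping(cache)

	mapping, err := m.PIDAddrMapping(pid, addr)
	if err != nil {
		return nil, err
	}
	if mapping == nil {
		// Address fell outside every region known for this PID.
		fmt.Printf("address %#x of PID %d has no mapping\n", addr, pid)
	}

	// All mappings seen so far, IDs assigned from 1 upward, plus the subset
	// that carries a build ID (the ProcessMapping slice).
	allMappings, withBuildID := m.AllMappings()
	fmt.Printf("%d mappings total, %d with a build ID\n", len(allMappings), len(withBuildID))
	return allMappings, nil
}

AllMappings numbers mappings from 1 because pprof reserves ID 0 for "no mapping", which is why the loop in the original starts its counter at 1.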
examples/gp.py
# Copyright Contributors to the Pyro project. # SPDX-License-Identifier: Apache-2.0 """ Example: Gaussian Process ========================= In this example we show how to use NUTS to sample from the posterior over the hyperparameters of a gaussian process. .. image:: ../_static/img/examples/gp.png :align: center """ import argparse import os import time import matplotlib import matplotlib.pyplot as plt import numpy as np import jax from jax import vmap import jax.numpy as jnp import jax.random as random import numpyro import numpyro.distributions as dist from numpyro.infer import MCMC, NUTS, init_to_feasible, init_to_median, init_to_sample, init_to_uniform, init_to_value matplotlib.use('Agg') # noqa: E402 # squared exponential kernel with diagonal noise term def kernel(X, Z, var, length, noise, jitter=1.0e-6, include_noise=True): deltaXsq = jnp.power((X[:, None] - Z) / length, 2.0) k = var * jnp.exp(-0.5 * deltaXsq) if include_noise: k += (noise + jitter) * jnp.eye(X.shape[0]) return k def model(X, Y): # set uninformative log-normal priors on our three kernel hyperparameters var = numpyro.sample("kernel_var", dist.LogNormal(0.0, 10.0)) noise = numpyro.sample("kernel_noise", dist.LogNormal(0.0, 10.0)) length = numpyro.sample("kernel_length", dist.LogNormal(0.0, 10.0)) # compute kernel k = kernel(X, X, var, length, noise) # sample Y according to the standard gaussian process formula numpyro.sample("Y", dist.MultivariateNormal(loc=jnp.zeros(X.shape[0]), covariance_matrix=k), obs=Y) # helper function for doing hmc inference def run_inference(model, args, rng_key, X, Y): start = time.time() # demonstrate how to use different HMC initialization strategies if args.init_strategy == "value": init_strategy = init_to_value(values={"kernel_var": 1.0, "kernel_noise": 0.05, "kernel_length": 0.5}) elif args.init_strategy == "median": init_strategy = init_to_median(num_samples=10) elif args.init_strategy == "feasible": init_strategy = init_to_feasible() elif args.init_strategy == "sample": init_strategy = init_to_sample() elif args.init_strategy == "uniform": init_strategy = init_to_uniform(radius=1) kernel = NUTS(model, init_strategy=init_strategy) mcmc = MCMC(kernel, args.num_warmup, args.num_samples, num_chains=args.num_chains, thinning=args.thinning, progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True) mcmc.run(rng_key, X, Y) mcmc.print_summary() print('\nMCMC elapsed time:', time.time() - start) return mcmc.get_samples() # do GP prediction for a given set of hyperparameters. this makes use of the well-known # formula for gaussian process predictions def predict(rng_key, X, Y, X_test, var, length, noise): # compute kernels between train and test data, etc. 
k_pp = kernel(X_test, X_test, var, length, noise, include_noise=True) k_pX = kernel(X_test, X, var, length, noise, include_noise=False) k_XX = kernel(X, X, var, length, noise, include_noise=True) K_xx_inv = jnp.linalg.inv(k_XX) K = k_pp - jnp.matmul(k_pX, jnp.matmul(K_xx_inv, jnp.transpose(k_pX))) sigma_noise = jnp.sqrt(jnp.clip(jnp.diag(K), a_min=0.)) * jax.random.normal(rng_key, X_test.shape[:1]) mean = jnp.matmul(k_pX, jnp.matmul(K_xx_inv, Y)) # we return both the mean function and a sample from the posterior predictive for the # given set of hyperparameters return mean, mean + sigma_noise # create artificial regression dataset def get_data(N=30, sigma_obs=0.15, N_test=400): np.random.seed(0) X = jnp.linspace(-1, 1, N) Y = X + 0.2 * jnp.power(X, 3.0) + 0.5 * jnp.power(0.5 + X, 2.0) * jnp.sin(4.0 * X) Y += sigma_obs * np.random.randn(N) Y -= jnp.mean(Y) Y /= jnp.std(Y) assert X.shape == (N,) assert Y.shape == (N,) X_test = jnp.linspace(-1.3, 1.3, N_test) return X, Y, X_test def main(args): X, Y, X_test = get_data(N=args.num_data) # do inference rng_key, rng_key_predict = random.split(random.PRNGKey(0)) samples = run_inference(model, args, rng_key, X, Y) # do prediction vmap_args = (random.split(rng_key_predict, samples['kernel_var'].shape[0]), samples['kernel_var'], samples['kernel_length'], samples['kernel_noise']) means, predictions = vmap(lambda rng_key, var, length, noise: predict(rng_key, X, Y, X_test, var, length, noise))(*vmap_args) mean_prediction = np.mean(means, axis=0) percentiles = np.percentile(predictions, [5.0, 95.0], axis=0) # make plots fig, ax = plt.subplots(figsize=(8, 6), constrained_layout=True) # plot training data ax.plot(X, Y, 'kx') # plot 90% confidence level of predictions ax.fill_between(X_test, percentiles[0, :], percentiles[1, :], color='lightblue') # plot mean prediction ax.plot(X_test, mean_prediction, 'blue', ls='solid', lw=2.0) ax.set(xlabel="X", ylabel="Y", title="Mean predictions with 90% CI") plt.savefig("gp_plot.pdf") if __name__ == "__main__": assert numpyro.__version__.startswith('0.4.1') parser = argparse.ArgumentParser(description="Gaussian Process example") parser.add_argument("-n", "--num-samples", nargs="?", default=1000, type=int) parser.add_argument("--num-warmup", nargs='?', default=1000, type=int) parser.add_argument("--num-chains", nargs='?', default=1, type=int) parser.add_argument("--thinning", nargs='?', default=2, type=int) parser.add_argument("--num-data", nargs='?', default=25, type=int) parser.add_argument("--device", default='cpu', type=str, help='use "cpu" or "gpu".') parser.add_argument("--init-strategy", default='median', type=str, choices=['median', 'feasible', 'value', 'uniform', 'sample']) args = parser.parse_args() numpyro.set_platform(args.device) numpyro.set_host_device_count(args.num_chains) main(args)
[]
[]
[]
[]
[]
python
0
0
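The predict() helper in the examples/gp.py record above is the textbook Gaussian process posterior predictive computed with jnp.linalg.inv. In notation chosen here to match the code (K_XX is the code's k_XX with noise and jitter on the diagonal, K_*X is k_pX without noise, K_** is k_pp with noise), the two quantities it forms are

\mu_* = K_{*X}\, K_{XX}^{-1}\, y, \qquad
\Sigma_* = K_{**} - K_{*X}\, K_{XX}^{-1}\, K_{*X}^{\top}

and the returned sample is the per-point draw \mu_* + \sqrt{\operatorname{diag}(\Sigma_*)} \odot z with z \sim \mathcal{N}(0, I); that is, the example keeps only the diagonal of the posterior predictive covariance when drawing from it.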
mongo-driver/mongo/database_internal_test.go
// Copyright (C) MongoDB, Inc. 2017-present. // // Licensed under the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 package mongo import ( "context" "errors" "testing" "fmt" "os" "github.com/stretchr/testify/require" "github.com/dollarkillerx/mongo/mongo-driver/bson" "github.com/dollarkillerx/mongo/mongo-driver/bson/bsoncodec" "github.com/dollarkillerx/mongo/mongo-driver/bson/primitive" "github.com/dollarkillerx/mongo/mongo-driver/internal/testutil" "github.com/dollarkillerx/mongo/mongo-driver/mongo/options" "github.com/dollarkillerx/mongo/mongo-driver/mongo/readconcern" "github.com/dollarkillerx/mongo/mongo-driver/mongo/readpref" "github.com/dollarkillerx/mongo/mongo-driver/mongo/writeconcern" "github.com/dollarkillerx/mongo/mongo-driver/x/bsonx" "github.com/dollarkillerx/mongo/mongo-driver/x/mongo/driver/connstring" "github.com/dollarkillerx/mongo/mongo-driver/x/mongo/driver/description" ) func createTestDatabase(t *testing.T, name *string, opts ...*options.DatabaseOptions) *Database { if name == nil { db := testutil.DBName(t) name = &db } client := createTestClient(t) dbOpts := []*options.DatabaseOptions{options.Database().SetWriteConcern(writeconcern.New(writeconcern.WMajority()))} dbOpts = append(dbOpts, opts...) return client.Database(*name, dbOpts...) } func TestDatabase_initialize(t *testing.T) { t.Parallel() name := "foo" db := createTestDatabase(t, &name) require.Equal(t, db.name, name) require.NotNil(t, db.client) } func compareDbs(t *testing.T, expected *Database, got *Database) { switch { case expected.readPreference != got.readPreference: t.Errorf("expected read preference %#v. got %#v", expected.readPreference, got.readPreference) case expected.readConcern != got.readConcern: t.Errorf("expected read concern %#v. got %#v", expected.readConcern, got.readConcern) case expected.writeConcern != got.writeConcern: t.Errorf("expected write concern %#v. got %#v", expected.writeConcern, got.writeConcern) case expected.registry != got.registry: t.Errorf("expected registry %#v, got %#v", expected.registry, got.registry) } } func TestDatabase_Options(t *testing.T) { name := "testDb_options" rpPrimary := readpref.Primary() rpSecondary := readpref.Secondary() wc1 := writeconcern.New(writeconcern.W(5)) wc2 := writeconcern.New(writeconcern.W(10)) rcLocal := readconcern.Local() rcMajority := readconcern.Majority() reg := bsoncodec.NewRegistryBuilder().Build() opts := options.Database().SetReadPreference(rpPrimary).SetReadConcern(rcLocal).SetWriteConcern(wc1). 
SetReadPreference(rpSecondary).SetReadConcern(rcMajority).SetWriteConcern(wc2).SetRegistry(reg) expectedDb := &Database{ readConcern: rcMajority, readPreference: rpSecondary, writeConcern: wc2, registry: reg, } t.Run("IndividualOptions", func(t *testing.T) { // if options specified multiple times, last instance should take precedence db := createTestDatabase(t, &name, opts) compareDbs(t, expectedDb, db) }) } func TestDatabase_InheritOptions(t *testing.T) { name := "testDb_options_inherit" client := createTestClient(t) rpPrimary := readpref.Primary() rcLocal := readconcern.Local() client.readPreference = rpPrimary client.readConcern = rcLocal reg := bsoncodec.NewRegistryBuilder().Build() wc1 := writeconcern.New(writeconcern.W(10)) db := client.Database(name, options.Database().SetWriteConcern(wc1).SetRegistry(reg)) // db should inherit read preference and read concern from client switch { case db.readPreference != rpPrimary: t.Errorf("expected read preference primary. got %#v", db.readPreference) case db.readConcern != rcLocal: t.Errorf("expected read concern local. got %#v", db.readConcern) case db.writeConcern != wc1: t.Errorf("expected write concern %#v. got %#v", wc1, db.writeConcern) case db.registry != reg: t.Errorf("expected registry %#v, got %#v", reg, db.registry) } } func TestDatabase_ReplaceTopologyError(t *testing.T) { t.Parallel() if testing.Short() { t.Skip() } cs := testutil.ConnString(t) c, err := NewClient(options.Client().ApplyURI(cs.String())) require.NoError(t, err) require.NotNil(t, c) db := c.Database("TestDatabase_ReplaceTopologyError") err = db.RunCommand(context.Background(), bsonx.Doc{{"ismaster", bsonx.Int32(1)}}).Err() require.Equal(t, err, ErrClientDisconnected) err = db.Drop(ctx) require.Equal(t, err, ErrClientDisconnected) _, err = db.ListCollections(ctx, bsonx.Doc{}) require.Equal(t, err, ErrClientDisconnected) } func TestDatabase_RunCommand(t *testing.T) { t.Parallel() db := createTestDatabase(t, nil) var result bsonx.Doc err := db.RunCommand(context.Background(), bsonx.Doc{{"ismaster", bsonx.Int32(1)}}).Decode(&result) require.NoError(t, err) isMaster, err := result.LookupErr("ismaster") require.NoError(t, err) require.Equal(t, isMaster.Type(), bson.TypeBoolean) require.Equal(t, isMaster.Boolean(), true) ok, err := result.LookupErr("ok") require.NoError(t, err) require.Equal(t, ok.Type(), bson.TypeDouble) require.Equal(t, ok.Double(), 1.0) } func TestDatabase_RunCommand_DecodeStruct(t *testing.T) { t.Parallel() db := createTestDatabase(t, nil) result := struct { Ismaster bool `bson:"ismaster"` Ok float64 `bson:"ok"` }{} err := db.RunCommand(context.Background(), bsonx.Doc{{"ismaster", bsonx.Int32(1)}}).Decode(&result) require.NoError(t, err) require.Equal(t, result.Ismaster, true) require.Equal(t, result.Ok, 1.0) } func TestDatabase_NilDocumentError(t *testing.T) { t.Parallel() db := createTestDatabase(t, nil) err := db.RunCommand(context.Background(), nil).Err() require.Equal(t, err, ErrNilDocument) _, err = db.Watch(context.Background(), nil) require.Equal(t, err, errors.New("can only transform slices and arrays into aggregation pipelines, but got invalid")) _, err = db.ListCollections(context.Background(), nil) require.Equal(t, err, ErrNilDocument) _, err = db.ListCollectionNames(context.Background(), nil) require.Equal(t, err, ErrNilDocument) } func TestDatabase_Drop(t *testing.T) { t.Parallel() name := "TestDatabase_Drop" db := createTestDatabase(t, &name) client := createTestClient(t) err := db.Drop(context.Background()) require.NoError(t, err) list, 
err := client.ListDatabaseNames(context.Background(), bsonx.Doc{}) require.NoError(t, err) require.NotContains(t, list, name) } func TestListCollectionNames(t *testing.T) { serverVersion, err := getServerVersion(createTestDatabase(t, nil)) require.NoError(t, err) if compareVersions(t, serverVersion, "4.0") < 0 { t.Skip() } testcases := []struct { name string filter bson.D found bool }{ {"no_filter", bson.D{}, true}, {"filter", bson.D{{"name", "filter"}}, true}, {"filter_not_found", bson.D{{"name", "123"}}, false}, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { dbName := "TestListCollectionNames" coll := createTestCollection(t, &dbName, &tc.name) defer func() { _ = coll.Drop(ctx) }() cols, err := coll.Database().ListCollectionNames(context.Background(), tc.filter) require.NoError(t, err) if !tc.found { require.Len(t, cols, 0) return } require.Len(t, cols, 1) require.Equal(t, tc.name, cols[0], "collection name mismatch; expected %s, got %s", tc.name, cols[0]) }) } } // creates 1 normal collection and 1 capped collection of size 64*1024 func setupListCollectionsDb(db *Database) (uncappedName string, cappedName string, err error) { uncappedName, cappedName = "listcoll_uncapped", "listcoll_capped" uncappedColl := db.Collection(uncappedName, options.Collection().SetWriteConcern(writeconcern.New(writeconcern.WMajority()))) // insert a document to ensure the database exists _, _ = uncappedColl.InsertOne(context.Background(), bson.D{}) err = db.RunCommand( context.Background(), bsonx.Doc{ {"create", bsonx.String(cappedName)}, {"capped", bsonx.Boolean(true)}, {"size", bsonx.Int32(64 * 1024)}, }, ).Err() if err != nil { return "", "", err } cappedColl := db.Collection(cappedName) id := primitive.NewObjectID() want := bsonx.Elem{"_id", bsonx.ObjectID(id)} doc := bsonx.Doc{want, {"x", bsonx.Int32(1)}} _, err = uncappedColl.InsertOne(context.Background(), doc) if err != nil { return "", "", err } _, err = cappedColl.InsertOne(context.Background(), doc) if err != nil { return "", "", err } return uncappedName, cappedName, nil } // verifies both collection names are found in cursor, cursor does not have extra collections, and cursor has no // duplicates func verifyListCollections(cursor *Cursor, uncappedName string, cappedName string, cappedOnly bool) (err error) { var uncappedFound bool var cappedFound bool for cursor.Next(context.Background()) { next := &bsonx.Doc{} err = cursor.Decode(next) if err != nil { return err } elem, err := next.LookupErr("name") if err != nil { return err } if elem.Type() != bson.TypeString { return fmt.Errorf("incorrect type for 'name'. got %v. want %v", elem.Type(), bson.TypeString) } elemName := elem.StringValue() // legacy servers can return an indexes collection that shouldn't be considered here if elemName != cappedName && elemName != uncappedName { continue } if elemName == uncappedName && !uncappedFound { if cappedOnly { return fmt.Errorf("found uncapped collection %s. 
expected only capped collections", uncappedName) } uncappedFound = true continue } if elemName == cappedName && !cappedFound { cappedFound = true continue } // duplicate found return fmt.Errorf("found duplicate collection %s", elemName) } if !cappedFound { return fmt.Errorf("did not find collection %s", cappedName) } if !cappedOnly && !uncappedFound { return fmt.Errorf("did not find collection %s", uncappedName) } return nil } func listCollectionsTest(db *Database, cappedOnly bool, cappedName, uncappedName string) error { var filter bsonx.Doc if cappedOnly { filter = bsonx.Doc{{"options.capped", bsonx.Boolean(true)}} } var cursor *Cursor var err error for i := 0; i < 10; i++ { cursor, err = db.ListCollections(context.Background(), filter) if err != nil { return err } err = verifyListCollections(cursor, uncappedName, cappedName, cappedOnly) if err == nil { return nil } } return err // all tests failed } // get the connection string for a direct connection to a secondary in a replica set func getSecondaryConnString(t *testing.T) connstring.ConnString { topo := testutil.Topology(t) for _, server := range topo.Description().Servers { if server.Kind != description.RSSecondary { continue } fullAddr := "mongodb://" + server.Addr.String() + "/?connect=direct" cs, err := connstring.Parse(fullAddr) require.NoError(t, err) return cs } t.Fatalf("no secondary found for %s", t.Name()) return connstring.ConnString{} } func TestDatabase_ListCollections(t *testing.T) { var listCollectionsTable = []struct { name string expectedTopology string cappedOnly bool direct bool }{ {"standalone_nofilter", "server", false, false}, {"standalone_filter", "server", true, false}, {"replicaset_nofilter", "replica_set", false, false}, {"replicaset_filter", "replica_set", true, false}, {"replicaset_secondary_nofilter", "replica_set", false, true}, {"replicaset_secondary_filter", "replica_set", true, true}, {"sharded_nofilter", "sharded_cluster", false, false}, {"sharded_filter", "sharded_cluster", true, false}, } for _, tt := range listCollectionsTable { t.Run(tt.name, func(t *testing.T) { if os.Getenv("TOPOLOGY") != tt.expectedTopology { t.Skip() } createDb := createTestDatabase(t, &tt.name, options.Database().SetWriteConcern(wcMajority)) defer func() { err := createDb.Drop(context.Background()) require.NoError(t, err) }() uncappedName, cappedName, err := setupListCollectionsDb(createDb) require.NoError(t, err) var cs connstring.ConnString if tt.direct { // TODO(GODRIVER-641) - correctly set read preference on direct connections for OP_MSG t.Skip() cs = getSecondaryConnString(t) } else { cs = testutil.ConnString(t) } client := createTestClientWithConnstring(t, cs) db := client.Database(tt.name) err = listCollectionsTest(db, tt.cappedOnly, cappedName, uncappedName) require.NoError(t, err) }) } } func TestDatabase_RunCommandCursor(t *testing.T) { var elms []interface{} for i := 0; i < 5; i++ { elms = append(elms, bson.D{ {"x", i}, }) } tests := []struct { name string ctx context.Context runCommand interface{} readPref *readpref.ReadPref toInsert []interface{} expectedErr error minVersion string }{ {"Success", nil, bson.D{ {"find", "bar"}, }, nil, elms, nil, "3.2"}, {"Success", nil, bson.D{ {"aggregate", "bar"}, {"pipeline", bson.A{}}, {"cursor", bson.D{}}, }, nil, elms, nil, "2.6"}, {"Failure", nil, bson.D{ {"ping", 1}, }, nil, elms, errors.New("cursor should be an embedded document but is of BSON type invalid"), "2.6"}, } for _, test := range tests { t.Run(test.name, func(tt *testing.T) { serverVersion, err := 
getServerVersion(createTestDatabase(t, nil)) require.NoError(t, err) if compareVersions(t, serverVersion, test.minVersion) < 0 { tt.Skip() } foo := "foo" bar := "bar" coll := createTestCollection(t, &foo, &bar, options.Collection().SetWriteConcern(wcMajority).SetReadPreference(test.readPref)) defer func() { _ = coll.Drop(ctx) }() res, err := coll.InsertMany(test.ctx, test.toInsert) require.NoError(t, err, "error inserting into database") cursor, err := coll.Database().RunCommandCursor(test.ctx, test.runCommand) require.Equal(tt, test.expectedErr, err, "db.RunCommandCursor returned different error than expected") if cursor != nil { var count int for cursor.Next(test.ctx) { count++ } require.Equal(t, len(res.InsertedIDs), count, "doc count mismatch; expected %d, got %d", len(res.InsertedIDs), count) } }) } }
[ "\"TOPOLOGY\"" ]
[]
[ "TOPOLOGY" ]
[]
["TOPOLOGY"]
go
1
0
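The database tests above exercise RunCommand by decoding the ismaster reply both into a bsonx.Doc and into a plain struct. Outside the test harness the same pattern looks roughly like the sketch below; it assumes the forked driver keeps the upstream mongo-driver behavior for Connect/Disconnect (only NewClient appears in the excerpt), and the URI and database name are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/dollarkillerx/mongo/mongo-driver/bson"
	"github.com/dollarkillerx/mongo/mongo-driver/mongo"
	"github.com/dollarkillerx/mongo/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()

	// NewClient is shown in the tests; Connect/Disconnect are assumed to match
	// the upstream API this package is forked from.
	client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	if err := client.Connect(ctx); err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(ctx)

	// Decode the command reply straight into a struct, as
	// TestDatabase_RunCommand_DecodeStruct does.
	var result struct {
		Ismaster bool    `bson:"ismaster"`
		Ok       float64 `bson:"ok"`
	}
	if err := client.Database("admin").RunCommand(ctx, bson.D{{"ismaster", 1}}).Decode(&result); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("ismaster=%t ok=%v\n", result.Ismaster, result.Ok)
}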
examples/aws-ecs-go-web-api/main.go
package main import ( "crypto/tls" "encoding/json" "expvar" "fmt" "log" "net" "net/http" "net/url" "os" "os/signal" "path/filepath" "strings" "syscall" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/go-redis/redis" "github.com/jmoiron/sqlx" "github.com/kelseyhightower/envconfig" _ "github.com/lib/pq" "github.com/pkg/errors" "github.com/rogaha/devops/pkg/devdeploy" "golang.org/x/crypto/acme" "golang.org/x/crypto/acme/autocert" ) // build is the git version of this program. It is set using build flags in the makefile. var build = "develop" // service is the name of the program used for logging, tracing, etc. var service = "WEB_API" func main() { // ========================================================================= // Logging log.SetFlags(log.LstdFlags | log.Lmicroseconds | log.Lshortfile) log.SetPrefix(service + " : ") log := log.New(os.Stdout, log.Prefix(), log.Flags()) // Print the build version for our logs. Also expose it under /debug/vars. expvar.NewString("build").Set(build) log.Printf("main : Started : Service Initializing version %q", build) defer log.Println("main : Completed") // ========================================================================= // Configuration // Use environment variables to configure service. Used defined envconfig value for the key or prefix the key with // the service name, ie. WEB_API_AWS_AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY_ID var cfg struct { Env string `default:"dev" envconfig:"ENV"` Service struct { Name string `default:"web-api" envconfig:"SERVICE_NAME"` Project string `default:"" envconfig:"PROJECT_NAME"` BaseUrl string `default:"" envconfig:"BASE_URL" example:"http://api.example.saasstartupkit.com"` HostNames []string `envconfig:"HOST_NAMES" example:"alternative-subdomain.example.saasstartupkit.com"` Host string `default:"0.0.0.0:4000" envconfig:"HOST"` EnableHTTPS bool `default:"false" envconfig:"ENABLE_HTTPS"` HTTPSHost string `default:"" envconfig:"HTTPS_HOST"` StaticFiles struct { Dir string `default:"./static" envconfig:"STATIC_DIR"` S3Enabled bool `envconfig:"S3_ENABLED"` S3Prefix string `default:"public/web_app/static" envconfig:"S3_PREFIX"` CloudFrontEnabled bool `envconfig:"CLOUDFRONT_ENABLED"` } } Redis struct { Host string `default:":6379" envconfig:"HOST"` DB int `default:"1" envconfig:"DB"` DialTimeout time.Duration `default:"5s" envconfig:"DIAL_TIMEOUT"` } DB struct { Host string `default:"127.0.0.1:5433" envconfig:"HOST"` User string `default:"postgres" envconfig:"USERNAME"` Pass string `default:"postgres" envconfig:"PASSWORD" json:"-"` // don't print Database string `default:"shared" envconfig:"DATABASE"` Driver string `default:"postgres" envconfig:"DRIVER"` Timezone string `default:"utc" envconfig:"TIMEZONE"` DisableTLS bool `default:"true" envconfig:"DISABLE_TLS"` } Aws struct { AccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"` SecretAccessKey string `envconfig:"AWS_SECRET_ACCESS_KEY" json:"-"` // don't print Region string `default:"us-west-2" envconfig:"AWS_DEFAULT_REGION"` S3BucketPrivate string `envconfig:"S3_BUCKET_PRIVATE"` S3BucketPublic string `envconfig:"S3_BUCKET_PUBLIC"` SecretsManagerConfigPrefix string `default:"" envconfig:"SECRETS_MANAGER_CONFIG_PREFIX"` // Get an AWS session from an implicit source if no explicit // configuration is provided. This is useful for taking advantage of // EC2/ECS instance roles. 
UseRole bool `envconfig:"AWS_USE_ROLE"` } BuildInfo struct { CiCommitRefName string `envconfig:"CI_COMMIT_REF_NAME"` CiCommitShortSha string `envconfig:"CI_COMMIT_SHORT_SHA"` CiCommitSha string `envconfig:"CI_COMMIT_SHA"` CiCommitTag string `envconfig:"CI_COMMIT_TAG"` CiJobId string `envconfig:"CI_JOB_ID"` CiJobUrl string `envconfig:"CI_JOB_URL"` CiPipelineId string `envconfig:"CI_PIPELINE_ID"` CiPipelineUrl string `envconfig:"CI_PIPELINE_URL"` } } // For additional details refer to https://github.com/kelseyhightower/envconfig if err := envconfig.Process(service, &cfg); err != nil { log.Fatalf("main : Parsing Config : %+v", err) } // AWS access keys are required, if roles are enabled, remove any placeholders. if cfg.Aws.UseRole { cfg.Aws.AccessKeyID = "" cfg.Aws.SecretAccessKey = "" // Get an AWS session from an implicit source if no explicit // configuration is provided. This is useful for taking advantage of // EC2/ECS instance roles. if cfg.Aws.Region == "" { sess := session.Must(session.NewSession()) md := ec2metadata.New(sess) var err error cfg.Aws.Region, err = md.Region() if err != nil { log.Fatalf("main : Load region of ecs metadata : %+v", err) } } } // Set the default AWS Secrets Manager prefix used for name to store config files that will be persisted across // deployments and distributed to each instance of the service running. if cfg.Aws.SecretsManagerConfigPrefix == "" { var pts []string if cfg.Service.Project != "" { pts = append(pts, cfg.Service.Project) } pts = append(pts, cfg.Env) cfg.Aws.SecretsManagerConfigPrefix = filepath.Join(pts...) } // Print the config for our logs. It's important to any credentials in the config // that could expose a security risk are excluded from being json encoded by // applying the tag `json:"-"` to the struct var. { cfgJSON, err := json.MarshalIndent(cfg, "", " ") if err != nil { log.Fatalf("main : Marshalling Config to JSON : %+v", err) } log.Printf("main : Config : %v\n", string(cfgJSON)) } // ========================================================================= // Init AWS Session var awsSession *session.Session if cfg.Aws.UseRole { // Get an AWS session from an implicit source if no explicit // configuration is provided. This is useful for taking advantage of // EC2/ECS instance roles. awsSession = session.Must(session.NewSession()) if cfg.Aws.Region != "" { awsSession.Config.WithRegion(cfg.Aws.Region) } log.Printf("main : AWS : Using role.\n") } else if cfg.Aws.AccessKeyID != "" { creds := credentials.NewStaticCredentials(cfg.Aws.AccessKeyID, cfg.Aws.SecretAccessKey, "") awsSession = session.New(&aws.Config{Region: aws.String(cfg.Aws.Region), Credentials: creds}) log.Printf("main : AWS : Using static credentials\n") } // ========================================================================= // Start Database var dbUrl url.URL { // Query parameters. var q url.Values = make(map[string][]string) // Handle SSL Mode if cfg.DB.DisableTLS { q.Set("sslmode", "disable") } else { q.Set("sslmode", "require") } q.Set("timezone", cfg.DB.Timezone) // Construct url. dbUrl = url.URL{ Scheme: cfg.DB.Driver, User: url.UserPassword(cfg.DB.User, cfg.DB.Pass), Host: cfg.DB.Host, Path: cfg.DB.Database, RawQuery: q.Encode(), } } masterDb, err := sqlx.Open(cfg.DB.Driver, dbUrl.String()) if err != nil { log.Fatalf("main : Register DB : %s : %+v", cfg.DB.Driver, err) } defer masterDb.Close() // Enable AWS to auto pause the DB when no activity. 
masterDb.SetConnMaxLifetime(time.Hour) // ========================================================================= // Start Redis if enabled var redisClient *redis.Client if strings.Trim(cfg.Redis.Host, "-") != "" { log.Println("main : Started : Initialize Redis") redisClient = redis.NewClient(&redis.Options{ Addr: cfg.Redis.Host, DB: cfg.Redis.DB, DialTimeout: cfg.Redis.DialTimeout, }) defer redisClient.Close() if err := redisClient.Ping().Err(); err != nil { log.Fatalf("main : Ping Redis : %+v", err) } } // ========================================================================= // URL Formatter // s3UrlFormatter is a help function used by to convert an relative static file path to publicly available URL. var staticUrlFormatter func(string) string if cfg.Service.StaticFiles.S3Enabled || cfg.Service.StaticFiles.CloudFrontEnabled { s3UrlFormatter, err := devdeploy.S3UrlFormatter(awsSession, cfg.Aws.S3BucketPublic, cfg.Service.StaticFiles.S3Prefix, cfg.Service.StaticFiles.CloudFrontEnabled) if err != nil { log.Fatalf("main : S3UrlFormatter failed : %+v", err) } staticUrlFormatter = func(p string) string { // When the path starts with a forward slash its referencing a local file, // make sure the static file prefix is included if strings.HasPrefix(p, "/") || !strings.HasPrefix(p, "://") { p = filepath.Join(cfg.Service.StaticFiles.S3Prefix, p) } return s3UrlFormatter(p) } } else { staticUrlFormatter = func(p string) string { return p } } // ========================================================================= // Register routes // Ping Handler - used by AWS ELB target group health check. // build/cicd/internal/config/service.go // ctx.AwsElbLoadBalancer.TargetGroups = []*devdeploy.AwsElbTargetGroup{ // &devdeploy.AwsElbTargetGroup{ // Name: fmt.Sprintf("%s-http", ctx.Name), // Port: 80, // Protocol: "HTTP", // TargetType: "ip", // HealthCheckEnabled: true, // HealthCheckIntervalSeconds: 30, // HealthCheckPath: "/ping", // HealthCheckProtocol: "HTTP", // HealthCheckTimeoutSeconds: 5, // HealthyThresholdCount: 3, // UnhealthyThresholdCount: 3, // Matcher: "200", // }, // } // build/cicd/internal/config/service.go // container1 := &ecs.ContainerDefinition{ // ... // HealthCheck: &ecs.HealthCheck{ // Retries: aws.Int64(3), // Command: aws.StringSlice([]string{ // "CMD-SHELL", // "curl -f http://localhost/ping || exit 1", // }), // Timeout: aws.Int64(5), // Interval: aws.Int64(60), // http.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) fmt.Fprintf(w, "PONG") }) // Only test the DB once on init, so the database can be auto-paused for AWS. dbErr := testDbConn(masterDb) // Main Handler http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { // Serve the file from the local file system. 
if strings.TrimPrefix(r.RequestURI, "/") != "" { fp := filepath.Join(cfg.Service.StaticFiles.Dir, r.RequestURI) http.ServeFile(w, r, fp) return } w.Header().Set("Content-Type", "text/html") fmt.Fprintf(w, "Welcome to the example web API!\n<br/>") if dbErr != nil { fmt.Fprintf(w, "Database connection: %s\n<br/>", dbErr) } else { fmt.Fprintf(w, "Database connection: ok\n<br/>") } if redisClient != nil { if err := testRedisConn(redisClient); err != nil { fmt.Fprintf(w, "Redis connection: %s\n<br/>", err) } else { fmt.Fprintf(w, "Redis connection: ok\n<br/>") } } fmt.Fprintf(w, "CI_COMMIT_REF_NAME: %s\n<br/>", os.Getenv("CI_COMMIT_REF_NAME")) fmt.Fprintf(w, "CI_COMMIT_SHORT_SHA: %s\n<br/>", os.Getenv("CI_COMMIT_SHORT_SHA")) fmt.Fprintf(w, "CI_COMMIT_SHA: %s\n<br/>", os.Getenv("CI_COMMIT_SHA")) fmt.Fprintf(w, "CI_COMMIT_TAG: %s\n<br/>", os.Getenv("CI_COMMIT_TAG")) fmt.Fprintf(w, "CI_JOB_ID: <a href=\"%s\">%s</a>\n<br/>", os.Getenv("CI_JOB_URL"), os.Getenv("CI_JOB_ID")) fmt.Fprintf(w, "CI_PIPELINE_ID: <a href=\"%s\">%s</a>\n<br/>", os.Getenv("CI_PIPELINE_URL"), os.Getenv("CI_PIPELINE_ID")) fmt.Fprintf(w, "<img src=\"%s\">", staticUrlFormatter("dancing_gopher1.gif")) fmt.Fprintf(w, "<img src=\"%s\">", staticUrlFormatter("dancing_gopher2.gif")) }) // ========================================================================= // ECS Task registration for services that don't use an AWS Elastic Load Balancer. err = devdeploy.EcsServiceTaskInit(log, awsSession) if err != nil { log.Fatalf("main : Ecs Service Task init : %+v", err) } // ========================================================================= // Start APP Service // Make a channel to listen for an interrupt or terminate signal from the OS. // Use a buffered channel because the signal package requires it. shutdown := make(chan os.Signal, 1) signal.Notify(shutdown, os.Interrupt, syscall.SIGTERM) // Make a channel to listen for errors coming from the listener. Use a // buffered channel so the goroutine can exit if we don't collect this error. serverErrors := make(chan error, 1) go func() { log.Printf("main : API Listening %s", cfg.Service.Host) serverErrors <- http.ListenAndServe(cfg.Service.Host, nil) }() // Start the HTTPS service listening for requests with an SSL Cert auto generated with Let's Encrypt. if cfg.Service.HTTPSHost != "" { // Determine the primary host by parsing host from the base app URL. baseSiteUrl, err := url.Parse(cfg.Service.BaseUrl) if err != nil { log.Fatalf("main : Parse service base URL : %s : %+v", cfg.Service.BaseUrl, err) } // Drop any ports from the base app URL. var primaryHostname string if strings.Contains(baseSiteUrl.Host, ":") { primaryHostname, _, err = net.SplitHostPort(baseSiteUrl.Host) if err != nil { log.Fatalf("main : SplitHostPort : %s : %+v", baseSiteUrl.Host, err) } } else { primaryHostname = baseSiteUrl.Host } // Generate a unique list of hostnames. var hosts = []string{primaryHostname} for _, h := range cfg.Service.HostNames { h = strings.TrimSpace(h) if h != "" && h != primaryHostname { hosts = append(hosts, h) } } // Enable autocert to store certs via Secret Manager. secretPrefix := filepath.Join(cfg.Aws.SecretsManagerConfigPrefix, "autocert") // Local file cache to reduce requests hitting Secret Manager. 
localCache := autocert.DirCache(os.TempDir()) cache, err := devdeploy.NewSecretManagerAutocertCache(log, awsSession, secretPrefix, localCache) if err != nil { log.Fatalf("main : HTTPS : %+v", err) } m := &autocert.Manager{ Prompt: autocert.AcceptTOS, HostPolicy: autocert.HostWhitelist(hosts...), Cache: cache, } tLSConfig := &tls.Config{GetCertificate: m.GetCertificate} tLSConfig.NextProtos = append(tLSConfig.NextProtos, acme.ALPNProto) go func() { log.Printf("main : API Listening %s with SSL cert for hosts %s", cfg.Service.HTTPSHost, strings.Join(hosts, ", ")) srv := &http.Server{ Addr: cfg.Service.HTTPSHost, Handler: http.DefaultServeMux, TLSConfig: tLSConfig, TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0), } serverErrors <- srv.ListenAndServeTLS("", "") }() } // ========================================================================= // Shutdown // Blocking main and waiting for shutdown. select { case err := <-serverErrors: log.Fatalf("main : Error starting server: %+v", err) case sig := <-shutdown: log.Printf("main : %v : Start shutdown..", sig) // Ensure the public IP address for the task is removed from Route53. // TODO: this function needs to remove the current IP of the instance, and terminate RDS + Elastic cache err = devdeploy.EcsServiceTaskTaskShutdown(log, awsSession) if err != nil { log.Fatalf("main : Ecs Service Task shutdown : %+v", err) } // Log the status of this shutdown. switch { case sig == syscall.SIGSTOP: log.Fatal("main : Integrity issue caused shutdown") case err != nil: log.Fatalf("main : Could not stop server gracefully : %+v", err) } } } // testDbConn ensures this service can access the database instance. func testDbConn(db *sqlx.DB) error { // check _, err := db.Exec("SELECT 1") if err != nil { return errors.Wrap(err, "Database query failed.") } return nil } // testRedisConn ensures this service can access the Redis cache instance. func testRedisConn(r *redis.Client) error { err := r.Ping().Err() if err != nil { return errors.Wrap(err, "Redis ping failed.") } return err }
[ "\"CI_COMMIT_REF_NAME\"", "\"CI_COMMIT_SHORT_SHA\"", "\"CI_COMMIT_SHA\"", "\"CI_COMMIT_TAG\"", "\"CI_JOB_URL\"", "\"CI_JOB_ID\"", "\"CI_PIPELINE_URL\"", "\"CI_PIPELINE_ID\"" ]
[]
[ "CI_PIPELINE_ID", "CI_COMMIT_SHORT_SHA", "CI_PIPELINE_URL", "CI_JOB_ID", "CI_COMMIT_REF_NAME", "CI_COMMIT_SHA", "CI_COMMIT_TAG", "CI_JOB_URL" ]
[]
["CI_PIPELINE_ID", "CI_COMMIT_SHORT_SHA", "CI_PIPELINE_URL", "CI_JOB_ID", "CI_COMMIT_REF_NAME", "CI_COMMIT_SHA", "CI_COMMIT_TAG", "CI_JOB_URL"]
go
8
0
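The config block in examples/aws-ecs-go-web-api/main.go leans on kelseyhightower/envconfig: each field can be set either through its bare envconfig key or through that key prefixed with the service name (WEB_API), and the default tag fills anything left unset. A trimmed-down sketch of just that mechanism, with WEB_API_HOST as a hypothetical value set only for demonstration:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/kelseyhightower/envconfig"
)

// A reduced version of the service config above: tags name the variables,
// defaults fill the gaps.
type config struct {
	Env  string `default:"dev" envconfig:"ENV"`
	Host string `default:"0.0.0.0:4000" envconfig:"HOST"`
}

func main() {
	// Simulate what the deployment would export; either HOST or WEB_API_HOST
	// would be honored, with the prefixed name checked first.
	os.Setenv("WEB_API_HOST", "0.0.0.0:5000")

	var cfg config
	if err := envconfig.Process("WEB_API", &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Env, cfg.Host) // dev 0.0.0.0:5000
}

Process checks the prefixed name first and falls back to the bare envconfig key, which is the behavior the original's own comment describes for its service-prefixed variables.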
components/cronet/native/perftest/run.py
#!/usr/bin/env python # Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """This script runs an automated Cronet native performance benchmark. This script: 1. Starts HTTP and QUIC servers on the host machine. 2. Runs benchmark executable. Prerequisites: 1. quic_server and cronet_native_perf_test have been built for the host machine, e.g. via: gn gen out/Release --args="is_debug=false" ninja -C out/Release quic_server cronet_native_perf_test 2. sudo apt-get install lighttpd Invocation: ./run.py Output: Benchmark timings are output to /tmp/cronet_perf_test_results.txt """ import json import os import shutil import sys import tempfile REPOSITORY_ROOT = os.path.abspath(os.path.join( os.path.dirname(__file__), '..', '..', '..', '..')) sys.path.append(os.path.join(REPOSITORY_ROOT, 'build', 'android')) import lighttpd_server # pylint: disable=wrong-import-position sys.path.append(os.path.join(REPOSITORY_ROOT, 'components')) from cronet.tools import perf_test_utils # pylint: disable=wrong-import-position def main(): device = perf_test_utils.NativeDevice() # Start HTTP server. http_server_doc_root = perf_test_utils.GenerateHttpTestResources() config_file = tempfile.NamedTemporaryFile() http_server = lighttpd_server.LighttpdServer(http_server_doc_root, port=perf_test_utils.HTTP_PORT, base_config_path=config_file.name) perf_test_utils.GenerateLighttpdConfig(config_file, http_server_doc_root, http_server) assert http_server.StartupHttpServer() config_file.close() # Start QUIC server. quic_server_doc_root = perf_test_utils.GenerateQuicTestResources(device) quic_server = perf_test_utils.QuicServer(quic_server_doc_root) quic_server.StartupQuicServer(device) # Run test os.environ['LD_LIBRARY_PATH'] = perf_test_utils.BUILD_DIR device.RunShellCommand( [os.path.join(perf_test_utils.BUILD_DIR, 'cronet_native_perf_test'), json.dumps(perf_test_utils.GetConfig(device))], check_return=True) # Shutdown. quic_server.ShutdownQuicServer() shutil.rmtree(quic_server_doc_root) http_server.ShutdownHttpServer() shutil.rmtree(http_server_doc_root) if __name__ == '__main__': main()
[]
[]
[ "LD_LIBRARY_PATH" ]
[]
["LD_LIBRARY_PATH"]
python
1
0
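run.py points the benchmark at the build output by exporting LD_LIBRARY_PATH before launching cronet_native_perf_test. A hedged sketch of the same idea that scopes the variable to the child process instead of mutating the parent environment; the paths are placeholders:

import os
import subprocess

env = os.environ.copy()
env["LD_LIBRARY_PATH"] = "/path/to/out/Release"  # placeholder for perf_test_utils.BUILD_DIR
subprocess.run(
    ["/path/to/out/Release/cronet_native_perf_test", "{}"],  # "{}" stands in for the JSON config
    env=env,
    check=True,
)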
pure1/metricsTypes.go
/* Copyright 2018 David Evans Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package pure1 // Metric type describes the metric object returned by the API type Metric struct { ID string `json:"id,omitempty"` Name string `json:"name,omitempty"` AsOf int `json:"_as_of,omitempty"` Availabilities []interface{} `json:"availabilities,omitempty"` Data []interface{} `json:"data,omitempty"` Description string `json:"description,omitempty"` ResourceTypes interface{} `json:"resource_types,omitempty"` Resources []interface{} `json:"resources,omitempty"` Resolution int `json:"resolution,omitempty"` Unit string `json:"unit,omitempty"` }
[]
[]
[]
[]
[]
go
null
null
null
todo/__init__.py
import os from flask import Flask def create_app(): app = Flask(__name__) app.config.from_mapping( SECRET_KEY = 'myKey', DATABASE_HOST = os.environ.get('FLASK_DATABASE_HOST'), DATABASE_PASSWORD = os.environ.get('FLASK_DATABASE_PASSWORD'), DATABASE_USER = os.environ.get('FLASK_DATABASE_USER'), DATABASE = os.environ.get('FLASK_DATABASE') ) from . import db db.init_app(app) from . import auth from . import todo app.register_blueprint(auth.bp) app.register_blueprint(todo.bp) @app.route('/hello') def hello(): return 'Hello World' return app
[]
[]
[ "FLASK_DATABASE", "FLASK_DATABASE_HOST", "FLASK_DATABASE_USER", "FLASK_DATABASE_PASSWORD" ]
[]
["FLASK_DATABASE", "FLASK_DATABASE_HOST", "FLASK_DATABASE_USER", "FLASK_DATABASE_PASSWORD"]
python
4
0
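create_app above pulls its database settings from the FLASK_DATABASE_* variables at creation time, so they must be present before the factory runs. A minimal sketch of wiring that up; the values are placeholders:

import os

os.environ.setdefault("FLASK_DATABASE_HOST", "localhost")
os.environ.setdefault("FLASK_DATABASE_USER", "todo")
os.environ.setdefault("FLASK_DATABASE_PASSWORD", "change-me")   # placeholder credential
os.environ.setdefault("FLASK_DATABASE", "todo_db")

from todo import create_app

app = create_app()
app.run(debug=True)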
flask_boilerplate/settings.py
# -*- coding: utf-8 -*- """Application configuration.""" import os class Config(object): """Base configuration.""" SECRET_KEY = os.environ.get('FLASK_BOILERPLATE_SECRET', 'secret-key') # TODO: Change me APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir)) BCRYPT_LOG_ROUNDS = 13 DEBUG_TB_ENABLED = False # Disable Debug toolbar DEBUG_TB_INTERCEPT_REDIRECTS = False CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc. SQLALCHEMY_TRACK_MODIFICATIONS = False WEBPACK_MANIFEST_PATH = 'webpack/manifest.json' class ProdConfig(Config): """Production configuration.""" ENV = 'prod' DEBUG = False SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/example' # TODO: Change me DEBUG_TB_ENABLED = False # Disable Debug toolbar class DevConfig(Config): """Development configuration.""" ENV = 'dev' DEBUG = True DB_NAME = 'dev.db' # Put the db file in project root DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME) SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH) DEBUG_TB_ENABLED = True CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc. class TestConfig(Config): """Test configuration.""" TESTING = True DEBUG = True SQLALCHEMY_DATABASE_URI = 'sqlite://' BCRYPT_LOG_ROUNDS = 4 # For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds" WTF_CSRF_ENABLED = False # Allows form testing
[]
[]
[ "FLASK_BOILERPLATE_SECRET" ]
[]
["FLASK_BOILERPLATE_SECRET"]
python
1
0
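settings.py only defines the config classes; choosing one is left to the application factory. One plausible way to select ProdConfig or DevConfig from an environment variable (the create_app function and the FLASK_ENV convention are assumptions, not part of the file):

import os
from flask import Flask
from flask_boilerplate.settings import DevConfig, ProdConfig

def create_app(config_object=DevConfig):
    app = Flask(__name__)
    app.config.from_object(config_object)   # FLASK_BOILERPLATE_SECRET is read inside Config
    return app

app = create_app(ProdConfig if os.environ.get("FLASK_ENV") == "production" else DevConfig)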
CoreLogic/authentication.py
# from config import client_id, secret import json import requests from requests.exceptions import HTTPError import os class Authentication(object): def __init__(self, **kwargs): # prod (api), test (api-uat), dev (+ /sandbox) env = {'prod' : "https://api.corelogic.asia", 'test' : "https://api-uat.corelogic.asia", 'dev' : "https://api-uat.corelogic.asia/sandbox",} self.base = env[kwargs['config']] if kwargs['config'] else env['dev'] self.auth = "https://access-api.corelogic.asia" client_id = os.environ['client_id'] secret = os.environ['secret'] self.access_token = 'Bearer ' + self.generate_token(client_id, secret) self.headers = {'Content-Type': 'application/json', \ 'Authorization' : self.access_token} # Generate Authentication token def generate_token(self, cid, secret): url = self.auth + '/access/oauth/token' try: params = {'grant_type': 'client_credentials', 'client_id': cid, \ 'client_secret': secret} result = requests.get(url, params=params) token = result.json()['access_token'] except HTTPError as err: print(f'HTTP error occurred: {err}') return token
[]
[]
[ "client_id", "secret" ]
[]
["client_id", "secret"]
python
2
0
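Authentication.__init__ reads client_id and secret with os.environ[...], which surfaces a bare KeyError when either is missing. A small, hedged alternative lookup (the require_env helper is mine, not part of the original):

import os

def require_env(name):
    # Fail fast with a readable message instead of a bare KeyError.
    value = os.environ.get(name)
    if not value:
        raise RuntimeError(f"environment variable {name} must be set")
    return value

client_id = require_env("client_id")
secret = require_env("secret")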
daemon-producer/main.go
package main import ( "flag" "log" "os" "syscall" "time" "gopkg.in/sevlyar/go-daemon.v0" ) const appname = "daemon-producer" var ( signal = flag.String("s", "", `Send signal to the daemon: quit — graceful shutdown stop — fast shutdown reload — reloading the configuration file`) queueUrl string ) func main() { flag.Parse() daemon.AddCommand(daemon.StringFlag(signal, "quit"), syscall.SIGQUIT, termHandler) daemon.AddCommand(daemon.StringFlag(signal, "stop"), syscall.SIGTERM, termHandler) daemon.AddCommand(daemon.StringFlag(signal, "reload"), syscall.SIGHUP, reloadHandler) cntxt := &daemon.Context{ PidFileName: appname + ".pid", PidFilePerm: 0644, LogFileName: appname + ".log", LogFilePerm: 0640, WorkDir: "./", Umask: 027, Args: []string{"[" + appname + "]"}, } queueUrl = os.Getenv("QUEUE_URL") if queueUrl == "" { log.Fatalln("QUEUE_URL must be specified"); } if len(daemon.ActiveFlags()) > 0 { d, err := cntxt.Search() if err != nil { log.Fatalf("Unable send signal to the daemon: %s", err.Error()) } daemon.SendCommands(d) return } d, err := cntxt.Reborn() if err != nil { log.Fatalln(err) } if d != nil { return } defer cntxt.Release() log.Println("- - - - - - - - - - - - - - -") log.Println("daemon started") go worker() err = daemon.ServeSignals() if err != nil { log.Printf("Error: %s", err.Error()) } log.Println("daemon terminated") } var ( stop = make(chan struct{}) done = make(chan struct{}) ) func worker() { LOOP: for { time.Sleep(2 * time.Second) // this is work to be done by worker. select { case <-stop: break LOOP default: if err := Send(queueUrl); err != nil { panic(err.Error()) } } } done <- struct{}{} } func termHandler(sig os.Signal) error { log.Println("terminating...") stop <- struct{}{} if sig == syscall.SIGQUIT { <-done } return daemon.ErrStop } func reloadHandler(sig os.Signal) error { log.Println("configuration reloaded") return nil }
[ "\"QUEUE_URL\"" ]
[]
[ "QUEUE_URL" ]
[]
["QUEUE_URL"]
go
1
0
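The daemon above requires QUEUE_URL and coordinates its producer loop with stop/done channels. A rough Python analogue of that shutdown handshake using threading.Event, purely illustrative (the print is a stand-in for the Go Send call):

import os
import threading
import time

queue_url = os.environ.get("QUEUE_URL")
if not queue_url:
    raise SystemExit("QUEUE_URL must be specified")

stop = threading.Event()

def worker():
    while not stop.is_set():
        time.sleep(2)                     # work interval, mirroring the Go loop
        print("producing to", queue_url)  # stand-in for Send(queueUrl)

t = threading.Thread(target=worker)
t.start()
# ... later, on SIGTERM/SIGQUIT:
stop.set()
t.join()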
project101/asgi.py
""" ASGI config for project101 project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project101.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
artifact/artifact.go
package artifact import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "os" "path/filepath" "time" ) const apiVersion = "6.0-preview" const uploadChunkSize = 8 * 1024 * 1024 // 8 MB // Upload content as GitHub Actions artifact func Upload(ctx context.Context, name, fp string, content io.Reader) error { c, err := createContainerForArtifact(ctx, name) if err != nil { return err } size, err := upload(ctx, name, c.FileContainerResourceURL, fp, content) if err != nil { return err } if err := patchArtifactSize(ctx, name, size); err != nil { return err } return nil } // UploadFiles as GitHub Actions artifact func UploadFiles(ctx context.Context, name string, files []string) error { c, err := createContainerForArtifact(ctx, name) if err != nil { return err } total, err := uploadFiles(ctx, name, c.FileContainerResourceURL, files) if err != nil { return err } if err := patchArtifactSize(ctx, name, total); err != nil { return err } return nil } type containerResponce struct { ContainerID int `json:"containerId"` Size int `json:"size"` SignedContent interface{} `json:"signedContent"` FileContainerResourceURL string `json:"fileContainerResourceUrl"` Type string `json:"type"` Name string `json:"name"` URL string `json:"url"` ExpiresOn time.Time `json:"expiresOn"` Items interface{} `json:"items"` } func createContainerForArtifact(ctx context.Context, name string) (*containerResponce, error) { param := map[string]string{ "Type": "actions_storage", "Name": name, } u, err := getArtifactURL() if err != nil { return nil, err } b, err := json.Marshal(&param) if err != nil { return nil, err } req, err := http.NewRequest( http.MethodPost, u, bytes.NewReader(b), ) if err != nil { return nil, err } req.Header.Set("Accept", fmt.Sprintf("application/json;api-version=%s", apiVersion)) req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", os.Getenv("ACTIONS_RUNTIME_TOKEN"))) client := &http.Client{} resp, err := client.Do(req) if err != nil { return nil, err } body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } res := &containerResponce{} if err := json.Unmarshal(body, res); err != nil { return nil, err } return res, nil } func upload(ctx context.Context, name, ep, fp string, content io.Reader) (int, error) { u, err := url.Parse(ep) if err != nil { return 0, err } q := u.Query() q.Set("itemPath", filepath.Join(name, fp)) q.Set("api-version", apiVersion) u.RawQuery = q.Encode() body := &bytes.Buffer{} if _, err = io.Copy(body, content); err != nil { return 0, err } max := body.Len() buf := make([]byte, 0, uploadChunkSize) start := 0 client := &http.Client{} for { n, err := body.Read(buf[:cap(buf)]) buf = buf[:n] if n == 0 { if err == nil { continue } if err == io.EOF { break } return 0, err } end := start + n - 1 req, err := createRequest(u, start, end, max, bytes.NewReader(buf)) if err != nil { return 0, err } resp, err := client.Do(req) if err != nil { return 0, err } if _, err := io.ReadAll(resp.Body); err != nil { return 0, err } if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusAccepted { return 0, errors.New(resp.Status) } start = start + n if err != nil && err != io.EOF { return 0, err } } return max, nil } func patchArtifactSize(ctx context.Context, name string, size int) error { e, err := getArtifactURL() if err != nil { return err } u, err := url.Parse(e) if err != nil { return err } q := u.Query() q.Set("artifactName", name) q.Set("api-version", apiVersion) u.RawQuery = q.Encode() 
param := map[string]int{ "Size": size, } b, err := json.Marshal(&param) if err != nil { return err } req, err := http.NewRequest( http.MethodPatch, u.String(), bytes.NewReader(b), ) if err != nil { return err } req.Header.Set("Accept", fmt.Sprintf("application/json;api-version=%s", apiVersion)) req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", os.Getenv("ACTIONS_RUNTIME_TOKEN"))) client := &http.Client{} resp, err := client.Do(req) if err != nil { return err } if _, err := io.ReadAll(resp.Body); err != nil { return err } return nil } func uploadFiles(ctx context.Context, name, ep string, files []string) (int, error) { total := 0 for _, f := range files { a, err := filepath.Abs(f) if err != nil { return 0, err } rel, err := filepath.Rel(os.Getenv("GITHUB_WORKSPACE"), a) if err != nil { return 0, err } file, err := os.Open(filepath.Clean(f)) if err != nil { return 0, err } size, err := upload(ctx, name, ep, rel, file) if err != nil { _ = file.Close() return 0, err } total += size if err := file.Close(); err != nil { return 0, err } } return total, nil } func createRequest(u *url.URL, start, end, max int, b io.Reader) (*http.Request, error) { req, err := http.NewRequest( http.MethodPut, u.String(), b, ) if err != nil { return nil, err } req.Header.Set("Accept", fmt.Sprintf("application/json;api-version=%s", apiVersion)) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", os.Getenv("ACTIONS_RUNTIME_TOKEN"))) req.Header.Set("Content-Length", fmt.Sprintf("%d", end-start+1)) req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, max)) return req, nil } func getArtifactURL() (string, error) { if os.Getenv("ACTIONS_RUNTIME_URL") == "" { return "", errors.New("env ACTIONS_RUNTIME_URL is only available from the context of an action") } if os.Getenv("GITHUB_RUN_ID") == "" { return "", errors.New("env GITHUB_RUN_ID is only available from the context of an action") } return fmt.Sprintf("%s_apis/pipelines/workflows/%s/artifacts?api-version=%s", os.Getenv("ACTIONS_RUNTIME_URL"), os.Getenv("GITHUB_RUN_ID"), apiVersion), nil }
[ "\"ACTIONS_RUNTIME_TOKEN\"", "\"ACTIONS_RUNTIME_TOKEN\"", "\"GITHUB_WORKSPACE\"", "\"ACTIONS_RUNTIME_TOKEN\"", "\"ACTIONS_RUNTIME_URL\"", "\"GITHUB_RUN_ID\"", "\"ACTIONS_RUNTIME_URL\"", "\"GITHUB_RUN_ID\"" ]
[]
[ "GITHUB_WORKSPACE", "ACTIONS_RUNTIME_URL", "GITHUB_RUN_ID", "ACTIONS_RUNTIME_TOKEN" ]
[]
["GITHUB_WORKSPACE", "ACTIONS_RUNTIME_URL", "GITHUB_RUN_ID", "ACTIONS_RUNTIME_TOKEN"]
go
4
0
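artifact.go uploads in 8 MB chunks and labels each PUT with a Content-Range: bytes start-end/total header. The header arithmetic in isolation, as a small sketch (no HTTP involved):

CHUNK = 8 * 1024 * 1024  # matches uploadChunkSize in the Go file

def content_ranges(total):
    # Yield the Content-Range value for each chunk of a payload of `total` bytes.
    start = 0
    while start < total:
        end = min(start + CHUNK, total) - 1
        yield f"bytes {start}-{end}/{total}"
        start = end + 1

print(list(content_ranges(20 * 1024 * 1024)))
# ['bytes 0-8388607/20971520', 'bytes 8388608-16777215/20971520', 'bytes 16777216-20971519/20971520']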
src/python/pants/ivy/ivy_subsystem.py
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import os import urllib from pants.java.distribution.distribution import DistributionLocator from pants.subsystem.subsystem import Subsystem class IvySubsystem(Subsystem): """Common configuration items for ivy tasks. :API: public """ options_scope = 'ivy' _DEFAULT_VERSION = '2.4.0' _DEFAULT_URL = ('https://repo1.maven.org/maven2/' 'org/apache/ivy/ivy/' '{version}/ivy-{version}.jar'.format(version=_DEFAULT_VERSION)) @classmethod def register_options(cls, register): super().register_options(register) register('--http-proxy', advanced=True, help='Specify a proxy URL for http requests.') register('--https-proxy', advanced=True, help='Specify a proxy URL for https requests.') register('--bootstrap-jar-url', advanced=True, default=cls._DEFAULT_URL, help='Location to download a bootstrap version of Ivy.') register('--bootstrap-fetch-timeout-secs', type=int, advanced=True, default=10, help='Timeout the fetch if the connection is idle for longer than this value.') register('--ivy-profile', advanced=True, default=cls._DEFAULT_VERSION, help='The version of ivy to fetch.') register('--cache-dir', advanced=True, default=os.path.expanduser('~/.ivy2/pants'), help='The default directory used for both the Ivy resolution and repository caches.' 'If you want to isolate the resolution cache from the repository cache, we ' 'recommend setting both the --resolution-cache-dir and --repository-cache-dir ' 'instead of using --cache-dir') register('--resolution-cache-dir', advanced=True, help='Directory to store Ivy resolution artifacts.') register('--repository-cache-dir', advanced=True, help='Directory to store Ivy repository artifacts.') register('--ivy-settings', advanced=True, help='Location of XML configuration file for Ivy settings.') register('--bootstrap-ivy-settings', advanced=True, help='Bootstrap Ivy XML configuration file.') @classmethod def subsystem_dependencies(cls): return super().subsystem_dependencies() + (DistributionLocator,) def http_proxy(self): """Set ivy to use an http proxy. Expects a string of the form http://<host>:<port> """ if os.getenv('HTTP_PROXY'): return os.getenv('HTTP_PROXY') if os.getenv('http_proxy'): return os.getenv('http_proxy') return self.get_options().http_proxy def https_proxy(self): """Set ivy to use an https proxy. 
Expects a string of the form http://<host>:<port> """ if os.getenv('HTTPS_PROXY'): return os.getenv('HTTPS_PROXY') if os.getenv('https_proxy'): return os.getenv('https_proxy') return self.get_options().https_proxy def extra_jvm_options(self): extra_options = [] http_proxy = self.http_proxy() if http_proxy: host, port = self._parse_proxy_string(http_proxy) extra_options.extend([ "-Dhttp.proxyHost={}".format(host), "-Dhttp.proxyPort={}".format(port), ]) https_proxy = self.https_proxy() if https_proxy: host, port = self._parse_proxy_string(https_proxy) extra_options.extend([ "-Dhttps.proxyHost={}".format(host), "-Dhttps.proxyPort={}".format(port), ]) return extra_options def _parse_proxy_string(self, proxy_string): parse_result = urllib.parse.urlparse(proxy_string) return parse_result.hostname, parse_result.port def resolution_cache_dir(self): if self.get_options().resolution_cache_dir: return self.get_options().resolution_cache_dir else: return self.get_options().cache_dir def repository_cache_dir(self): if self.get_options().repository_cache_dir: return self.get_options().repository_cache_dir else: return self.get_options().cache_dir
[]
[]
[ "HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy" ]
[]
["HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"]
python
4
0
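http_proxy()/https_proxy() above feed _parse_proxy_string, which splits a http://<host>:<port> URL into the JVM -Dhttp.proxyHost/-Dhttp.proxyPort flags. The same parse step condensed into a few lines:

import os
from urllib.parse import urlparse

proxy = os.getenv("HTTP_PROXY") or os.getenv("http_proxy")
if proxy:
    parsed = urlparse(proxy)  # expects the http://<host>:<port> form noted in the docstring
    jvm_flags = [
        f"-Dhttp.proxyHost={parsed.hostname}",
        f"-Dhttp.proxyPort={parsed.port}",
    ]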
backend/per_me_ngle_32511/wsgi.py
""" WSGI config for per_me_ngle_32511 project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'per_me_ngle_32511.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
test/python/topology/test2_ec.py
# coding=utf-8 # Licensed Materials - Property of IBM # Copyright IBM Corp. 2016 import unittest import sys import itertools import logging import tempfile import codecs import os from streamsx.topology.topology import * from streamsx.topology.tester import Tester from streamsx.topology.context import ConfigParams import streamsx.ec def _trc_msg_direct(level): atm = (level, "direct _ec message:" + str(level*77), "A1,B2,python", "MyFile.py", "MyFunc", 4242) import _streamsx_ec _streamsx_ec._app_trc(atm) ctl = _streamsx_ec._app_trc_level() print("Current Trace level:", ctl, logging.getLevelName(ctl)) def _log_msg_direct(level): atm = (level, "direct _ec log message:" + str(level*77), "C1,D2,python", "MyLogFile.py", "MyLogFunc", 2189) import _streamsx_ec _streamsx_ec._app_log(atm) ctl = _streamsx_ec._app_log_level() print("Current Log level:", ctl, logging.getLevelName(ctl)) def _trc_msg(msg): logger = logging.getLogger() logger.critical("Critical:%s", msg) logger.error("Error:" + msg) logger.warning("Warning:" + msg) logger.info("Info:%s") logger.debug("Debug:" + msg) ctl = logger.getEffectiveLevel() print("Current Root logger Trace level:", ctl, logging.getLevelName(ctl)) def _log_msg(msg): logger = logging.getLogger('com.ibm.streams.log') logger.critical("Critical:" + msg) logger.error("Error:" + msg) logger.warning("Warning:" + msg) logger.info("Info:" + msg) ctl = logger.getEffectiveLevel() print("Current Stream log logger level:", ctl, logging.getLevelName(ctl)) def read_config_file(name): path = os.path.join(streamsx.ec.get_application_directory(), 'etc', name) with codecs.open(path, encoding='utf-8') as f: return f.read() class EcSource(object): def __init__(self, val): self.val = val self.ev = None def __call__(self): return [(self.val, self.ev)] def __enter__(self): self.ev = 'EcSource_enter' def __exit__(self, a, b, c): pass class EcFilter(object): def __init__(self, val): self.val = val self.ev = None def __call__(self, tuple_): return self.val == self.ev def __enter__(self): self.ev = self.val def __exit__(self, a, b, c): pass class EcMap(object): def __init__(self, val): self.val = val self.ev = None def __call__(self, tuple_): return tuple_ + (self.val, self.ev) def __enter__(self): self.ev = 'EcMap_enter' def __exit__(self, a, b, c): pass class EcForEach(object): def __init__(self): self.ev = False def __call__(self, tuple): if not self.ev: raise AssertionError("__enter__ not called") def __enter__(self): self.ev = True def __exit__(self, a, b, c): pass class EcDuplicateMetric(object): def __enter__(self): self.m1 = streamsx.ec.CustomMetric(self, name='METRIC1', initialValue=37) if int(self.m1.value) != 37: raise ValueError("Expected initial 37 got " + int(self.m1.value)) self.m1 = streamsx.ec.CustomMetric(self, name='METRIC1', initialValue=99) if int(self.m1.value) != 37: raise ValueError("Expected 37 got " + int(self.m1.value)) try: streamsx.ec.CustomMetric(self, name='METRIC1', kind='Gauge') # 4.3 allows metrics of the same name regardless of kind. 
self.okerror = True except ValueError as e: self.okerror = True def __exit__(self, a, b, c): pass def __call__(self, tuple_): return tuple_ + (self.m1.name,self.okerror) def get_sys_argv(): import sys as sys_ec_test return sys_ec_test.argv class TestEc(unittest.TestCase): _multiprocess_can_split_ = True def setUp(self): Tester.setup_standalone(self) def test_enter_called(self): self.assertFalse(streamsx.ec.is_active()) topo = Topology() s = topo.source(EcSource('A211')) s = s.filter(EcFilter('F243')) s = s.filter(lambda _ : streamsx.ec.is_active()) s.for_each(EcForEach()) s = s.map(EcMap('M523')) s = s.map(EcDuplicateMetric()) tester = Tester(topo) tester.contents(s, [('A211', 'EcSource_enter', 'M523', 'EcMap_enter', 'METRIC1', True)]) tester.test(self.test_ctxtype, self.test_config) @unittest.skipIf('CP4D_URL' in os.environ, "TODO - needs to be analyzed") def test_sys_argv(self): topo = Topology() s = topo.source(get_sys_argv) tester = Tester(topo) tester.contents(s, ['']) tester.test(self.test_ctxtype, self.test_config) def test_app_dir(self): fn = None with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write("SomeConfig".encode('utf-8')) temp.flush() fn = temp.name bfn = os.path.basename(fn) topo = Topology() rtpath = topo.add_file_dependency(temp.name, 'etc') self.assertEqual('etc/' + bfn, rtpath) s = topo.source(['A']) s = s.filter(lambda x : os.path.isdir(streamsx.ec.get_application_directory())) s = s.map(lambda x : read_config_file(bfn)) tester = Tester(topo) tester.contents(s, ['SomeConfig']) tester.test(self.test_ctxtype, self.test_config) os.remove(fn) def test_app_trc_direct(self): topo = Topology() s = topo.source([40,30,20,10,99]) at = s.filter(lambda x : x != 99) at.for_each(_trc_msg_direct) tester = Tester(topo) tester.tuple_count(s, 5) tester.test(self.test_ctxtype, self.test_config) def test_app_log_direct(self): topo = Topology() s = topo.source([40,30,20,99]) at = s.filter(lambda x : x != 99) at.for_each(_log_msg_direct) tester = Tester(topo) tester.tuple_count(s, 4) tester.test(self.test_ctxtype, self.test_config) def test_app_trc(self): topo = Topology() s = topo.source(['msg1', 'msg2你好']) s.for_each(_trc_msg) tester = Tester(topo) tester.tuple_count(s, 2) tester.test(self.test_ctxtype, self.test_config) def test_app_log(self): topo = Topology() s = topo.source(['logmsg1', 'logmsg2你好']) s.for_each(_log_msg) tester = Tester(topo) tester.tuple_count(s, 2) tester.test(self.test_ctxtype, self.test_config) class TestDistributedEc(TestEc): def setUp(self): Tester.setup_distributed(self) self.test_config[ConfigParams.SSL_VERIFY] = False class TestSasEc(TestEc): def setUp(self): Tester.setup_streaming_analytics(self, force_remote_build=True)
[]
[]
[]
[]
[]
python
0
0
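test_sys_argv above is guarded with @unittest.skipIf('CP4D_URL' in os.environ, ...). The same environment-based skip pattern in isolation:

import os
import unittest

class ExampleTest(unittest.TestCase):
    @unittest.skipIf('CP4D_URL' in os.environ, "skipped when a CP4D endpoint is configured")
    def test_only_without_cp4d(self):
        self.assertTrue(True)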
weed/command/fuse.go
package command import ( "fmt" "os" "strconv" "strings" "time" ) func init() { cmdFuse.Run = runFuse // break init cycle } type parameter struct { name string value string } func runFuse(cmd *Command, args []string) bool { rawArgs := strings.Join(args, " ") rawArgsLen := len(rawArgs) option := strings.Builder{} options := []parameter{} masterProcess := true fusermountPath := "" // first parameter i := 0 for i = 0; i < rawArgsLen && rawArgs[i] != ' '; i++ { option.WriteByte(rawArgs[i]) } options = append(options, parameter{"arg0", option.String()}) option.Reset() for i++; i < rawArgsLen; i++ { // space separator check for filled option if rawArgs[i] == ' ' { if option.Len() > 0 { options = append(options, parameter{option.String(), "true"}) option.Reset() } // dash separator read option until next space } else if rawArgs[i] == '-' { for i++; i < rawArgsLen && rawArgs[i] != ' '; i++ { option.WriteByte(rawArgs[i]) } options = append(options, parameter{option.String(), "true"}) option.Reset() // equal separator start option with pending value } else if rawArgs[i] == '=' { name := option.String() option.Reset() for i++; i < rawArgsLen && rawArgs[i] != ','; i++ { // double quote separator read option until next double quote if rawArgs[i] == '"' { for i++; i < rawArgsLen && rawArgs[i] != '"'; i++ { option.WriteByte(rawArgs[i]) } // single quote separator read option until next single quote } else if rawArgs[i] == '\'' { for i++; i < rawArgsLen && rawArgs[i] != '\''; i++ { option.WriteByte(rawArgs[i]) } // add chars before comma } else if rawArgs[i] != ' ' { option.WriteByte(rawArgs[i]) } } options = append(options, parameter{name, option.String()}) option.Reset() // comma separator just read current option } else if rawArgs[i] == ',' { options = append(options, parameter{option.String(), "true"}) option.Reset() // what is not a separator fill option buffer } else { option.WriteByte(rawArgs[i]) } } // get residual option data if option.Len() > 0 { // add value to pending option options = append(options, parameter{option.String(), "true"}) option.Reset() } // scan each parameter for i := 0; i < len(options); i++ { parameter := options[i] switch parameter.name { case "child": masterProcess = false case "arg0": mountOptions.dir = &parameter.value case "filer": mountOptions.filer = &parameter.value case "filer.path": mountOptions.filerMountRootPath = &parameter.value case "dirAutoCreate": if parsed, err := strconv.ParseBool(parameter.value); err != nil { mountOptions.dirAutoCreate = &parsed } else { panic(fmt.Errorf("dirAutoCreate: %s", err)) } case "collection": mountOptions.collection = &parameter.value case "replication": mountOptions.replication = &parameter.value case "disk": mountOptions.diskType = &parameter.value case "ttl": if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err != nil { intValue := int(parsed) mountOptions.ttlSec = &intValue } else { panic(fmt.Errorf("ttl: %s", err)) } case "chunkSizeLimitMB": if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err != nil { intValue := int(parsed) mountOptions.chunkSizeLimitMB = &intValue } else { panic(fmt.Errorf("chunkSizeLimitMB: %s", err)) } case "concurrentWriters": i++ if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err != nil { intValue := int(parsed) mountOptions.concurrentWriters = &intValue } else { panic(fmt.Errorf("concurrentWriters: %s", err)) } case "cacheDir": mountOptions.cacheDir = &parameter.value case "cacheCapacityMB": if parsed, err := strconv.ParseInt(parameter.value, 0, 64); err != nil { 
mountOptions.cacheSizeMB = &parsed } else { panic(fmt.Errorf("cacheCapacityMB: %s", err)) } case "dataCenter": mountOptions.dataCenter = &parameter.value case "allowOthers": if parsed, err := strconv.ParseBool(parameter.value); err != nil { mountOptions.allowOthers = &parsed } else { panic(fmt.Errorf("allowOthers: %s", err)) } case "umask": mountOptions.umaskString = &parameter.value case "nonempty": if parsed, err := strconv.ParseBool(parameter.value); err != nil { mountOptions.nonempty = &parsed } else { panic(fmt.Errorf("nonempty: %s", err)) } case "volumeServerAccess": mountOptions.volumeServerAccess = &parameter.value case "map.uid": mountOptions.uidMap = &parameter.value case "map.gid": mountOptions.gidMap = &parameter.value case "readOnly": if parsed, err := strconv.ParseBool(parameter.value); err != nil { mountOptions.readOnly = &parsed } else { panic(fmt.Errorf("readOnly: %s", err)) } case "cpuprofile": mountCpuProfile = &parameter.value case "memprofile": mountMemProfile = &parameter.value case "readRetryTime": if parsed, err := time.ParseDuration(parameter.value); err != nil { mountReadRetryTime = &parsed } else { panic(fmt.Errorf("readRetryTime: %s", err)) } case "fusermount.path": fusermountPath = parameter.value } } // the master start the child, release it then finish himself if masterProcess { arg0 := os.Args[0] argv := append(os.Args, "-o", "child") attr := os.ProcAttr{} attr.Env = os.Environ() child, err := os.StartProcess(arg0, argv, &attr) if err != nil { panic(fmt.Errorf("master process can not start child process: %s", err)) } err = child.Release() if err != nil { panic(fmt.Errorf("master process can not release child process: %s", err)) } return true } if fusermountPath != "" { if err := os.Setenv("PATH", fusermountPath); err != nil { panic(fmt.Errorf("setenv: %s", err)) } } else if os.Getenv("PATH") == "" { if err := os.Setenv("PATH", "/bin:/sbin:/usr/bin:/usr/sbin"); err != nil { panic(fmt.Errorf("setenv: %s", err)) } } // just call "weed mount" command return runMount(cmdMount, []string{}) } var cmdFuse = &Command{ UsageLine: "fuse /mnt/mount/point -o \"filer=localhost:8888,filer.path=/\"", Short: "Allow use weed with linux's mount command", Long: `Allow use weed with linux's mount command You can use -t weed on mount command: mv weed /sbin/mount.weed mount -t weed fuse /mnt -o "filer=localhost:8888,filer.path=/" Or you can use -t fuse on mount command: mv weed /sbin/weed mount -t fuse.weed fuse /mnt -o "filer=localhost:8888,filer.path=/" mount -t fuse "weed#fuse" /mnt -o "filer=localhost:8888,filer.path=/" To use without mess with your /sbin: mount -t fuse./home/user/bin/weed fuse /mnt -o "filer=localhost:8888,filer.path=/" mount -t fuse "/home/user/bin/weed#fuse" /mnt -o "filer=localhost:8888,filer.path=/" To pass more than one parameter use quotes, example: mount -t weed fuse /mnt -o "filer='192.168.0.1:8888,192.168.0.2:8888',filer.path=/" To check valid options look "weed mount --help" `, }
[ "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
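runFuse hand-parses mount options of the form name=value,flag and falls back to a default PATH when none is set. A loose Python sketch of the same option split (quoting rules are simplified; the real parser also honours single and double quotes):

import os

def parse_mount_options(raw):
    # "filer=localhost:8888,filer.path=/,nonempty" -> {"filer": "localhost:8888", ...}
    options = {}
    for part in raw.split(","):
        if not part:
            continue
        name, _, value = part.partition("=")
        options[name.strip()] = value.strip("'\"") or "true"
    return options

opts = parse_mount_options("filer=localhost:8888,filer.path=/,nonempty")
os.environ.setdefault("PATH", "/bin:/sbin:/usr/bin:/usr/sbin")  # same fallback as the Go code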
internal/testutils/config.go
package testutils import ( "os" "github.com/origin-finkle/logs/internal/config" ) func Init() { err := config.Init(os.Getenv("CONFIG_FOLDER")) if err != nil { panic(err) } }
[ "\"CONFIG_FOLDER\"" ]
[]
[ "CONFIG_FOLDER" ]
[]
["CONFIG_FOLDER"]
go
1
0
tests/tcex_init.py
# -*- coding: utf-8 -*- """TcEx Testing Initialization.""" import os import sys from tcex import TcEx from .tc_token import TcToken # instance of tc token to retrieve testing token from API tc_token = TcToken() # a token in required for DataStore testing # if not os.getenv('TC_TOKEN'): # raise RuntimeError('A Token is required to run tests.') config_data = { # connection 'api_default_org': os.getenv('API_DEFAULT_ORG'), # 'tc_token': tc_token.service_token, 'tc_token': tc_token.api_token, # 'tc_token': os.getenv('TC_TOKEN'), 'tc_token_expires': os.getenv('TC_TOKEN_EXPIRES'), 'tc_owner': os.getenv('TC_OWNER', 'TCI'), # hmac auth (for session tests) 'api_access_id': os.getenv('API_ACCESS_ID'), 'api_secret_key': os.getenv('API_SECRET_KEY'), # logging 'tc_log_level': os.getenv('TC_LOG_LEVEL', 'trace'), 'tc_log_to_api': str(os.getenv('TC_LOG_TO_API', 'false')).lower() in ['true'], # paths 'tc_api_path': os.getenv('TC_API_PATH'), 'tc_in_path': os.getenv('TC_IN_PATH', 'log'), 'tc_log_path': os.getenv('TC_LOG_PATH', 'log'), 'tc_out_path': os.getenv('TC_OUT_API', 'log'), 'tc_temp_path': os.getenv('TC_TEMP_PATH', 'log'), # playbooks 'tc_playbook_db_type': os.getenv('TC_PLAYBOOK_DB_TYPE', 'Redis'), 'tc_playbook_db_context': os.getenv( 'TC_PLAYBOOK_DB_CONTEXT', '0d5a675a-1d60-4679-bd01-3948d6a0a8bd' ), 'tc_playbook_db_path': os.getenv('TC_PLAYBOOK_DB_PATH', 'localhost'), 'tc_playbook_db_port': os.getenv('TC_PLAYBOOK_DB_PORT', '6379'), # proxy 'tc_proxy_tc': str(os.getenv('TC_PROXY_TC', 'false')).lower() in ['true'], 'tc_proxy_external': str(os.getenv('TC_PROXY_EXTERNAL', 'false')).lower() in ['true'], } # proxy if os.getenv('TC_PROXY_HOST'): config_data['tc_proxy_host'] = os.getenv('TC_PROXY_HOST') if os.getenv('TC_PROXY_PORT'): config_data['tc_proxy_port'] = os.getenv('TC_PROXY_PORT') if os.getenv('TC_PROXY_USERNAME'): config_data['tc_proxy_username'] = os.getenv('TC_PROXY_USERNAME') if os.getenv('TC_PROXY_PASSWORD'): config_data['tc_proxy_password'] = os.getenv('TC_PROXY_PASSWORD') tcex = TcEx(config=config_data) # clear sys.argv sys.argv = sys.argv[:1] + ['--tc_log_level', 'trace'] # args.py [if cli_arg in sys.argv:]
[]
[]
[ "API_SECRET_KEY", "TC_TOKEN", "TC_OWNER", "TC_LOG_TO_API", "TC_LOG_LEVEL", "API_ACCESS_ID", "TC_PROXY_HOST", "TC_LOG_PATH", "TC_PROXY_USERNAME", "API_DEFAULT_ORG", "TC_OUT_API", "TC_PROXY_PORT", "TC_PLAYBOOK_DB_PORT", "TC_TEMP_PATH", "TC_PROXY_PASSWORD", "TC_PLAYBOOK_DB_TYPE", "TC_TOKEN_EXPIRES", "TC_PLAYBOOK_DB_CONTEXT", "TC_API_PATH", "TC_PROXY_EXTERNAL", "TC_PLAYBOOK_DB_PATH", "TC_PROXY_TC", "TC_IN_PATH" ]
[]
["API_SECRET_KEY", "TC_TOKEN", "TC_OWNER", "TC_LOG_TO_API", "TC_LOG_LEVEL", "API_ACCESS_ID", "TC_PROXY_HOST", "TC_LOG_PATH", "TC_PROXY_USERNAME", "API_DEFAULT_ORG", "TC_OUT_API", "TC_PROXY_PORT", "TC_PLAYBOOK_DB_PORT", "TC_TEMP_PATH", "TC_PROXY_PASSWORD", "TC_PLAYBOOK_DB_TYPE", "TC_TOKEN_EXPIRES", "TC_PLAYBOOK_DB_CONTEXT", "TC_API_PATH", "TC_PROXY_EXTERNAL", "TC_PLAYBOOK_DB_PATH", "TC_PROXY_TC", "TC_IN_PATH"]
python
23
0
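The config above coerces boolean flags with str(os.getenv(...)).lower() in ['true']. A tiny helper expressing the same coercion (the helper name is mine, not part of the original file):

import os

def env_bool(name, default='false'):
    # Mirrors the str(os.getenv(name, default)).lower() in ['true'] pattern used above.
    return str(os.getenv(name, default)).lower() in ['true']

tc_log_to_api = env_bool('TC_LOG_TO_API')
tc_proxy_external = env_bool('TC_PROXY_EXTERNAL')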
gomp_lib/gomp_lib.go
package gomp_lib import( "runtime" "os" . "strconv" ) var GOMP_NUM_ROUTINES int = runtime.NumCPU() func Gomp_set_num_routines(N int){ os.Setenv("num_routines",Itoa(N)) } func Gomp_get_num_routines() int { res,err:=Atoi(os.Getenv("num_routines")) if err!=nil { return GOMP_NUM_ROUTINES } return res } func Gomp_get_routine_num() int { return 0 }
[ "\"num_routines\"" ]
[]
[ "num_routines" ]
[]
["num_routines"]
go
1
0
models/image_classification/pytorch-imagenet-cf.py
import argparse import os import shutil import time import math import sys sys.path.append('./') sys.path.append('./src') import copy import torch from torch.autograd import Variable import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.models as models import torchvision.transforms as transforms import torchvision.datasets as datasets import logging from cf_checkpoint import CFCheckpoint from cf_manager import CFManager, CFMode from cf_iterator import CFIterator from torch.multiprocessing import Pool, Process, set_start_method try: set_start_method('spawn') except RuntimeError: pass try: from nvidia.dali.plugin.pytorch import DALIClassificationIterator from nvidia.dali.pipeline import Pipeline import nvidia.dali.ops as ops import nvidia.dali.types as types except ImportError: raise ImportError("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.") import threading try: from apex.parallel import DistributedDataParallel as DDP from apex.fp16_utils import * from apex import amp, optimizers from apex.multi_tensor_apply import multi_tensor_applier from apex.parallel.LARC import LARC except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.") model_names = sorted(name for name in models.__dict__ if name.islower() and not name.startswith("__") and callable(models.__dict__[name])) parser = argparse.ArgumentParser(description='PyTorch ImageNet Training using DALI') parser.add_argument('--data', metavar='DIR', default="./", type=str, help='path(s) to dataset (if one path is provided, it is assumed\n' + 'to have subdirectories named "train" and "val"; alternatively,\n' + 'train and val paths can be specified directly by providing both paths as arguments)') parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18', choices=model_names, help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet18)') parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('--epochs', default=3, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)') parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate') parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum') parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)') parser.add_argument('--nopin', action='store_false', help='Use this ' 'argument to disable memory pinning') #parser.add_argument('--resume', default='', type=str, metavar='PATH', parser.add_argument('--resume', default=False, action='store_true', help='path to latest checkpoint (default: none)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set') parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model') 
parser.add_argument('--fp16', action='store_true', help='Run model fp16 mode.') parser.add_argument('--dali_cpu', action='store_true', help='Runs CPU based version of DALI pipeline.') parser.add_argument('--static-loss-scale', type=float, default=1, help='Static loss scale, positive power of 2 values can improve fp16 convergence.') parser.add_argument('--dynamic-loss-scale', action='store_true', help='Use dynamic loss scaling. If supplied, this argument supersedes ' + '--static-loss-scale.') parser.add_argument('--prof', dest='prof', action='store_true', help='Only run 10 iterations for profiling.') parser.add_argument('-t', '--test', action='store_true', help='Launch test mode with preset arguments') parser.add_argument("--local_rank", default=0, type=int) parser.add_argument("--steps_per_run", default=-1, type=int) parser.add_argument("--classes", default=1000, type=int) parser.add_argument("--cache_size", default=0, type=int) parser.add_argument('--sync_bn', action='store_true', help='enabling apex sync BN.') parser.add_argument('--opt-level', type=str) parser.add_argument('--keep-batchnorm-fp32', type=str, default=None) parser.add_argument('--loss-scale', type=str, default=None) parser.add_argument('--channels-last', type=bool, default=False) parser.add_argument('--deterministic', action='store_true') parser.add_argument('--noeval', action='store_true') parser.add_argument('--amp',action='store_true',help='Run model AMP (automatic mixed precision) mode.') parser.add_argument("--nnodes", default=1, type=int) parser.add_argument("--node_rank", default=0, type=int) parser.add_argument('--mint', action='store_true') parser.add_argument('--dali', action='store_true') parser.add_argument('--persist', action='store_true', default=False) parser.add_argument('--dynamic', action='store_true', default=False) parser.add_argument('--node_ip_list', action='append', type=str, help='Enter IP of other nodes in order') parser.add_argument('--node_port_list', action='append', type=int, help='Enter start port of other nodes in order') parser.add_argument('--iters', default=-1, type=int,metavar='N', help='Num iters (default: 50') parser.add_argument('--chk-freq', default=0, type=int,metavar='N', help='checkpoint frequency') parser.add_argument('--barrier', action='store_true', default=False) parser.add_argument('--overwrite', action='store_true', default=False) parser.add_argument('--synchronous', action='store_true', default=False) parser.add_argument('--tic-tac', action='store_true', default=False) parser.add_argument('--rename', action='store_true', default=False) parser.add_argument('--tic-tac-len', default=2, type=int) parser.add_argument('--chk-prefix', type=str, default="./") parser.add_argument('--checkfreq', action='store_true', default=False) parser.add_argument('--cf_iterator', action='store_true', default=False) parser.add_argument('--chk_mode_baseline', action='store_true', default=False) cudnn.benchmark = True must_chk = False compute_time_list = [] data_time_list = [] chk_time_list = [] class HybridTrainPipe(Pipeline): def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False, resume_index=0, resume_epoch=0): super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id) shard = int(args.node_rank*args.world_size/args.nnodes + args.local_rank) if args.mint: self.input = ops.FileReader(file_root=data_dir, shard_id=shard, num_shards=args.world_size, shuffle_after_epoch=True, cache_size=args.cache_size) else: cf_det=True if not 
resume_index and not resume_epoch and not args.cf_iterator: cf_det=False self.input = ops.FileReader(file_root=data_dir, shard_id=shard, num_shards=args.world_size, shuffle_after_epoch=True) else: self.input = ops.FileReader(file_root=data_dir, shard_id=shard, num_shards=args.world_size, shuffle_after_epoch=True, resume_index=resume_index, resume_epoch=resume_epoch, cf_det=cf_det) print("CF deterministic shuffling is {}".format(cf_det)) #let user decide which pipeline works him bets for RN version he runs dali_device = 'cpu' if dali_cpu else 'gpu' #decoder_device = 'cpu' decoder_device = 'cpu' if dali_cpu else 'mixed' # This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet # without additional reallocations device_memory_padding = 211025920 if decoder_device == 'mixed' else 0 host_memory_padding = 140544512 if decoder_device == 'mixed' else 0 self.decode = ops.ImageDecoderRandomCrop(device=decoder_device, output_type=types.RGB, device_memory_padding=device_memory_padding, host_memory_padding=host_memory_padding, random_aspect_ratio=[0.8, 1.25], random_area=[0.1, 1.0], num_attempts=100) self.res = ops.Resize(device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR) self.cmnp = ops.CropMirrorNormalize(device="gpu", output_dtype=types.FLOAT, output_layout=types.NCHW, crop=(crop, crop), image_type=types.RGB, mean=[0.485 * 255,0.456 * 255,0.406 * 255], std=[0.229 * 255,0.224 * 255,0.225 * 255]) self.coin = ops.CoinFlip(probability=0.5) print('DALI "{0}" variant'.format(dali_device)) def define_graph(self): rng = self.coin() self.jpegs, self.labels = self.input(name="Reader") images = self.decode(self.jpegs) images = self.res(images) output = self.cmnp(images.gpu(), mirror=rng) return [output, self.labels] class HybridValPipe(Pipeline): def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size): super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id) shard = int(args.node_rank*args.world_size/args.nnodes + args.local_rank) self.input = ops.FileReader(file_root=data_dir, shard_id=shard, num_shards=args.world_size, random_shuffle=False) self.decode = ops.ImageDecoder(device="cpu", output_type=types.RGB) self.res = ops.Resize(device="cpu", resize_shorter=size, interp_type=types.INTERP_TRIANGULAR) self.cmnp = ops.CropMirrorNormalize(device="gpu", output_dtype=types.FLOAT, output_layout=types.NCHW, crop=(crop, crop), image_type=types.RGB, mean=[0.485 * 255,0.456 * 255,0.406 * 255], std=[0.229 * 255,0.224 * 255,0.225 * 255]) def define_graph(self): self.jpegs, self.labels = self.input(name="Reader") images = self.decode(self.jpegs) images = self.res(images) output = self.cmnp(images.gpu()) return [output, self.labels] best_prec1 = 0 args = parser.parse_args() # test mode, use default args for sanity test if args.test: args.fp16 = False args.epochs = 1 args.start_epoch = 0 args.arch = 'resnet50' args.batch_size = 64 args.data = [] args.prof = True args.data.append('/data/imagenet/train-jpeg/') args.data.append('/data/imagenet/val-jpeg/') if args.deterministic: cudnn.benchmark = False cudnn.deterministic = True torch.manual_seed(args.local_rank) torch.set_printoptions(precision=10) if not len(args.data): raise Exception("error: too few arguments") if args.amp: args.opt_level='O1' if args.amp: print("Using mixed precision : {}".format(args.amp)) print("opt_level = {}".format(args.opt_level)) print("keep_batchnorm_fp32 = {}".format(args.keep_batchnorm_fp32), 
type(args.keep_batchnorm_fp32)) print("loss_scale = {}".format(args.loss_scale), type(args.loss_scale)) if args.dali: print("Using DALI") else: print("Using native dataloader") args.distributed = False if 'WORLD_SIZE' in os.environ: args.distributed = int(os.environ['WORLD_SIZE']) > 1 # item() is a recent addition, so this helps with backward compatibility. def to_python_float(t): if hasattr(t, 'item'): return t.item() else: return t[0] def main(): logging.basicConfig(format='%(module)s - %(funcName)s - %(levelname)s - %(message)s', level=logging.INFO) start_full = time.time() global best_prec1, args time_stat = [] chk_stat = [] start = time.time() args.gpu = 0 args.world_size = 1 torch.cuda.set_device(args.gpu) if args.distributed: args.gpu = args.local_rank % torch.cuda.device_count() torch.cuda.set_device(args.gpu) torch.distributed.init_process_group(backend='nccl', init_method='env://') args.world_size = torch.distributed.get_world_size() args.total_batch_size = args.world_size * args.batch_size if args.fp16: assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled." if args.amp and args.fp16: print("Please use only one of the --fp16/--amp flags") exit(1) if args.static_loss_scale != 1.0: if not args.fp16: print("Warning: if --fp16 is not used, static_loss_scale will be ignored.") if args.sync_bn: import apex print("using apex synced BN") model = apex.parallel.convert_syncbn_model(model) # create model if args.pretrained: print("=> using pre-trained model '{}'".format(args.arch)) model = models.__dict__[args.arch](pretrained=True) else: print("=> creating model '{}'".format(args.arch)) if(args.arch == "inception_v3"): model = models.__dict__[args.arch](num_classes=args.classes,aux_logits=False) else: model = models.__dict__[args.arch](num_classes=args.classes) model = model.cuda() if args.fp16: model = network_to_half(model) optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay) if args.fp16: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.static_loss_scale, dynamic_loss_scale=args.dynamic_loss_scale) # Initialize Amp. Amp accepts either values or strings for the optional override arguments, # for convenient interoperation with argparse if args.amp: model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level, keep_batchnorm_fp32=args.keep_batchnorm_fp32, loss_scale=args.loss_scale, min_loss_scale=1.0 ) # For distributed training, wrap the model with apex.parallel.DistributedDataParallel. # This must be done AFTER the call to amp.initialize. If model = DDP(model) is called # before model, ... = amp.initialize(model, ...), the call to amp.initialize may alter # the types of model's parameters in a way that disrupts or destroys DDP's allreduce hooks. if args.distributed: # shared param/delay all reduce turns off bucketing in DDP, for lower latency runs this can improve perf # for the older version of APEX please use shared_param, for newer one it is delay_allreduce model = DDP(model, delay_allreduce=True) # define loss function (criterion) and optimizer criterion = nn.CrossEntropyLoss().cuda() args.lr = args.lr*float(args.batch_size*args.world_size)/256. 
if args.chk_mode_baseline: args.chk_mode = CFMode.MANUAL else: args.chk_mode = CFMode.AUTO #if args.local_rank == 0: chk = CFCheckpoint(model=model, optimizer=optimizer) cf_manager = CFManager(args.chk_prefix, chk, mode=args.chk_mode) #else: # cf_manager = None # optionally resume from a checkpoint args.start_index = 0 args.steps_so_far = 0 extra_state=None if args.resume: extra_state = cf_manager.restore(gpu=args.gpu) if extra_state is not None: args.start_epoch = extra_state['epoch'] args.start_index = extra_state['start_index'] args.steps_so_far = extra_state['steps_so_far'] print("Populated: epoch :{}, start_idx:{}, steps_so_far:{}".format(args.start_epoch,args.start_index,args.steps_so_far)) #if os.path.isfile(args.resume): # print("=> loading checkpoint '{}'".format(args.resume)) # checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda(args.gpu)) # args.start_epoch = checkpoint['epoch'] # args.start_index = checkpoint['iter']*args.batch_size # args.steps_so_far = checkpoint['steps_so_far'] # args.shuffle_seed = checkpoint['dl_shuffle_seed'] # best_prec1 = checkpoint['best_prec1'] # model.load_state_dict(checkpoint['state_dict']) # optimizer.load_state_dict(checkpoint['optimizer']) # print("=> loaded checkpoint '{}' (epoch {})" # .format(args.resume, checkpoint['epoch'])) #else: # print("=> no checkpoint found at '{}'".format(args.resume)) # Data loading code traindir = os.path.join(args.data, 'train') valdir = os.path.join(args.data, 'val') train_pipe = None if args.dali: if(args.arch == "inception_v3"): crop_size = 299 val_size = 320 # I chose this value arbitrarily, we can adjust. else: crop_size = 224 val_size = 256 if not args.cf_iterator: args.start_index = 0 pipe = HybridTrainPipe(batch_size=args.batch_size, num_threads=args.workers, device_id=args.local_rank, data_dir=traindir, crop=crop_size, dali_cpu=args.dali_cpu) else: pipe = HybridTrainPipe(batch_size=args.batch_size, num_threads=args.workers, device_id=args.local_rank, data_dir=traindir, crop=crop_size, dali_cpu=args.dali_cpu, resume_index=args.start_index, resume_epoch=args.start_epoch) pipe.build() train_pipe = pipe resume_size = int(pipe.epoch_size("Reader") / args.world_size) - args.start_index train_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size("Reader") / args.world_size), fill_last_batch=False, resume_size=resume_size) if args.cf_iterator: train_loader = CFIterator(train_loader, worker_id=args.local_rank, bs=args.batch_size, steps_this_epoch=int(args.start_index/args.batch_size), epoch=args.start_epoch, dali=args.dali, cf_manager=cf_manager, chk_freq=args.chk_freq, arch=args.arch, steps_to_run=args.steps_per_run, persist=args.persist, dynamic=args.dynamic) if args.resume: train_loader.load_state_dict(extra_state) if not args.noeval: pipe_val = HybridValPipe(batch_size=args.batch_size, num_threads=args.workers, device_id=args.local_rank, data_dir=valdir, crop=crop_size, size=val_size) pipe_val.build() val_loader = DALIClassificationIterator(pipe_val, size=int(pipe_val.epoch_size("Reader") / args.world_size)) else: normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) train_dataset = datasets.ImageFolder( traindir, transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ])) if args.distributed: train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) else: train_sampler = None train_loader = torch.utils.data.DataLoader( train_dataset, 
batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=args.nopin, sampler=train_sampler) val_loader = torch.utils.data.DataLoader( datasets.ImageFolder(valdir, transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=args.nopin) if args.evaluate and not args.noeval: validate(val_loader, model, criterion) return total_time = AverageMeter() dur_setup = time.time() - start time_stat.append(dur_setup) print("Batch size for GPU {} is {}, workers={}".format(args.gpu, args.batch_size, args.workers)) fname = 'time-split' + str(args.local_rank) + '.csv' df = open(fname, 'w+') if args.rename: df.write("epoch, iter, dtime, mtime, ftime, ctime, ttime,chktime, renametime, tottime\n") else: df.write("epoch, iter,dtime, mtime, ftime, ctime, ttime, chktime, tottime\n") for epoch in range(args.start_epoch, args.epochs): if args.local_rank == 0 and epoch == 0: os.system("swapoff -a") os.system("free -g") # log timing start_ep = time.time() df.write("\n") # train for one epoch avg_train_time = train(train_loader, model, criterion, optimizer, epoch, df, cf_manager) total_time.update(avg_train_time) if args.prof: break # evaluate on validation set if args.noeval: [prec1, prec5] = [0,0] else: [prec1, prec5] = validate(val_loader, model, criterion) filename = 'acc-progress-' + str(args.gpu) + '.csv' with open(filename, 'a+') as fw: fw.write("{},{},{},{}\n".format(epoch, time.time() -start_ep, prec1, prec5)) chk_st = time.time() # remember best prec@1 and save checkpoint if args.local_rank == 0: is_best = prec1 > best_prec1 best_prec1 = max(prec1, best_prec1) ''' save_checkpoint({ 'epoch': epoch + 1, 'arch': args.arch, 'state_dict': model.state_dict(), 'best_prec1': best_prec1, 'optimizer': optimizer.state_dict(), }, is_best) ''' if epoch == args.epochs - 1: print('##Top-1 {0}\n' '##Top-5 {1}\n' '##Perf {2}'.format(prec1, prec5, args.total_batch_size / total_time.avg)) dur_chk = time.time() - chk_st if args.cf_iterator and train_loader.exit: break if args.dali: # reset DALI iterators train_loader.reset() if not args.noeval: val_loader.reset() dur_ep = time.time() - start_ep print("EPOCH DURATION = {}".format(dur_ep)) time_stat.append(dur_ep) chk_stat.append(dur_chk) if args.local_rank == 0: for i in time_stat: print("Time_stat : {}".format(i)) for i in range(0, len(data_time_list)): print("Data time : {}\t Compute time : {}\t Chk time : {}".format(data_time_list[i], compute_time_list[i],chk_time_list[i])) dur_full = time.time() - start_full if args.local_rank == 0: print("Total time for all epochs = {}".format(dur_full)) if cf_manager.chk_process is not None: cf_manager.chk_process.join() if args.dali: del pipe if not args.noeval: del pipe_val def train(train_loader, model, criterion, optimizer, epoch, df, cf_manager): batch_time = AverageMeter() total_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() global must_chk # switch to train mode model.train() end = time.time() dataset_time = compute_time = checkpoint_time = rename_time = 0 chk_per_epoch = 0 for i, data in enumerate(train_loader): rename_time = 0 if args.dali: images = data[0]["data"] target = data[0]["label"].squeeze().cuda().long() train_loader_len = int(math.ceil(train_loader._size / args.batch_size)) input_var = Variable(images) target_var = Variable(target) else: images, target = data target = 
target.squeeze().cuda().long() input_var = Variable(images).cuda(args.gpu, non_blocking=True) target_var = Variable(target).cuda(args.gpu, non_blocking=True) train_loader_len = int(len(train_loader)) adjust_learning_rate(optimizer, epoch, i, train_loader_len) if args.prof: if i > 10: break # measure data loading time dtime = time.time() - end start_copy = time.time() mtime = time.time() - start_copy data_time.update(time.time() - end) dataset_time += (time.time() - end) compute_start = time.time() # compute output output = model(input_var) loss = criterion(output, target_var) # measure accuracy and record loss prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) if args.distributed: reduced_loss = reduce_tensor(loss.data) prec1 = reduce_tensor(prec1) prec5 = reduce_tensor(prec5) else: reduced_loss = loss.data losses.update(to_python_float(reduced_loss), images.size(0)) top1.update(to_python_float(prec1), images.size(0)) top5.update(to_python_float(prec5), images.size(0)) # compute gradient and do SGD step optimizer.zero_grad() ftime = time.time() - compute_start if args.fp16: optimizer.backward(loss) elif args.amp: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() #if args.cf_iterator: #torch.cuda.synchronize() if args.local_rank == 0: cf_manager.weight_update() else: optimizer.step() torch.cuda.synchronize() compute_time += (time.time() - compute_start) ctime = time.time() - compute_start proc = [] ttime = time.time() - end ch_st = time.time() chktime = time.time() - ch_st checkpoint_time += chktime #print("After CF chk : mem before={}MB, after={}MB".format(mem_before/1024/1024, mem_after/1024/1024)) if args.barrier: dist.barrier() tottime = time.time() - end total_time.update(time.time() - end) df.write("{},{},{}\n".format(epoch, i, tottime)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if args.local_rank == 0 and i % args.print_freq == 0 and i > 1: print('Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Speed {3:.3f} ({4:.3f})\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( epoch, i, train_loader_len, args.total_batch_size / batch_time.val, args.total_batch_size / batch_time.avg, batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5)) if args.iters > 0 and args.iters == i: must_chk = False #if args.local_rank == 0: # for p in proc: # p.join() break data_time_list.append(dataset_time) compute_time_list.append(compute_time) chk_time_list.append(checkpoint_time) return batch_time.avg def validate(val_loader, model, criterion): batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() # switch to evaluate mode model.eval() end = time.time() for i, data in enumerate(val_loader): if args.dali: images = data[0]["data"] target = data[0]["label"].squeeze().cuda().long() val_loader_len = int(val_loader._size / args.batch_size) target = target.cuda(non_blocking=True) input_var = Variable(images) target_var = Variable(target) else: images, target = data target = target.squeeze().cuda().long() val_loader_len = int(len(val_loader)) input_var = Variable(images).cuda(args.gpu, non_blocking=True) target_var = Variable(target).cuda(args.gpu, non_blocking=True) # compute output with torch.no_grad(): output = model(input_var) loss = criterion(output, target_var) # measure accuracy and record 
loss prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) if args.distributed: reduced_loss = reduce_tensor(loss.data) prec1 = reduce_tensor(prec1) prec5 = reduce_tensor(prec5) else: reduced_loss = loss.data losses.update(to_python_float(reduced_loss), images.size(0)) top1.update(to_python_float(prec1), images.size(0)) top5.update(to_python_float(prec5), images.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if args.local_rank == 0 and i % args.print_freq == 0: print('Test: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Speed {2:.3f} ({3:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( i, val_loader_len, args.total_batch_size / batch_time.val, args.total_batch_size / batch_time.avg, batch_time=batch_time, loss=losses, top1=top1, top5=top5)) print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}' .format(top1=top1, top5=top5)) return [top1.avg, top5.avg] def save_one_checkpoint(state): filename = 'checkpoint.pth.tar.bgk.one' s = time.time() torch.save(state, filename) print("In bgk saved in {}s".format(time.time()-s)) def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): torch.save(state, filename) if is_best: shutil.copyfile(filename, 'model_best.pth.tar') def bgk_save_checkpoint(model, optimizer): global must_chk i = 0 while must_chk and i < 10: state = { 'epoch': 1, 'iter': 1, 'arch': args.arch, 'state_dict': model.state_dict(), 'best_prec1': 0, 'optimizer': optimizer.state_dict(), } i += 1 filename = 'checkpoint.pth.tar.bgk' s = time.time() clone_state = copy.deepcopy(state) for k, v in clone_state['state_dict'].items(): clone_state['state_dict'][k] = v.cpu() dur = time.time() - s torch.save(clone_state, filename) print("In bgk saved {}, clone={}s, write={}s".format(i, dur, time.time()-s-dur)) s = time.time() torch.save(state, filename) print("In bgk saved {}, save={}s".format(i, time.time()-s)) class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def adjust_learning_rate(optimizer, epoch, step, len_epoch): """LR schedule that should yield 76% converged accuracy with batch size 256""" factor = epoch // 30 if epoch >= 80: factor = factor + 1 lr = args.lr * (0.1 ** factor) """Warmup""" if epoch < 5: lr = lr * float(1 + step + epoch * len_epoch) / (5. * len_epoch) if(args.local_rank == 0 and step % args.print_freq == 0 and step > 1): print("Epoch = {}, step = {}, lr = {}".format(epoch, step, lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr def accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res def reduce_tensor(tensor): rt = tensor.clone() dist.all_reduce(rt, op=dist.reduce_op.SUM) rt /= args.world_size return rt if __name__ == '__main__': main()
[]
[]
[ "WORLD_SIZE" ]
[]
["WORLD_SIZE"]
python
1
0
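Illustrative note (not part of the dataset row above): the only environment variable extracted from this training script is WORLD_SIZE, which PyTorch distributed launchers use to advertise the total number of processes. A minimal sketch of the usual read-with-default pattern, assuming a single-process fallback:

```python
import os

# Minimal sketch, not taken from the file above: read WORLD_SIZE with a
# single-process default and decide whether to run in distributed mode.
world_size = int(os.environ.get("WORLD_SIZE", "1"))
distributed = world_size > 1
print(f"world_size={world_size}, distributed={distributed}")
```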
libs/geometry/doc/make_qbk.py
#! /usr/bin/env python # -*- coding: utf-8 -*- # =========================================================================== # Copyright (c) 2007-2012 Barend Gehrels, Amsterdam, the Netherlands. # Copyright (c) 2008-2012 Bruno Lalande, Paris, France. # Copyright (c) 2009-2012 Mateusz Loskot ([email protected]), London, UK # # Use, modification and distribution is subject to the Boost Software License, # Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) # ============================================================================ import os, sys script_dir = os.path.dirname(__file__) os.chdir(os.path.abspath(script_dir)) print("Boost.Geometry is making .qbk files in %s" % os.getcwd()) if 'DOXYGEN' in os.environ: doxygen_cmd = os.environ['DOXYGEN'] else: doxygen_cmd = 'doxygen' if 'DOXYGEN_XML2QBK' in os.environ: doxygen_xml2qbk_cmd = os.environ['DOXYGEN_XML2QBK'] else: doxygen_xml2qbk_cmd = 'doxygen_xml2qbk' cmd = doxygen_xml2qbk_cmd cmd = cmd + " --xml doxy/doxygen_output/xml/%s.xml" cmd = cmd + " --start_include boost/geometry/" cmd = cmd + " --convenience_header_path ../../../boost/geometry/" cmd = cmd + " --convenience_headers geometry.hpp,geometries/geometries.hpp,multi/multi.hpp" cmd = cmd + " --skip_namespace boost::geometry::" cmd = cmd + " --copyright src/copyright_block.qbk" cmd = cmd + " --output_member_variables false" cmd = cmd + " > generated/%s.qbk" def run_command(command): if os.system(command) != 0: raise Exception("Error running %s" % command) def call_doxygen(): os.chdir("doxy"); run_command("rm -f doxygen_output/xml/*.xml") run_command(doxygen_cmd) os.chdir("..") def group_to_quickbook(section): run_command(cmd % ("group__" + section.replace("_", "__"), section)) def model_to_quickbook(section): run_command(cmd % ("classboost_1_1geometry_1_1model_1_1" + section.replace("_", "__"), section)) def model_to_quickbook2(classname, section): run_command(cmd % ("classboost_1_1geometry_1_1model_1_1" + classname, section)) def struct_to_quickbook(section): run_command(cmd % ("structboost_1_1geometry_1_1" + section.replace("_", "__"), section)) def class_to_quickbook(section): run_command(cmd % ("classboost_1_1geometry_1_1" + section.replace("_", "__"), section)) def strategy_to_quickbook(section): p = section.find("::") ns = section[:p] strategy = section[p+2:] run_command(cmd % ("classboost_1_1geometry_1_1strategy_1_1" + ns.replace("_", "__") + "_1_1" + strategy.replace("_", "__"), ns + "_" + strategy)) def cs_to_quickbook(section): run_command(cmd % ("structboost_1_1geometry_1_1cs_1_1" + section.replace("_", "__"), section)) call_doxygen() algorithms = ["append", "assign", "make", "clear" , "area", "buffer", "centroid", "convert", "correct", "covered_by" , "convex_hull", "difference", "disjoint", "distance" , "envelope", "equals", "expand", "for_each", "intersection", "intersects" , "length", "num_geometries", "num_interior_rings", "num_points" , "overlaps", "perimeter", "reverse", "simplify", "sym_difference" , "touches", "transform", "union", "unique", "within"] access_functions = ["get", "set", "exterior_ring", "interior_rings" , "num_points", "num_interior_rings", "num_geometries"] coordinate_systems = ["cartesian", "geographic", "polar", "spherical", "spherical_equatorial"] core = ["closure", "coordinate_system", "coordinate_type", "cs_tag" , "dimension", "exception", "interior_type" , "degree", "radian" , "is_radian", "point_order" , "point_type", "ring_type", "tag", "tag_cast" ] exceptions = ["exception", 
"centroid_exception"]; iterators = ["circular_iterator", "closing_iterator" , "ever_circling_iterator"] models = ["point", "linestring", "box" , "polygon", "segment", "ring" , "multi_linestring", "multi_point", "multi_polygon", "referring_segment"] strategies = ["distance::pythagoras", "distance::haversine" , "distance::cross_track", "distance::projected_point" , "within::winding", "within::franklin", "within::crossings_multiply" , "area::surveyor", "area::huiller" , "centroid::bashein_detmer", "centroid::average" , "convex_hull::graham_andrew" , "simplify::douglas_peucker" , "side::side_by_triangle", "side::side_by_cross_track", "side::spherical_side_formula" , "transform::inverse_transformer", "transform::map_transformer" , "transform::rotate_transformer", "transform::scale_transformer" , "transform::translate_transformer", "transform::ublas_transformer" ] views = ["box_view", "segment_view" , "closeable_view", "reversible_view", "identity_view"] for i in algorithms: group_to_quickbook(i) for i in access_functions: group_to_quickbook(i) for i in coordinate_systems: cs_to_quickbook(i) for i in core: struct_to_quickbook(i) for i in exceptions: class_to_quickbook(i) for i in iterators: struct_to_quickbook(i) for i in models: model_to_quickbook(i) for i in strategies: strategy_to_quickbook(i) for i in views: struct_to_quickbook(i) model_to_quickbook2("d2_1_1point__xy", "point_xy") group_to_quickbook("arithmetic") group_to_quickbook("enum") group_to_quickbook("register") group_to_quickbook("svg") class_to_quickbook("svg_mapper") group_to_quickbook("wkt") os.chdir("index") execfile("make_qbk.py") os.chdir("..") # Use either bjam or b2 or ../../../b2 (the last should be done on Release branch) run_command("bjam")
[]
[]
[ "DOXYGEN", "DOXYGEN_XML2QBK" ]
[]
["DOXYGEN", "DOXYGEN_XML2QBK"]
python
2
0
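Illustrative note: make_qbk.py resolves DOXYGEN and DOXYGEN_XML2QBK with an if/else fallback to the bare command names. The same behaviour written with os.environ.get, as a sketch rather than the original code:

```python
import os

# Equivalent to the fallback logic in make_qbk.py: prefer the environment
# override when present, otherwise use the command name found on PATH.
doxygen_cmd = os.environ.get("DOXYGEN", "doxygen")
doxygen_xml2qbk_cmd = os.environ.get("DOXYGEN_XML2QBK", "doxygen_xml2qbk")
```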
smontry.py
""" A minimal Sentry client. Loosely based on sentry-sdk v1.5.8 (https://github.com/getsentry/sentry-python). sentry-sdk is: Copyright (c) 2018 Sentry (https://sentry.io) and individual contributors. Licensed under the BSD-2-Clause License. """ import datetime import gzip import json import os import socket import uuid from typing import Optional from urllib.parse import urlsplit from urllib.request import Request, urlopen __version__ = "0.1" USER_AGENT = f"smontry/{__version__}" def _get_url_and_auth( sentry_dsn: str, url_type: str, *, client: Optional[str] = None, timestamp: Optional[datetime.datetime] = None, version: int = 7, ): """ Parse a Sentry DSN into an API URL and an authentication header. """ if not sentry_dsn: raise ValueError("sentry_dsn not set") parts = urlsplit(sentry_dsn) assert parts.username path = parts.path.rsplit("/", 1) project_id = str(int(path.pop())) path = "/".join(path) + "/" url = f"{parts.scheme}://{parts.hostname}{path}api/{project_id}/{url_type}/" rv = [("sentry_key", parts.username), ("sentry_version", version)] if timestamp is not None: rv.append(("sentry_timestamp", str(timestamp.timestamp()))) if client is not None: rv.append(("sentry_client", client)) if parts.password is not None: rv.append(("sentry_secret", parts.password)) parts = ", ".join(f"{key}={value}" for key, value in rv) auth = f"Sentry {parts}" return (url, auth) def _store_event( sentry_dsn: str, event: dict, ) -> bytes: """ Send a Store API request. May raise. """ url, auth = _get_url_and_auth( sentry_dsn, url_type="store", timestamp=datetime.datetime.utcnow(), client=USER_AGENT, ) body = gzip.compress(json.dumps(event).encode("utf-8")) headers = { "Content-Encoding": "gzip", "Content-Type": "application/json", "User-Agent": USER_AGENT, "X-Sentry-Auth": auth, } req = Request( url, body, headers, method="POST", ) resp = urlopen(req) return resp.fp.read() def _augment_event(event: dict) -> dict: event = event.copy() if "server_name" not in event and hasattr(socket, "gethostname"): event["server_name"] = socket.gethostname() if "environment" not in event: event["environment"] = os.environ.get("SENTRY_ENVIRONMENT") or "production" if "platform" not in event: event["platform"] = "python" if "timestamp" not in event: event["timestamp"] = datetime.datetime.utcnow().strftime( "%Y-%m-%dT%H:%M:%S.%fZ" ) return event def _get_default_sentry_dsn() -> str: return os.environ.get("SENTRY_DSN") def capture_message( message: str, level: str = "info", *, sentry_dsn: Optional[str] = None, ): """ Send a message event with the given level. """ return _store_event( sentry_dsn or _get_default_sentry_dsn(), _augment_event({"message": message, "level": level}), ) if __name__ == "__main__": capture_message( f"This is Smontry, hello?! {uuid.uuid4()}", )
[]
[]
[ "SENTRY_DSN", "SENTRY_ENVIRONMENT" ]
[]
["SENTRY_DSN", "SENTRY_ENVIRONMENT"]
python
2
0
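Illustrative note: capture_message in smontry.py falls back to SENTRY_DSN (via _get_default_sentry_dsn) when no DSN argument is passed, and _augment_event reads SENTRY_ENVIRONMENT. A hedged usage sketch; the DSN below is a made-up placeholder, and running this would attempt a real HTTP POST to that host:

```python
import os
import smontry

# Placeholder DSN for illustration only; substitute a real project DSN.
os.environ["SENTRY_DSN"] = "https://examplekey@o0.ingest.sentry.io/0"
os.environ.setdefault("SENTRY_ENVIRONMENT", "staging")

# With no sentry_dsn argument, the module reads SENTRY_DSN from the environment.
smontry.capture_message("deploy finished", level="info")
```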
src/third_party/red_tamarin_stable/tamarin-cental/configure.py
#!/usr/bin/env python # -*- Mode: Python; indent-tabs-mode: nil -*- # vi: set ts=4 sw=4 expandtab: # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # # This script runs just like a traditional configure script, to do configuration # testing and makefile generation. #**************************************************************************** # If you're building android the android public sdk/ndk must be set up on your # build machine. # See the wiki page here for instructions on how to create # the android public sdk/ndk: # https://zerowing.corp.adobe.com/display/FlashPlayer/android+tamarin+shell+support # # Before building edit the /android-public/android-vars.sh script # and check that the ANDROIDTOP variable is set correctly. Then run the script # before invoking configure.py: # . /android-public/android-vars.sh # #**************************************************************************** import os import os.path import sys import build.process import re import string import subprocess thisdir = os.path.dirname(os.path.abspath(__file__)) # Look for additional modules in our build/ directory. sys.path.append(thisdir) from build.configuration import * import build.getopt import build.avmfeatures # Used to set the mac SDK parameters def _setSDKParams(sdk_version, os_ver, xcode_version): if sdk_version is None: # Infer SDK version from the current OS version if os_ver == '10.4': sdk_version = '104u' else: sdk_version = os_ver.translate(None, string.punctuation) # On 10.5/6 systems, and only if "--mac-sdk=104u" is passed in, compile for the 10.4u SDK and override CC/CXX (set in configuration.py) to use gcc/gxx 4.0.x # Infer xcode version from the SDK version if not directly specified if sdk_version == '104u': os_ver,sdk_number = '10.4','10.4u' config._acvars['CXX'] = 'g++-4.0' config._acvars['CC'] = 'gcc-4.0' if xcode_version is None: xcode_version = '3' elif sdk_version == '105': os_ver,sdk_number = '10.5','10.5' if xcode_version is None: xcode_version = '3' elif sdk_version == '106': os_ver,sdk_number = '10.6','10.6' if xcode_version is None: xcode_version = '3' elif sdk_version == '107': os_ver,sdk_number = '10.7','10.7' if xcode_version is None: xcode_version = '4' else: print'Unknown SDK version -> %s. Expected values are 104u, 105, 106 or 107.' % sdk_version sys.exit(2) sdk_prefix = None if xcode_version is not None: xcode_major_version = xcode_version.split(".")[0] if int(xcode_major_version) >= 4: sdk_prefix = "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX" if sdk_prefix is None: sdk_prefix = "/Developer/SDKs/MacOSX" sdk_path = sdk_prefix + sdk_number + ".sdk" if not os.path.exists(sdk_path): print'Could not find %s' % sdk_path sys.exit(2) else: return os_ver,sdk_path def _setGCCVersionedFlags(FLAGS, MAJOR_VERSION, MINOR_VERSION, current_cpu): # warnings have been updated to try to include all those enabled by current Flash/AIR builds -- disable with caution, or risk integration pain if MAJOR_VERSION >= 4: FLAGS += "-Wstrict-null-sentinel " if current_cpu == 'mips': FLAGS += "-Wstrict-aliasing=0 " elif (MAJOR_VERSION == 4 and MINOR_VERSION <= 2): # 4.0 - 4.2 # Bugzilla 654996: -Werror for gcc prior to 4.3 can _usually_ be # turned on; see core/manifest.mk for Interpreter.cpp workaround. 
FLAGS += "-Wstrict-aliasing=0 -Werror " elif (MAJOR_VERSION == 4 and MINOR_VERSION == 4): # 4.4 FLAGS += "-Werror -Wempty-body -Wno-logical-op -Wmissing-field-initializers -Wstrict-aliasing=0 -Wno-array-bounds -Wno-clobbered -Wstrict-overflow=0 -funit-at-a-time " else: # gcc 4.5 or later FLAGS += "-Werror -Wempty-body -Wno-logical-op -Wmissing-field-initializers -Wstrict-aliasing=3 -Wno-array-bounds -Wno-clobbered -Wstrict-overflow=0 -funit-at-a-time " if (MAJOR_VERSION == 4 and MINOR_VERSION == 6): # 4.6 FLAGS += "-Wno-psabi -Wno-unused-variable -Wno-unused-but-set-variable " return FLAGS o = build.getopt.Options() config = Configuration(thisdir, options = o, sourcefile = 'core/avmplus.h') the_os, cpu = config.getTarget() arm_fpu = o.getBoolArg("arm-fpu", False) arm_neon = o.getBoolArg("arm-neon", False) arm_thumb = False if cpu == "thumb2": arm_thumb = True else: arm_thumb = o.getBoolArg("arm-thumb",False) arm_hard_float = o.getBoolArg("arm-hard-float", False) arm_arch = o.arm_arch if arm_arch == None and cpu == "thumb2": arm_arch = "armv7-a" buildTamarin = o.getBoolArg('tamarin', True) if buildTamarin: config.subst("ENABLE_TAMARIN", 1) buildShell = o.getBoolArg("shell", True) if (buildShell): config.subst("ENABLE_SHELL", 1) buildAot = o.peekBoolArg("aot", False) if buildAot: config.subst("ENABLE_AOT", 1) APP_CPPFLAGS = "-DAVMSHELL_BUILD " APP_CXXFLAGS = "" APP_CFLAGS = "" OPT_CXXFLAGS = "-O3 " OPT_CPPFLAGS = "" DEBUG_CPPFLAGS = "-DDEBUG -D_DEBUG " DEBUG_CXXFLAGS = "" DEBUG_CFLAGS = "" DEBUG_LDFLAGS = "" OS_LIBS = [] OS_LDFLAGS = "" LDFLAGS = config._acvars['LDFLAGS'] MMGC_CPPFLAGS = "-DAVMSHELL_BUILD " AVMSHELL_CPPFLAGS = "" AVMSHELL_LDFLAGS = "" MMGC_DEFINES = {} NSPR_INCLUDES = "" NSPR_LDOPTS = "" DISABLE_RTMPE = None ANDROIDPLATFORMVER = "android-9" ARM_EABI_VER = '4.4.3' ARM_EABI = 'arm-linux-androideabi' if 'APP_CPPFLAGS' in os.environ: APP_CPPFLAGS += os.environ['APP_CPPFLAGS'] + " " if 'APP_CXXFLAGS' in os.environ: APP_CXXFLAGS += os.environ['APP_CXXFLAGS'] + " " if 'APP_CFLAGS' in os.environ: APP_CFLAGS += os.environ['APP_CFLAGS'] + " " if 'OPT_CXXFLAGS' in os.environ: OPT_CXXFLAGS += os.environ['OPT_CXXFLAGS'] + " " if 'OPT_CPPFLAGS' in os.environ: OPT_CPPFLAGS += os.environ['OPT_CPPFLAGS'] + " " if 'DEBUG_CPPFLAGS' in os.environ: DEBUG_CPPFLAGS += os.environ['DEBUG_CPPFLAGS'] + " " if 'DEBUG_CXXFLAGS' in os.environ: DEBUG_CXXFLAGS += os.environ['DEBUG_CXXFLAGS'] + " " if 'DEBUG_CFLAGS' in os.environ: DEBUG_CFLAGS += os.environ['DEBUG_CFLAGS'] + " " if 'DEBUG_LDFLAGS' in os.environ: DEBUG_LDFLAGS += os.environ['DEBUG_LDFLAGS'] + " " if 'OS_LDFLAGS' in os.environ: OS_LDFLAGS += os.environ['OS_LDFLAGS'] + " " if 'MMGC_CPPFLAGS' in os.environ: MMGC_CPPFLAGS += os.environ['MMGC_CPPFLAGS'] + " " if 'AVMSHELL_CPPFLAGS' in os.environ: AVMSHELL_CPPFLAGS += os.environ['AVMSHELL_CPPFLAGS'] + " " if 'ARM_EABI' in os.environ: ARM_EABI = os.environ['ARM_EABI'] if 'ARM_EABI_VER' in os.environ: ARM_EABI_VER = os.environ['ARM_EABI_VER'] if 'AVMSHELL_LDFLAGS' in os.environ: AVMSHELL_LDFLAGS += os.environ['AVMSHELL_LDFLAGS'] + " " if 'NSPR_INCLUDES' in os.environ: NSPR_INCLUDES += os.environ['NSPR_INCLUDES'] + " " if 'NSPR_LDOPTS' in os.environ: NSPR_LDOPTS += os.environ['NSPR_LDOPTS'] + " " if 'DISABLE_RTMPE' in os.environ: DISABLE_RTMPE += os.environ['DISABLE_RTMPE'] + " " if o.getBoolArg('valgrind', False, False): OPT_CXXFLAGS = "-O1 -g " valinc = '$(topsrcdir)/other-licenses' if 'VALGRIND_HOME' in os.environ: valinc = os.environ['VALGRIND_HOME'] + '/include' APP_CPPFLAGS += '-I' + valinc + ' 
' # builtinBuildFlags() must be called first, featureSettings() will clear the features! config.subst("BUILTIN_BUILDFLAGS",build.avmfeatures.builtinBuildFlags(o)); # See build/avmfeatures.py for the code that processes switches for # standard feature names. APP_CPPFLAGS += build.avmfeatures.featureSettings(o) if not o.getBoolArg("methodenv-impl32", True): APP_CPPFLAGS += "-DVMCFG_METHODENV_IMPL32=0 " memoryProfiler = o.getBoolArg("memory-profiler", False) if memoryProfiler: APP_CPPFLAGS += "-DMMGC_MEMORY_PROFILER " MMGC_INTERIOR_PTRS = o.getBoolArg('mmgc-interior-pointers', False) if MMGC_INTERIOR_PTRS: MMGC_DEFINES['MMGC_INTERIOR_PTRS'] = None MMGC_DYNAMIC = o.getBoolArg('mmgc-shared', False) if MMGC_DYNAMIC: MMGC_DEFINES['MMGC_DLL'] = None MMGC_CPPFLAGS += "-DMMGC_IMPL " # For -Wreorder, see https://bugzilla.mozilla.org/show_bug.cgi?id=475750 if config.getCompiler() == 'GCC': if 'CXX' in os.environ: rawver = build.process.run_for_output(['$CXX', '--version']) else: rawver = build.process.run_for_output(['gcc', '--version']) vre = re.compile(".* ([3-9]\.[0-9]+\.[0-9]+)[ \n]") ver = vre.match(rawver).group(1) ver_arr = ver.split('.') GCC_MAJOR_VERSION = int(ver_arr[0]) GCC_MINOR_VERSION = int(ver_arr[1]) if the_os == 'android': try: ANDROID_TOOLCHAIN = os.environ['ANDROID_TOOLCHAIN'] ANDROID_NDK = os.environ['ANDROID_NDK'] ANDROID_NDK_BIN = os.environ['ANDROID_NDK_BIN'] ANDROID_SDK = os.environ['ANDROID_SDK'] except: print('\nANDROID_ variables not found in environment\nPlease run /android-public/android-vars.sh') exit(0) ANDROID_INCLUDES = "-I$(topsrcdir)/other-licenses/zlib "\ "-I$(ANDROID_NDK)/platforms/%s/arch-arm/usr/include "\ "-I$(ANDROID_NDK_BIN) "\ "-I$(ANDROID_SDK) "\ "-I$(ANDROID_NDK)/sources/cxx-stl/stlport/stlport "\ "-I$(ANDROID_TOOLCHAIN)/openssl/include "\ "-I$(ANDROID_TOOLCHAIN)/frameworks/base/opengl/include " % (ANDROIDPLATFORMVER) # These flags are shared with some of the other builds such as ARM, but better to keep them separate here for flexibility COMMON_CXX_FLAGS = "-Wall -Wdisabled-optimization -Wextra -Wformat=2 -Winit-self -Winvalid-pch -Wno-invalid-offsetof " \ "-Wno-switch -Wpointer-arith -Wwrite-strings -Woverloaded-virtual -Wsign-promo " \ "-fmessage-length=0 -fno-exceptions -fno-rtti -fsigned-char -fno-inline-functions-called-once -ffunction-sections -fdata-sections -Wno-ctor-dtor-privacy " # Additional flags used by android APP_CXX_FLAGS = "%s -Wno-ctor-dtor-privacy -Wlogical-op -Wstrict-overflow=1 " \ "-Wmissing-include-dirs -Wno-missing-field-initializers -Wno-type-limits -Wno-unused-parameter " \ "-Wnon-virtual-dtor -Wstrict-null-sentinel -Wno-missing-braces -Wno-multichar -Wno-psabi -Wno-reorder " \ "-fno-short-enums -fno-strict-aliasing -fpic -funwind-tables -fstack-protector -finline-limit=200 -ftree-vectorize " \ "-feliminate-unused-debug-symbols -feliminate-unused-debug-types -MD -fwrapv " % COMMON_CXX_FLAGS APP_CXXFLAGS += _setGCCVersionedFlags(APP_CXX_FLAGS, GCC_MAJOR_VERSION, GCC_MINOR_VERSION, cpu) # LFLAGS_HEADLESS gets picked up in configuration.py by MKPROGRAM LFLAGS_HEADLESS = "-nostdlib -Bdynamic -Wl,-T,"\ "$(ANDROID_NDK_BIN)/../%s/lib/ldscripts/armelf_linux_eabi.x "\ "-Wl,-dynamic-linker,/system/bin/linker "\ "-Wl,-z,nocopyreloc "\ "-L$(ANDROID_NDK)/platforms/%s/arch-arm/usr/lib "\ "-L$(ANDROID_NDK)/sources/cxx-stl/stlport/libs/armeabi "\ "-Wl,-rpath-link=$(ANDROID_NDK)/platforms/%s/arch-arm/usr/lib "\ "$(ANDROID_NDK)/platforms/%s/arch-arm/usr/lib/crtbegin_dynamic.o "\ "$(ANDROID_NDK)/platforms/%s/arch-arm/usr/lib/crtend_android.o " % 
(ARM_EABI,ANDROIDPLATFORMVER,ANDROIDPLATFORMVER,ANDROIDPLATFORMVER,ANDROIDPLATFORMVER) LDFLAGS += "$(ANDROID_TOOLCHAIN)/openssl/libcrypto.a $(ANDROID_TOOLCHAIN)/openssl/libssl.a" # SEARCH_DIRS gets picked up in configuration.py by MKPROGRAM SEARCH_DIRS = "-L." BASE_M_FLAGS = "-mlong-calls -mthumb-interwork " if arm_arch == "armv7-a" or arm_arch == None: BASE_CXX_FLAGS = "%s -march=armv7-a -mtune=cortex-a8 -mfloat-abi=softfp -mno-thumb -fno-section-anchors -D__ARM_ARCH__=7 " \ "-DARMV6_ASSEMBLY " % BASE_M_FLAGS APP_CXXFLAGS += BASE_CXX_FLAGS elif arm_arch == "armv6": BASE_CXX_FLAGS = "%s -march=armv6 -mfloat-abi=soft -D__ARM_ARCH__=6 -DARMV5_ASSEMBLY -DARMV6_ASSEMBLY " % BASE_M_FLAGS APP_CXXFLAGS += BASE_CXX_FLAGS LFLAGS_HEADLESS += "-Wl,--no-enum-size-warning" elif arm_arch == "armv5": BASE_CXX_FLAGS = "%s -march=armv5te -mfloat-abi=soft -mtune=xscale -D__ARM_ARCH__=5 -DARMV5_ASSEMBLY " % BASE_M_FLAGS APP_CXXFLAGS += BASE_CXX_FLAGS LFLAGS_HEADLESS += "-Wl,--no-enum-size-warning" else: raise Exception('Unrecognized architecture: %s' % arm_arch) APP_CPPFLAGS += "-DAVMPLUS_UNIX -DUNIX -Dlinux -DUSE_PTHREAD_MUTEX -DGTEST_USE_OWN_TR1_TUPLE=1 -DHAVE_STDARG -DAVMPLUS_ARM %s" % ANDROID_INCLUDES else: APP_CXXFLAGS += "-Wall -Wcast-align -Wdisabled-optimization -Wextra -Wformat=2 -Winit-self -Winvalid-pch -Wno-invalid-offsetof -Wno-switch "\ "-Wparentheses -Wpointer-arith -Wreorder -Wsign-compare -Wunused-parameter -Wwrite-strings -Wno-ctor-dtor-privacy -Woverloaded-virtual "\ "-Wsign-promo -Wno-char-subscripts -fmessage-length=0 -fno-exceptions -fno-rtti -fno-check-new -fstrict-aliasing -fsigned-char " APP_CXXFLAGS += _setGCCVersionedFlags(APP_CXXFLAGS, GCC_MAJOR_VERSION, GCC_MINOR_VERSION, cpu) if cpu == 'sh4': APP_CXXFLAGS += "-mieee -Wno-cast-align " if cpu == 'arm' or cpu == 'thumb2': APP_CXXFLAGS += "-Wno-cast-align " APP_CFLAGS += "-Wno-cast-align " FLOAT_ABI = None; EXTRA_CFLAGS = ""; if arm_fpu: FLOAT_ABI = "-mfloat-abi=softfp " EXTRA_CFLAGS = "-mfpu=vfp -march=%s " % arm_arch # compile to use hardware fpu if arm_hard_float: FLOAT_ABI = "-mfloat-abi=hard -march=%s " % arm_arch # compile to use neon vfp AVMSHELL_LDFLAGS += "-static " if arm_neon: if FLOAT_ABI == None: FLOAT_ABI = "-mfloat-abi=softfp " EXTRA_CFLAGS = "-mfpu=neon -march=%s -DTARGET_NEON " % arm_arch # compile to use neon vfp if arm_thumb: EXTRA_CFLAGS += "-mthumb -DTARGET_THUMB2 " if arm_thumb != False and arm_arch == "armv7-a": EXTRA_CFLAGS += "-mtune=cortex-a8 " #if arm_arch: #OPT_CXXFLAGS += "-march=%s " % arm_arch #DEBUG_CXXFLAGS += "-march=%s " % arm_arch if EXTRA_CFLAGS != None: APP_CXXFLAGS += EXTRA_CFLAGS APP_CFLAGS += EXTRA_CFLAGS if FLOAT_ABI != None: APP_CXXFLAGS += FLOAT_ABI APP_CFLAGS += FLOAT_ABI AVMSHELL_LDFLAGS += FLOAT_ABI if config.getDebug(): APP_CXXFLAGS += "" else: APP_CXXFLAGS += "-Wuninitialized " DEBUG_CXXFLAGS += "-g " DEBUG_LDFLAGS += "-g " elif config.getCompiler() == 'VS': if cpu == "arm": APP_CXXFLAGS = "-W4 -WX -wd4291 -wd4201 -wd4189 -wd4740 -wd4127 -fp:fast -GF -GS- -Zc:wchar_t- " OS_LDFLAGS += "-MAP " if config.getDebug(): DEBUG_CXXFLAGS = "-Od " DEBUG_CFLAGS = "-Od " APP_CXXFLAGS += "-GR- -fp:fast -GS- -Zc:wchar_t- -Zc:forScope " else: OPT_CXXFLAGS = "-O2 -GR- " if arm_arch: OPT_CXXFLAGS += "-QR%s " % arm_arch if arm_fpu: OPT_CXXFLAGS += "-QRfpe- " # compile to use hardware fpu else: APP_CXXFLAGS = "-W4 -WX -wd4291 -GF -GS- -Zc:wchar_t- " APP_CFLAGS = "-W3 -WX -wd4291 -GF -GS- -Zc:wchar_t- " if cpu == 'x86_64': pass # 64 bit VC does NaN comparisons incorrectly with fp:fast else: APP_CXXFLAGS 
+= "-fp:fast " APP_CFLAGS += "-fp:fast " OS_LDFLAGS += "-MAP " if config.getDebug(): DEBUG_CXXFLAGS = "-Od " DEBUG_CFLAGS = "-Od " else: OPT_CXXFLAGS = "-O2 -Ob1 -GR- " OPT_CFLAGS = "-O2 -Ob1 -GR- " if memoryProfiler: OPT_CXXFLAGS += "-Oy- -Zi " DEBUG_CXXFLAGS += "-Zi " DEBUG_CFLAGS += "-Zi " DEBUG_LDFLAGS += "-DEBUG " elif config.getCompiler() == 'SunStudio': APP_CXXFLAGS = "-template=no%extdef -erroff" OPT_CXXFLAGS = "-xO2 " DEBUG_CXXFLAGS += "-g " else: raise Exception('Unrecognized compiler: ' + config.getCompiler()) zlib_include_dir = o.getStringArg('zlib-include-dir') if zlib_include_dir is not None: AVMSHELL_CPPFLAGS += "-I%s " % zlib_include_dir zlib_lib = o.getStringArg('zlib-lib') if zlib_lib is None: zlib_lib = '$(call EXPAND_LIBNAME,zlib)' lzma_lib = o.getStringArg('lzma-lib') if lzma_lib is None: lzma_lib = '$(call EXPAND_LIBNAME,lzma)' AVMSHELL_LDFLAGS += zlib_lib + ' ' + lzma_lib sys_root_dir = o.getStringArg('sys-root-dir') if sys_root_dir is not None: OS_LDFLAGS += " --sysroot=%s " % sys_root_dir OPT_CXXFLAGS += " --sysroot=%s " % sys_root_dir if the_os == "darwin": # Get machine's OS version number and trim off anything after '10.x' p = subprocess.Popen('sw_vers -productVersion', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) os_ver = p.stdout.read() parts = os_ver.split('.') os_ver = parts[0] + '.' + parts[1] AVMSHELL_LDFLAGS += " -exported_symbols_list $(topsrcdir)/platform/mac/avmshell/exports.exp" MMGC_DEFINES.update({'TARGET_API_MAC_CARBON': 1, 'DARWIN': 1, '_MAC': None, 'AVMPLUS_MAC': None, 'TARGET_RT_MAC_MACHO': 1}) APP_CXXFLAGS += "-fpascal-strings -faltivec -fasm-blocks " # If an sdk is selected align OS and gcc/g++ versions to it os_ver,sdk_path = _setSDKParams(o.mac_sdk, os_ver, o.mac_xcode) APP_CXXFLAGS += "-mmacosx-version-min=%s -isysroot %s " % (os_ver,sdk_path) config.subst("MACOSX_DEPLOYMENT_TARGET",os_ver) if cpu == 'ppc64': APP_CXXFLAGS += "-arch ppc64 " APP_CFLAGS += "-arch ppc64 " OS_LDFLAGS += "-arch ppc64 " elif cpu == 'x86_64': APP_CXXFLAGS += "-arch x86_64 " APP_CFLAGS += "-arch x86_64 " OS_LDFLAGS += "-arch x86_64 " elif the_os == "windows" or the_os == "cygwin": MMGC_DEFINES.update({'WIN32': None, '_CRT_SECURE_NO_DEPRECATE': None}) OS_LDFLAGS += "-MAP " if cpu == "arm": APP_CPPFLAGS += "-DARM -D_ARM_ -DUNICODE -DUNDER_CE=1 -DMMGC_ARM " if arm_fpu: APP_CPPFLAGS += "-DARMV6 -QRarch6 " else: APP_CPPFLAGS += "-DARMV5 -QRarch5t " OS_LIBS.append('mmtimer corelibc coredll') elif cpu == "thumb2": APP_CPPFLAGS += "-DARMv7 -D_ARM_ -DTARGET_THUMB2 -DUNICODE -DUNDER_RT=1 -DWIN32_LEAN_AND_MEAN -D_CONSOLE -D_ARM_WINAPI_PARTITION_DESKTOP_SDK_AVAILABLE " OS_LIBS.append('winmm') OS_LIBS.append('shlwapi') OS_LIBS.append('AdvAPI32') else: APP_CPPFLAGS += "-DWIN32_LEAN_AND_MEAN -D_CONSOLE " OS_LIBS.append('winmm') OS_LIBS.append('shlwapi') OS_LIBS.append('AdvAPI32') elif the_os == "linux": MMGC_DEFINES.update({'UNIX': None, 'AVMPLUS_UNIX': None}) OS_LIBS.append('pthread') if cpu == "i686": APP_CPPFLAGS += "-m32 -march=i686 " OS_LDFLAGS += "-m32 " # if cpu == "x86_64": # # workaround https://bugzilla.mozilla.org/show_bug.cgi?id=467776 # OPT_CXXFLAGS += '-fno-schedule-insns2 ' if config.getDebug(): OS_LIBS.append("dl") elif the_os == "android": BASE_D_FLAGS = "-DANDROID -DHAVE_SYS_UIO_H -Dlinux -DUNIX -Dcompress=zlib_compress " APP_CXXFLAGS += BASE_D_FLAGS if config.getDebug(): DEBUG_CXXFLAGS += "-DDEBUG -D_DEBUG -DASYNC_DEBUG -O0 -ggdb3 " DEBUG_CPPFLAGS = "" else: APP_CXXFLAGS += "-DNDEBUG -O3 -fomit-frame-pointer -fvisibility=hidden 
-finline-functions -fgcse-after-reload -frerun-cse-after-loop -frename-registers -fvisibility-inlines-hidden " DEBUG_CPPFLAGS = "" elif the_os == "sunos": if config.getCompiler() != 'GCC': APP_CXXFLAGS = "-template=no%extdef -erroff" OPT_CXXFLAGS = "-xO2 " DEBUG_CXXFLAGS = "-g " MMGC_DEFINES.update({'UNIX': None, 'AVMPLUS_UNIX': None, 'SOLARIS': None}) OS_LIBS.append('pthread') OS_LIBS.append('rt') OS_LIBS.append('Cstd') APP_CPPFLAGS += '-DAVMPLUS_CDECL ' if config.getDebug(): OS_LIBS.append("dl") else: raise Exception("Unsupported OS") if cpu == "i686": if config.getCompiler() == 'GCC' : # we require sse2 APP_CPPFLAGS += "-msse2 " elif cpu == "powerpc": # we detect this in core/avmbuild.h and MMgc/*build.h None elif cpu == "ppc64": # we detect this in core/avmbuild.h and MMgc/*build.h None elif cpu == "sparc": APP_CPPFLAGS += "-DAVMPLUS_SPARC " elif cpu == "x86_64": # we detect this in core/avmbuild.h and MMgc/*build.h None elif cpu == "thumb2": # we detect this in core/avmbuild.h and MMgc/*build.h None elif cpu == "arm": # we detect this in core/avmbuild.h and MMgc/*build.h None elif cpu == "mips": # we detect this in core/avmbuild.h and MMgc/*build.h None elif cpu == "sh4": # work around for a problem with tas.b instruction on some sh4 boards APP_CPPFLAGS += "-DUSE_PTHREAD_MUTEX " else: raise Exception("Unsupported CPU") if o.getBoolArg('perfm'): APP_CPPFLAGS += "-DPERFM " if o.help: sys.stdout.write(o.getHelp()) sys.exit(1) # Get the optional avm description string # This is NOT supported on windows/cygwin due to cygwin-wrapper.sh # not passing the string correctly to cl.exe AVMPLUS_DESC = o.getStringArg('desc') or '' if the_os == "windows" or the_os == "cygwin": if AVMPLUS_DESC: print('AVMPLUS_DESC is not supported on windows via cygwin make.' ' Ignoring description.') else: # all other platforms # place in Makefile even if the value is empty so # it can be updated by hand if desired APP_CPPFLAGS += '-DAVMPLUS_DESC="${AVMPLUS_DESC}" ' config.subst("AVMPLUS_DESC", AVMPLUS_DESC) # Append MMGC_DEFINES to APP_CPPFLAGS APP_CPPFLAGS += ''.join(val is None and ('-D%s ' % var) or ('-D%s=%s ' % (var, val)) for (var, val) in MMGC_DEFINES.iteritems()) config.subst("APP_CPPFLAGS", APP_CPPFLAGS) config.subst("APP_CXXFLAGS", APP_CXXFLAGS) config.subst("APP_CFLAGS", APP_CFLAGS) config.subst("OPT_CPPFLAGS", OPT_CPPFLAGS) config.subst("OPT_CXXFLAGS", OPT_CXXFLAGS) config.subst("DEBUG_CPPFLAGS", DEBUG_CPPFLAGS) config.subst("DEBUG_CXXFLAGS", DEBUG_CXXFLAGS) config.subst("DEBUG_LDFLAGS", DEBUG_LDFLAGS) config.subst("OS_LIBS", " ".join(OS_LIBS)) config.subst("OS_LDFLAGS", OS_LDFLAGS) config.subst("MMGC_CPPFLAGS", MMGC_CPPFLAGS) config.subst("AVMSHELL_CPPFLAGS", AVMSHELL_CPPFLAGS) config.subst("AVMSHELL_LDFLAGS", AVMSHELL_LDFLAGS) config.subst("MMGC_DYNAMIC", MMGC_DYNAMIC and 1 or '') if the_os == "android": config.subst("LFLAGS_HEADLESS", LFLAGS_HEADLESS) config.subst("LDFLAGS", LDFLAGS) config.subst("SEARCH_DIRS", SEARCH_DIRS) config.generate("Makefile") o.finish()
[]
[]
[ "APP_CFLAGS", "ANDROID_NDK_BIN", "APP_CPPFLAGS", "DEBUG_LDFLAGS", "AVMSHELL_LDFLAGS", "VALGRIND_HOME", "APP_CXXFLAGS", "ANDROID_TOOLCHAIN", "DISABLE_RTMPE", "OPT_CPPFLAGS", "ARM_EABI", "DEBUG_CXXFLAGS", "DEBUG_CPPFLAGS", "ANDROID_SDK", "NSPR_LDOPTS", "ANDROID_NDK", "OS_LDFLAGS", "ARM_EABI_VER", "DEBUG_CFLAGS", "MMGC_CPPFLAGS", "AVMSHELL_CPPFLAGS", "OPT_CXXFLAGS", "NSPR_INCLUDES" ]
[]
["APP_CFLAGS", "ANDROID_NDK_BIN", "APP_CPPFLAGS", "DEBUG_LDFLAGS", "AVMSHELL_LDFLAGS", "VALGRIND_HOME", "APP_CXXFLAGS", "ANDROID_TOOLCHAIN", "DISABLE_RTMPE", "OPT_CPPFLAGS", "ARM_EABI", "DEBUG_CXXFLAGS", "DEBUG_CPPFLAGS", "ANDROID_SDK", "NSPR_LDOPTS", "ANDROID_NDK", "OS_LDFLAGS", "ARM_EABI_VER", "DEBUG_CFLAGS", "MMGC_CPPFLAGS", "AVMSHELL_CPPFLAGS", "OPT_CXXFLAGS", "NSPR_INCLUDES"]
python
23
0
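Illustrative note: configure.py repeats one pattern for more than twenty flag variables: if the variable is present in os.environ, its value plus a trailing space is appended to the corresponding flag string. One caveat visible in the row above: DISABLE_RTMPE is initialised to None, so that particular branch appears to raise a TypeError whenever the variable is actually set. A sketch of the append-from-environment pattern factored into a helper (an illustration, not how the original is written):

```python
import os

# Illustrative helper, not part of configure.py: append an environment
# override plus a trailing space onto an existing flag string when set.
def append_env(flags, name):
    if name in os.environ:
        return flags + os.environ[name] + " "
    return flags

APP_CXXFLAGS = append_env("", "APP_CXXFLAGS")
OPT_CXXFLAGS = append_env("-O3 ", "OPT_CXXFLAGS")
```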
backend/guff.go
package main import ( "context" "encoding/base64" "flag" "io/ioutil" "os" "os/signal" "syscall" "cloud.google.com/go/kms/apiv1" "github.com/golang/glog" "golang.org/x/oauth2/google" kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" "github.com/toothrot/guff/backend/core" ) var ( webRoot = flag.String("web_root", "", "Path from which to serve web files.") port = flag.String("port", "8080", "Port to listen for HTTP requests.") divisionsURL = flag.String("divisions_url", "https://royalpalmsshuffle.leagueapps.com/leagues?state=LIVE&locationId=&seasonId=&days=&levelId=", "URL of Divisions page") dbname = flag.String("dbname", "guff_dev", "Postgres database name") // Secrets oauthConfigPath = flag.String("oauth_config", "/run/secrets/oauth.json", "OAuth config JSON file path (see http://golang.org/x/oauth2/google#ConfigFromJSON)") dbPassPath = flag.String("db_pass_path", "/run/secrets/postgres-guff-password", "Database password secret file path") // ENV Secrets // example keyName: "projects/PROJECT_ID/locations/global/keyRings/RING_ID/cryptoKeys/KEY_ID" kmsKey = os.Getenv("GUFF_KMS_KEY") oauthConfigEnc = os.Getenv("GUFF_OAUTH_CONFIG_ENC") dbURLEnc = os.Getenv("GUFF_DB_URL_ENC") requireHTTPS = os.Getenv("GUFF_REQUIRE_HTTPS") ) func main() { defer glog.Info("later, gator.") flag.Parse() glog.Info("Don't take any guff from these swine.") ctx, cancel := context.WithCancel(context.Background()) defer cancel() ctx = handleSigs(ctx) if lis := os.Getenv("PORT"); lis != "" { *port = lis glog.Infof("Will listen on %q", *port) } kc, err := kms.NewKeyManagementClient(ctx) if err != nil { glog.Errorf("kms.NewKeyManagementClient() = %v", err) } oc, err := google.ConfigFromJSON(oauthConfig(ctx, kc)) if err != nil { glog.Errorf("google.ConfigFromJSON() returned error %q", err) } dbpass, err := getSecret(*dbPassPath) if err != nil { glog.Errorf("error getting secret %q: %q", *dbPassPath, err) dbpass = []byte{} } oc.Scopes = []string{"email", "profile"} conf := &core.Config{ OAuthConfig: oc, ProgramsURL: *divisionsURL, DBName: *dbname, DBPassword: string(dbpass), DBURL: string(getKMSSecret(ctx, kc, dbURLEnc)), RequireHTTPS: requireHTTPS, } g := newGuffApp(ctx, conf) g.Serve(ctx) } func oauthConfig(ctx context.Context, kc *kms.KeyManagementClient) []byte { if oauthConfigEnc != "" { return getKMSSecret(ctx, kc, oauthConfigEnc) } b, err := getSecret(*oauthConfigPath) if err != nil { glog.Fatalf("error getting secret %q: %q", *oauthConfigPath, err) } return b } func getKMSSecret(ctx context.Context, kc *kms.KeyManagementClient, cipherText string) []byte { if kmsKey == "" { glog.Infof("GUFF_KMS_KEY not set, skipping") return []byte{} } b, err := base64.StdEncoding.DecodeString(cipherText) if err != nil { glog.Errorf("error decoding secret: %v", err) return []byte{} } req := &kmspb.DecryptRequest{Name: kmsKey, Ciphertext: b} resp, err := kc.Decrypt(ctx, req) if err != nil { glog.Errorf("error decrypting secret: %v", err) return []byte{} } return resp.GetPlaintext() } func getSecret(path string) ([]byte, error) { f, err := os.Open(path) if err != nil { return []byte{}, err } return ioutil.ReadAll(f) } func handleSigs(ctx context.Context) context.Context { sigint := make(chan os.Signal, 1) signal.Notify(sigint, os.Interrupt) signal.Notify(sigint, syscall.SIGTERM) ctx, cancel := context.WithCancel(ctx) go func() { select { case s := <-sigint: glog.Infof("Handling signal %#v", s) cancel() case <-ctx.Done(): } }() return ctx }
[ "\"GUFF_KMS_KEY\"", "\"GUFF_OAUTH_CONFIG_ENC\"", "\"GUFF_DB_URL_ENC\"", "\"GUFF_REQUIRE_HTTPS\"", "\"PORT\"" ]
[]
[ "PORT", "GUFF_DB_URL_ENC", "GUFF_KMS_KEY", "GUFF_OAUTH_CONFIG_ENC", "GUFF_REQUIRE_HTTPS" ]
[]
["PORT", "GUFF_DB_URL_ENC", "GUFF_KMS_KEY", "GUFF_OAUTH_CONFIG_ENC", "GUFF_REQUIRE_HTTPS"]
go
5
0
bsp/thead-smart/rtconfig.py
import os # toolchains options # CPUNAME = e906/e906f/e906fd/e906p/e906fp/e906fdp # CPUNAME = e907/e907f/e907fd/e907p/e907fp/e907fdp ARCH ='risc-v' CPU ='e9xx' CPUNAME ='e906fdp' VENDOR ='t-head' CROSS_TOOL ='gcc' if os.getenv('RTT_CC'): CROSS_TOOL = os.getenv('RTT_CC') if CROSS_TOOL == 'gcc': PLATFORM = 'gcc' EXEC_PATH = r'/home/chenzx/.thead/riscv64-elf-x86_64-2.0.1/bin/' else: print ('Please make sure your toolchains is GNU GCC!') exit(0) if os.getenv('RTT_EXEC_PATH'): EXEC_PATH = os.getenv('RTT_EXEC_PATH') # BUILD = 'debug' BUILD = 'release' if PLATFORM == 'gcc': # toolchains PREFIX = 'riscv64-unknown-elf-' CC = PREFIX + 'gcc' CXX = PREFIX + 'g++' AS = PREFIX + 'gcc' AR = PREFIX + 'ar' LINK = PREFIX + 'g++' TARGET_EXT = 'elf' SIZE = PREFIX + 'size' OBJDUMP = PREFIX + 'objdump' OBJCPY = PREFIX + 'objcopy' STRIP = PREFIX + 'strip' if CPUNAME == 'e906fdp' or CPUNAME == 'e907fdp': DEVICE = ' -march=rv32imafdcpzp64_xtheade -mabi=ilp32d' if CPUNAME == 'e906fp' or CPUNAME == 'e907fp': DEVICE = ' -march=rv32imafcpzp64_xtheade -mabi=ilp32f' if CPUNAME == 'e906p' or CPUNAME == 'e907p': DEVICE = ' -march=rv32imacpzp64_xtheade -mabi=ilp32' if CPUNAME == 'e906fd' or CPUNAME == 'e907fd': DEVICE = ' -march=rv32imafdc_xtheade -mabi=ilp32d' if CPUNAME == 'e906f' or CPUNAME == 'e907f': DEVICE = ' -march=rv32imafc_xtheade -mabi=ilp32f' if CPUNAME == 'e906' or CPUNAME == 'e907': DEVICE = ' -march=rv32imac_xtheade -mabi=ilp32' CFLAGS = DEVICE + ' -c -g -ffunction-sections -fdata-sections -Wall -mcmodel=medlow' AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp' LFLAGS = DEVICE + ' -nostartfiles -Wl,--no-whole-archive -T gcc_csky.ld -lm -lc -lgcc -Wl,-gc-sections -Wl,-zmax-page-size=1024 -Wl,-Map=rtt.map' CPATH = '' LPATH = '' if BUILD == 'debug': CFLAGS += ' -O0 -gdwarf-2' AFLAGS += ' -gdwarf-2' else: CFLAGS += ' -O2 -g2' CXXFLAGS = CFLAGS DUMP_ACTION = OBJDUMP + ' -D -S $TARGET > rtt.asm\n' POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
[]
[]
[ "RTT_CC", "RTT_EXEC_PATH" ]
[]
["RTT_CC", "RTT_EXEC_PATH"]
python
2
0
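Illustrative note: rtconfig.py lets RTT_CC and RTT_EXEC_PATH override its hard-coded toolchain settings, the usual convention for RT-Thread board configs. The override pattern shown in isolation (a sketch, with defaults copied from the file above):

```python
import os

# Environment overrides win over the hard-coded defaults, as in rtconfig.py.
CROSS_TOOL = os.getenv("RTT_CC") or "gcc"
EXEC_PATH = os.getenv("RTT_EXEC_PATH") or "/home/chenzx/.thead/riscv64-elf-x86_64-2.0.1/bin/"
print(CROSS_TOOL, EXEC_PATH)
```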
src/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go
// +build daemon,!windows package main import ( "bytes" "encoding/json" "fmt" "io/ioutil" "net" "os" "os/exec" "path/filepath" "regexp" "strconv" "strings" "time" "github.com/docker/libnetwork/iptables" "github.com/docker/libtrust" "github.com/go-check/check" ) func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } if out, err := s.d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil { c.Fatalf("Could not run top1: err=%v\n%s", err, out) } // --restart=no by default if out, err := s.d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { c.Fatalf("Could not run top2: err=%v\n%s", err, out) } testRun := func(m map[string]bool, prefix string) { var format string for cont, shouldRun := range m { out, err := s.d.Cmd("ps") if err != nil { c.Fatalf("Could not run ps: err=%v\n%q", err, out) } if shouldRun { format = "%scontainer %q is not running" } else { format = "%scontainer %q is running" } if shouldRun != strings.Contains(out, cont) { c.Fatalf(format, prefix, cont) } } } testRun(map[string]bool{"top1": true, "top2": true}, "") if err := s.d.Restart(); err != nil { c.Fatalf("Could not restart daemon: %v", err) } testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") } func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatal(err) } if out, err := s.d.Cmd("run", "-d", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { c.Fatal(err, out) } if err := s.d.Restart(); err != nil { c.Fatal(err) } if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { c.Fatal(err) } if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { c.Fatal(err, out) } out, err := s.d.Cmd("inspect", "-f", "{{json .Mounts}}", "volrestarttest1") c.Assert(err, check.IsNil) if _, err := inspectMountPointJSON(out, "/foo"); err != nil { c.Fatalf("Expected volume to exist: /foo, error: %v\n", err) } } // #11008 func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) { err := s.d.StartWithBusybox() c.Assert(err, check.IsNil) out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top") c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) out, err = s.d.Cmd("run", "-d", "--name", "top2", "--restart", "unless-stopped", "busybox:latest", "top") c.Assert(err, check.IsNil, check.Commentf("run top2: %v", out)) testRun := func(m map[string]bool, prefix string) { var format string for name, shouldRun := range m { out, err := s.d.Cmd("ps") c.Assert(err, check.IsNil, check.Commentf("run ps: %v", out)) if shouldRun { format = "%scontainer %q is not running" } else { format = "%scontainer %q is running" } c.Assert(strings.Contains(out, name), check.Equals, shouldRun, check.Commentf(format, prefix, name)) } } // both running testRun(map[string]bool{"top1": true, "top2": true}, "") out, err = s.d.Cmd("stop", "top1") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = s.d.Cmd("stop", "top2") c.Assert(err, check.IsNil, check.Commentf(out)) // both stopped testRun(map[string]bool{"top1": false, "top2": false}, "") err = s.d.Restart() c.Assert(err, check.IsNil) // restart=always running testRun(map[string]bool{"top1": true, "top2": false}, "After daemon 
restart: ") out, err = s.d.Cmd("start", "top2") c.Assert(err, check.IsNil, check.Commentf("start top2: %v", out)) err = s.d.Restart() c.Assert(err, check.IsNil) // both running testRun(map[string]bool{"top1": true, "top2": true}, "After second daemon restart: ") } func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) { if err := s.d.Start("--iptables=false"); err != nil { c.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) } } // Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and // no longer has an IP associated, we should gracefully handle that case and associate // an IP with it rather than fail daemon start func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C) { // rather than depending on brctl commands to verify docker0 is created and up // let's start the daemon and stop it, and then make a modification to run the // actual test if err := s.d.Start(); err != nil { c.Fatalf("Could not start daemon: %v", err) } if err := s.d.Stop(); err != nil { c.Fatalf("Could not stop daemon: %v", err) } // now we will remove the ip from docker0 and then try starting the daemon ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) if err != nil { c.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) } if err := s.d.Start(); err != nil { warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) } } func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { c.Fatalf("Could not run top: %s, %v", out, err) } // get output from iptables with container running ipTablesSearchString := "tcp dpt:80" ipTablesCmd := exec.Command("iptables", "-nvL") out, _, err := runCommandWithOutput(ipTablesCmd) if err != nil { c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) } if err := s.d.Stop(); err != nil { c.Fatalf("Could not stop daemon: %v", err) } // get output from iptables after restart ipTablesCmd = exec.Command("iptables", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) if err != nil { c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if strings.Contains(out, ipTablesSearchString) { c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) } } func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { c.Fatalf("Could not run top: %s, %v", out, err) } // get output from iptables with container running ipTablesSearchString := "tcp dpt:80" ipTablesCmd := exec.Command("iptables", "-nvL") out, _, err := runCommandWithOutput(ipTablesCmd) if err != nil { c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { c.Fatalf("iptables output should have contained %q, 
but was %q", ipTablesSearchString, out) } if err := s.d.Restart(); err != nil { c.Fatalf("Could not restart daemon: %v", err) } // make sure the container is not running runningOut, err := s.d.Cmd("inspect", "--format='{{.State.Running}}'", "top") if err != nil { c.Fatalf("Could not inspect on container: %s, %v", out, err) } if strings.TrimSpace(runningOut) != "true" { c.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut)) } // get output from iptables after restart ipTablesCmd = exec.Command("iptables", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) if err != nil { c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { c.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) } } // TestDaemonIPv6Enabled checks that when the daemon is started with --ipv6=true that the docker0 bridge // has the fe80::1 address and that a container is assigned a link-local address func (s *DockerSuite) TestDaemonIPv6Enabled(c *check.C) { testRequires(c, IPv6) if err := setupV6(); err != nil { c.Fatal("Could not set up host for IPv6 tests") } d := NewDaemon(c) if err := d.StartWithBusybox("--ipv6"); err != nil { c.Fatal(err) } defer d.Stop() iface, err := net.InterfaceByName("docker0") if err != nil { c.Fatalf("Error getting docker0 interface: %v", err) } addrs, err := iface.Addrs() if err != nil { c.Fatalf("Error getting addresses for docker0 interface: %v", err) } var found bool expected := "fe80::1/64" for i := range addrs { if addrs[i].String() == expected { found = true } } if !found { c.Fatalf("Bridge does not have an IPv6 Address") } if out, err := d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { c.Fatalf("Could not run container: %s, %v", out, err) } out, err := d.Cmd("inspect", "--format", "'{{.NetworkSettings.LinkLocalIPv6Address}}'", "ipv6test") out = strings.Trim(out, " \r\n'") if err != nil { c.Fatalf("Error inspecting container: %s, %v", out, err) } if ip := net.ParseIP(out); ip == nil { c.Fatalf("Container should have a link-local IPv6 address") } out, err = d.Cmd("inspect", "--format", "'{{.NetworkSettings.GlobalIPv6Address}}'", "ipv6test") out = strings.Trim(out, " \r\n'") if err != nil { c.Fatalf("Error inspecting container: %s, %v", out, err) } if ip := net.ParseIP(out); ip != nil { c.Fatalf("Container should not have a global IPv6 address: %v", out) } if err := teardownV6(); err != nil { c.Fatal("Could not perform teardown for IPv6 tests") } } // TestDaemonIPv6FixedCIDR checks that when the daemon is started with --ipv6=true and a fixed CIDR // that running containers are given a link-local and global IPv6 address func (s *DockerSuite) TestDaemonIPv6FixedCIDR(c *check.C) { testRequires(c, IPv6) if err := setupV6(); err != nil { c.Fatal("Could not set up host for IPv6 tests") } d := NewDaemon(c) if err := d.StartWithBusybox("--ipv6", "--fixed-cidr-v6='2001:db8:1::/64'"); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } defer d.Stop() if out, err := d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { c.Fatalf("Could not run container: %s, %v", out, err) } out, err := d.Cmd("inspect", "--format", "'{{.NetworkSettings.LinkLocalIPv6Address}}'", "ipv6test") out = strings.Trim(out, " \r\n'") if err != nil { c.Fatalf("Error inspecting container: %s, %v", out, err) } if ip := net.ParseIP(out); ip == nil { c.Fatalf("Container 
should have a link-local IPv6 address") } out, err = d.Cmd("inspect", "--format", "'{{.NetworkSettings.GlobalIPv6Address}}'", "ipv6test") out = strings.Trim(out, " \r\n'") if err != nil { c.Fatalf("Error inspecting container: %s, %v", out, err) } if ip := net.ParseIP(out); ip == nil { c.Fatalf("Container should have a global IPv6 address") } if err := teardownV6(); err != nil { c.Fatal("Could not perform teardown for IPv6 tests") } } func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) { c.Assert(s.d.Start("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level")) } func (s *DockerSuite) TestDaemonStartWithBackwardCompatibility(c *check.C) { var validCommandArgs = [][]string{ {"--selinux-enabled", "-l", "info"}, {"--insecure-registry", "daemon"}, } var invalidCommandArgs = [][]string{ {"--selinux-enabled", "--storage-opt"}, {"-D", "-b"}, {"--config", "/tmp"}, } for _, args := range validCommandArgs { d := NewDaemon(c) d.Command = "--daemon" if err := d.Start(args...); err != nil { c.Fatalf("Daemon should have started successfully with --daemon %v: %v", args, err) } d.Stop() } for _, args := range invalidCommandArgs { d := NewDaemon(c) if err := d.Start(args...); err == nil { d.Stop() c.Fatalf("Daemon should have failed to start with %v", args) } } } func (s *DockerSuite) TestDaemonStartWithDaemonCommand(c *check.C) { type kind int const ( common kind = iota daemon ) var flags = []map[kind][]string{ {common: {"-l", "info"}, daemon: {"--selinux-enabled"}}, {common: {"-D"}, daemon: {"--selinux-enabled", "-r"}}, {common: {"-D"}, daemon: {"--restart"}}, {common: {"--debug"}, daemon: {"--log-driver=json-file", "--log-opt=max-size=1k"}}, } var invalidGlobalFlags = [][]string{ //Invalid because you cannot pass daemon flags as global flags. {"--selinux-enabled", "-l", "info"}, {"-D", "-r"}, {"--config", "/tmp"}, } // `docker daemon -l info --selinux-enabled` // should NOT error out for _, f := range flags { d := NewDaemon(c) args := append(f[common], f[daemon]...) if err := d.Start(args...); err != nil { c.Fatalf("Daemon should have started successfully with %v: %v", args, err) } d.Stop() } // `docker -l info daemon --selinux-enabled` // should error out for _, f := range flags { d := NewDaemon(c) d.GlobalFlags = f[common] if err := d.Start(f[daemon]...); err == nil { d.Stop() c.Fatalf("Daemon should have failed to start with docker %v daemon %v", d.GlobalFlags, f[daemon]) } } for _, f := range invalidGlobalFlags { cmd := exec.Command(dockerBinary, append(f, "daemon")...) 
errch := make(chan error) var err error go func() { errch <- cmd.Run() }() select { case <-time.After(time.Second): cmd.Process.Kill() case err = <-errch: } if err == nil { c.Fatalf("Daemon should have failed to start with docker %v daemon", f) } } } func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { if err := s.d.Start("--log-level=debug"); err != nil { c.Fatal(err) } content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { // we creating new daemons to create new logFile if err := s.d.Start("--log-level=fatal"); err != nil { c.Fatal(err) } content, _ := ioutil.ReadFile(s.d.logFile.Name()) if strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) { if err := s.d.Start("-D"); err != nil { c.Fatal(err) } content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { if err := s.d.Start("--debug"); err != nil { c.Fatal(err) } content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { if err := s.d.Start("--debug", "--log-level=fatal"); err != nil { c.Fatal(err) } content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) { listeningPorts := [][]string{ {"0.0.0.0", "0.0.0.0", "5678"}, {"127.0.0.1", "127.0.0.1", "1234"}, {"localhost", "127.0.0.1", "1235"}, } cmdArgs := make([]string, 0, len(listeningPorts)*2) for _, hostDirective := range listeningPorts { cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2])) } if err := s.d.StartWithBusybox(cmdArgs...); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } for _, hostDirective := range listeningPorts { output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") if err == nil { c.Fatalf("Container should not start, expected port already allocated error: %q", output) } else if !strings.Contains(output, "port is already allocated") { c.Fatalf("Expected port is already allocated error: %q", output) } } } func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { // TODO: skip or update for Windows daemon os.Remove("/etc/docker/key.json") if err := s.d.Start(); err != nil { c.Fatalf("Could not start daemon: %v", err) } s.d.Stop() k, err := libtrust.LoadKeyFile("/etc/docker/key.json") if err != nil { c.Fatalf("Error opening key file") } kid := k.KeyID() // Test Key ID is a valid fingerprint (e.g. 
QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF) if len(kid) != 59 { c.Fatalf("Bad key ID: %s", kid) } } func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) { // TODO: skip or update for Windows daemon os.Remove("/etc/docker/key.json") k1, err := libtrust.GenerateECP256PrivateKey() if err != nil { c.Fatalf("Error generating private key: %s", err) } if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil { c.Fatalf("Error creating .docker directory: %s", err) } if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil { c.Fatalf("Error saving private key: %s", err) } if err := s.d.Start(); err != nil { c.Fatalf("Could not start daemon: %v", err) } s.d.Stop() k2, err := libtrust.LoadKeyFile("/etc/docker/key.json") if err != nil { c.Fatalf("Error opening key file") } if k1.KeyID() != k2.KeyID() { c.Fatalf("Key not migrated") } } // GH#11320 - verify that the daemon exits on failure properly // Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means // to get a daemon init failure; no other tests for -b/--bip conflict are therefore required func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) { //attempt to start daemon with incorrect flags (we know -b and --bip conflict) if err := s.d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { //verify we got the right error if !strings.Contains(err.Error(), "Daemon exited and never started") { c.Fatalf("Expected daemon not to start, got %v", err) } // look in the log and make sure we got the message that daemon is shutting down runCmd := exec.Command("grep", "Error starting daemon", s.d.LogfileName()) if out, _, err := runCommandWithOutput(runCmd); err != nil { c.Fatalf("Expected 'Error starting daemon' message; but doesn't exist in log: %q, err: %v", out, err) } } else { //if we didn't get an error and the daemon is running, this is a failure c.Fatal("Conflicting options should cause the daemon to error out with a failure") } } func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { d := s.d err := d.Start("--bridge", "nosuchbridge") c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail")) defer d.Restart() bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) out, err := createInterface(c, "bridge", bridgeName, bridgeIP) c.Assert(err, check.IsNil, check.Commentf(out)) defer deleteInterface(c, bridgeName) err = d.StartWithBusybox("--bridge", bridgeName) c.Assert(err, check.IsNil) ipTablesSearchString := bridgeIPNet.String() ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) c.Assert(err, check.IsNil) c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, check.Commentf("iptables output should have contained %q, but was %q", ipTablesSearchString, out)) _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top") c.Assert(err, check.IsNil) containerIP := d.findContainerIP("ExtContainer") ip := net.ParseIP(containerIP) c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, check.Commentf("Container IP-Address must be in the same subnet range : %s", containerIP)) } func createInterface(c *check.C, ifType string, ifName string, ipNet string) (string, error) { args := []string{"link", "add", "name", ifName, "type", ifType} ipLinkCmd := exec.Command("ip", args...) 
out, _, err := runCommandWithOutput(ipLinkCmd) if err != nil { return out, err } ifCfgCmd := exec.Command("ifconfig", ifName, ipNet, "up") out, _, err = runCommandWithOutput(ifCfgCmd) return out, err } func deleteInterface(c *check.C, ifName string) { ifCmd := exec.Command("ip", "link", "delete", ifName) out, _, err := runCommandWithOutput(ifCmd) c.Assert(err, check.IsNil, check.Commentf(out)) flushCmd := exec.Command("iptables", "-t", "nat", "--flush") out, _, err = runCommandWithOutput(flushCmd) c.Assert(err, check.IsNil, check.Commentf(out)) flushCmd = exec.Command("iptables", "--flush") out, _, err = runCommandWithOutput(flushCmd) c.Assert(err, check.IsNil, check.Commentf(out)) } func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { // TestDaemonBridgeIP Steps // 1. Delete the existing docker0 Bridge // 2. Set --bip daemon configuration and start the new Docker Daemon // 3. Check if the bip config has taken effect using ifconfig and iptables commands // 4. Launch a Container and make sure the IP-Address is in the expected subnet // 5. Delete the docker0 Bridge // 6. Restart the Docker Daemon (via deferred action) // This Restart takes care of bringing docker0 interface back to auto-assigned IP defaultNetworkBridge := "docker0" deleteInterface(c, defaultNetworkBridge) d := s.d bridgeIP := "192.169.1.1/24" ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) err := d.StartWithBusybox("--bip", bridgeIP) c.Assert(err, check.IsNil) defer d.Restart() ifconfigSearchString := ip.String() ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge) out, _, _, err := runCommandWithStdoutStderr(ifconfigCmd) c.Assert(err, check.IsNil) c.Assert(strings.Contains(out, ifconfigSearchString), check.Equals, true, check.Commentf("ifconfig output should have contained %q, but was %q", ifconfigSearchString, out)) ipTablesSearchString := bridgeIPNet.String() ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) c.Assert(err, check.IsNil) c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, check.Commentf("iptables output should have contained %q, but was %q", ipTablesSearchString, out)) out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top") c.Assert(err, check.IsNil) containerIP := d.findContainerIP("test") ip = net.ParseIP(containerIP) c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, check.Commentf("Container IP-Address must be in the same subnet range : %s", containerIP)) deleteInterface(c, defaultNetworkBridge) } func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) { if err := s.d.Start(); err != nil { c.Fatalf("Could not start daemon: %v", err) } defer s.d.Restart() if err := s.d.Stop(); err != nil { c.Fatalf("Could not stop daemon: %v", err) } // now we will change the docker0's IP and then try starting the daemon bridgeIP := "192.169.100.1/24" _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) ipCmd := exec.Command("ifconfig", "docker0", bridgeIP) stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) if err != nil { c.Fatalf("failed to change docker0's IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) } if err := s.d.Start("--bip", bridgeIP); err != nil { c.Fatalf("Could not start daemon: %v", err) } //check if the iptables contains new bridgeIP MASQUERADE rule ipTablesSearchString := bridgeIPNet.String() ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") out, _, err := runCommandWithOutput(ipTablesCmd) if err != nil { c.Fatalf("Could not run iptables -nvL: %s, 
%v", out, err) } if !strings.Contains(out, ipTablesSearchString) { c.Fatalf("iptables output should have contained new MASQUERADE rule with IP %q, but was %q", ipTablesSearchString, out) } } func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) { d := s.d bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" out, err := createInterface(c, "bridge", bridgeName, bridgeIP) c.Assert(err, check.IsNil, check.Commentf(out)) defer deleteInterface(c, bridgeName) args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"} err = d.StartWithBusybox(args...) c.Assert(err, check.IsNil) defer d.Restart() for i := 0; i < 4; i++ { cName := "Container" + strconv.Itoa(i) out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") if err != nil { c.Assert(strings.Contains(out, "no available IPv4 addresses"), check.Equals, true, check.Commentf("Could not run a Container : %s %s", err.Error(), out)) } } } func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *check.C) { defaultNetworkBridge := "docker0" deleteInterface(c, defaultNetworkBridge) d := s.d bridgeIP := "192.169.1.1" bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) err := d.StartWithBusybox("--bip", bridgeIPNet) c.Assert(err, check.IsNil) defer d.Restart() expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP) out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, check.Commentf("Implicit default gateway should be bridge IP %s, but default route was '%s'", bridgeIP, strings.TrimSpace(out))) deleteInterface(c, defaultNetworkBridge) } func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) { defaultNetworkBridge := "docker0" deleteInterface(c, defaultNetworkBridge) d := s.d bridgeIP := "192.169.1.1" bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) gatewayIP := "192.169.1.254" err := d.StartWithBusybox("--bip", bridgeIPNet, "--default-gateway", gatewayIP) c.Assert(err, check.IsNil) defer d.Restart() expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP) out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, check.Commentf("Explicit default gateway should be %s, but default route was '%s'", gatewayIP, strings.TrimSpace(out))) deleteInterface(c, defaultNetworkBridge) } func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet(c *check.C) { defaultNetworkBridge := "docker0" deleteInterface(c, defaultNetworkBridge) // Program a custom default gateway outside of the container subnet, daemon should accept it and start err := s.d.StartWithBusybox("--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254") c.Assert(err, check.IsNil) deleteInterface(c, defaultNetworkBridge) s.d.Restart() } func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) { d := s.d ipStr := "192.170.1.1/24" ip, _, _ := net.ParseCIDR(ipStr) args := []string{"--ip", ip.String()} err := d.StartWithBusybox(args...) 
c.Assert(err, check.IsNil) defer d.Restart() out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") c.Assert(err, check.NotNil, check.Commentf("Running a container must fail with an invalid --ip option")) c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true) ifName := "dummy" out, err = createInterface(c, "dummy", ifName, ipStr) c.Assert(err, check.IsNil, check.Commentf(out)) defer deleteInterface(c, ifName) _, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") c.Assert(err, check.IsNil) ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) c.Assert(err, check.IsNil) regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String()) matched, _ := regexp.MatchString(regex, out) c.Assert(matched, check.Equals, true, check.Commentf("iptables output should have contained %q, but was %q", regex, out)) } func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) { d := s.d bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" out, err := createInterface(c, "bridge", bridgeName, bridgeIP) c.Assert(err, check.IsNil, check.Commentf(out)) defer deleteInterface(c, bridgeName) args := []string{"--bridge", bridgeName, "--icc=false"} err = d.StartWithBusybox(args...) c.Assert(err, check.IsNil) defer d.Restart() ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") out, _, err = runCommandWithOutput(ipTablesCmd) c.Assert(err, check.IsNil) regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) matched, _ := regexp.MatchString(regex, out) c.Assert(matched, check.Equals, true, check.Commentf("iptables output should have contained %q, but was %q", regex, out)) // Pinging another container must fail with --icc=false pingContainers(c, d, true) ipStr := "192.171.1.1/24" ip, _, _ := net.ParseCIDR(ipStr) ifName := "icc-dummy" createInterface(c, "dummy", ifName, ipStr) // But, Pinging external or a Host interface must succeed pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String()) runArgs := []string{"--rm", "busybox", "sh", "-c", pingCmd} _, err = d.Cmd("run", runArgs...) c.Assert(err, check.IsNil) } func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) { d := s.d bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" out, err := createInterface(c, "bridge", bridgeName, bridgeIP) c.Assert(err, check.IsNil, check.Commentf(out)) defer deleteInterface(c, bridgeName) args := []string{"--bridge", bridgeName, "--icc=false"} err = d.StartWithBusybox(args...) 
c.Assert(err, check.IsNil) defer d.Restart() ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") out, _, err = runCommandWithOutput(ipTablesCmd) c.Assert(err, check.IsNil) regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) matched, _ := regexp.MatchString(regex, out) c.Assert(matched, check.Equals, true, check.Commentf("iptables output should have contained %q, but was %q", regex, out)) out, err = d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567") c.Assert(err, check.IsNil, check.Commentf(out)) } func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) { bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" out, err := createInterface(c, "bridge", bridgeName, bridgeIP) c.Assert(err, check.IsNil, check.Commentf(out)) defer deleteInterface(c, bridgeName) err = s.d.StartWithBusybox("--bridge", bridgeName, "--icc=false") c.Assert(err, check.IsNil) defer s.d.Restart() _, err = s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") c.Assert(err, check.IsNil) _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") c.Assert(err, check.IsNil) childIP := s.d.findContainerIP("child") parentIP := s.d.findContainerIP("parent") sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) { c.Fatal("Iptables rules not found") } s.d.Cmd("rm", "--link", "parent/http") if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) 
{ c.Fatal("Iptables rules should be removed when unlink") } s.d.Cmd("kill", "child") s.d.Cmd("kill", "parent") } func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { testRequires(c, NativeExecDriver) if err := s.d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil { c.Fatal(err) } out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)") if err != nil { c.Fatal(out, err) } outArr := strings.Split(out, "\n") if len(outArr) < 2 { c.Fatalf("got unexpected output: %s", out) } nofile := strings.TrimSpace(outArr[0]) nproc := strings.TrimSpace(outArr[1]) if nofile != "42" { c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) } if nproc != "2048" { c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) } // Now restart daemon with a new default if err := s.d.Restart("--default-ulimit", "nofile=43"); err != nil { c.Fatal(err) } out, err = s.d.Cmd("start", "-a", "test") if err != nil { c.Fatal(err) } outArr = strings.Split(out, "\n") if len(outArr) < 2 { c.Fatalf("got unexpected output: %s", out) } nofile = strings.TrimSpace(outArr[0]) nproc = strings.TrimSpace(outArr[1]) if nofile != "43" { c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) } if nproc != "2048" { c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) } } // #11315 func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatal(err) } if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil { c.Fatal(err, out) } if out, err := s.d.Cmd("rename", "test", "test2"); err != nil { c.Fatal(err, out) } if err := s.d.Restart(); err != nil { c.Fatal(err) } if out, err := s.d.Cmd("start", "test2"); err != nil { c.Fatal(err, out) } } func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatal(err) } out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline") if err != nil { c.Fatal(out, err) } id := strings.TrimSpace(out) if out, err := s.d.Cmd("wait", id); err != nil { c.Fatal(out, err) } logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err != nil { c.Fatal(err) } f, err := os.Open(logPath) if err != nil { c.Fatal(err) } var res struct { Log string `json:"log"` Stream string `json:"stream"` Time time.Time `json:"time"` } if err := json.NewDecoder(f).Decode(&res); err != nil { c.Fatal(err) } if res.Log != "testline\n" { c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") } if res.Stream != "stdout" { c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") } if !time.Now().After(res.Time) { c.Fatalf("Log time %v in future", res.Time) } } func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatal(err) } out, err := s.d.Cmd("run", "-d", "--log-driver=none", "busybox", "echo", "testline") if err != nil { c.Fatal(out, err) } id := strings.TrimSpace(out) if out, err := s.d.Cmd("wait", id); err != nil { c.Fatal(out, err) } logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) } } func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { c.Fatal(err) } 
out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline") if err != nil { c.Fatal(out, err) } id := strings.TrimSpace(out) if out, err := s.d.Cmd("wait", id); err != nil { c.Fatal(out, err) } logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) } } func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) { if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { c.Fatal(err) } out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "echo", "testline") if err != nil { c.Fatal(out, err) } id := strings.TrimSpace(out) if out, err := s.d.Cmd("wait", id); err != nil { c.Fatal(out, err) } logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err != nil { c.Fatal(err) } f, err := os.Open(logPath) if err != nil { c.Fatal(err) } var res struct { Log string `json:"log"` Stream string `json:"stream"` Time time.Time `json:"time"` } if err := json.NewDecoder(f).Decode(&res); err != nil { c.Fatal(err) } if res.Log != "testline\n" { c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") } if res.Stream != "stdout" { c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") } if !time.Now().After(res.Time) { c.Fatalf("Log time %v in future", res.Time) } } func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) { if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { c.Fatal(err) } out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline") if err != nil { c.Fatal(out, err) } id := strings.TrimSpace(out) out, err = s.d.Cmd("logs", id) if err != nil { c.Fatalf("Logs request should be sent and then fail with \"none\" driver") } if !strings.Contains(out, `Error running logs job: Failed to get logging factory: logger: no log driver named 'none' is registered`) { c.Fatalf("There should be an error about none not being a recognized log driver, got: %s", out) } } func (s *DockerDaemonSuite) TestDaemonDots(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatal(err) } // Now create 4 containers if _, err := s.d.Cmd("create", "busybox"); err != nil { c.Fatalf("Error creating container: %q", err) } if _, err := s.d.Cmd("create", "busybox"); err != nil { c.Fatalf("Error creating container: %q", err) } if _, err := s.d.Cmd("create", "busybox"); err != nil { c.Fatalf("Error creating container: %q", err) } if _, err := s.d.Cmd("create", "busybox"); err != nil { c.Fatalf("Error creating container: %q", err) } s.d.Stop() s.d.Start("--log-level=debug") s.d.Stop() content, _ := ioutil.ReadFile(s.d.logFile.Name()) if strings.Contains(string(content), "....") { c.Fatalf("Debug level should not have ....\n%s", string(content)) } s.d.Start("--log-level=error") s.d.Stop() content, _ = ioutil.ReadFile(s.d.logFile.Name()) if strings.Contains(string(content), "....") { c.Fatalf("Error level should not have ....\n%s", string(content)) } s.d.Start("--log-level=info") s.d.Stop() content, _ = ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), "....") { c.Fatalf("Info level should have ....\n%s", string(content)) } } func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) { dir, err := ioutil.TempDir("", "socket-cleanup-test") if err != nil { c.Fatal(err) } defer os.RemoveAll(dir) sockPath := filepath.Join(dir, "docker.sock") if err := s.d.Start("--host", "unix://"+sockPath); 
err != nil { c.Fatal(err) } if _, err := os.Stat(sockPath); err != nil { c.Fatal("socket does not exist") } if err := s.d.Stop(); err != nil { c.Fatal(err) } if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) { c.Fatal("unix socket is not cleaned up") } } func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { type Config struct { Crv string `json:"crv"` D string `json:"d"` Kid string `json:"kid"` Kty string `json:"kty"` X string `json:"x"` Y string `json:"y"` } os.Remove("/etc/docker/key.json") if err := s.d.Start(); err != nil { c.Fatalf("Failed to start daemon: %v", err) } if err := s.d.Stop(); err != nil { c.Fatalf("Could not stop daemon: %v", err) } config := &Config{} bytes, err := ioutil.ReadFile("/etc/docker/key.json") if err != nil { c.Fatalf("Error reading key.json file: %s", err) } // byte[] to Data-Struct if err := json.Unmarshal(bytes, &config); err != nil { c.Fatalf("Error Unmarshal: %s", err) } //replace config.Kid with the fake value config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4" // NEW Data-Struct to byte[] newBytes, err := json.Marshal(&config) if err != nil { c.Fatalf("Error Marshal: %s", err) } // write back if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil { c.Fatalf("Error ioutil.WriteFile: %s", err) } defer os.Remove("/etc/docker/key.json") if err := s.d.Start(); err == nil { c.Fatalf("It should not be successful to start daemon with wrong key: %v", err) } content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), "Public Key ID does not match") { c.Fatal("Missing KeyID message from daemon logs") } } func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat") if err != nil { c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out) } containerID := strings.TrimSpace(out) if out, err := s.d.Cmd("kill", containerID); err != nil { c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out) } if err := s.d.Restart(); err != nil { c.Fatalf("Could not restart daemon: %v", err) } errchan := make(chan error) go func() { if out, err := s.d.Cmd("wait", containerID); err != nil { errchan <- fmt.Errorf("%v:\n%s", err, out) } close(errchan) }() select { case <-time.After(5 * time.Second): c.Fatal("Waiting on a stopped (killed) container timed out") case err := <-errchan: if err != nil { c.Fatal(err) } } } // TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint func (s *DockerDaemonSuite) TestHttpsInfo(c *check.C) { const ( testDaemonHTTPSAddr = "tcp://localhost:4271" ) if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-cert.pem", "--tlskey", "fixtures/https/client-key.pem"} out, err := s.d.CmdWithArgs(daemonArgs, "info") if err != nil { c.Fatalf("Error Occurred: %s and output: %s", err, out) } } // TestTlsVerify verifies that --tlsverify=false turns on tls func (s *DockerDaemonSuite) TestTlsVerify(c *check.C) { out, err := exec.Command(dockerBinary, "daemon", "--tlsverify=false").CombinedOutput() if err == nil || 
!strings.Contains(string(out), "Could not load X509 key pair") { c.Fatalf("Daemon should not have started due to missing certs: %v\n%s", err, string(out)) } } // TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint // by using a rogue client certificate and checks that it fails with the expected error. func (s *DockerDaemonSuite) TestHttpsInfoRogueCert(c *check.C) { const ( errBadCertificate = "remote error: bad certificate" testDaemonHTTPSAddr = "tcp://localhost:4271" ) if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"} out, err := s.d.CmdWithArgs(daemonArgs, "info") if err == nil || !strings.Contains(out, errBadCertificate) { c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out) } } // TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint // which provides a rogue server certificate and checks that it fails with the expected error func (s *DockerDaemonSuite) TestHttpsInfoRogueServerCert(c *check.C) { const ( errCaUnknown = "x509: certificate signed by unknown authority" testDaemonRogueHTTPSAddr = "tcp://localhost:4272" ) if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-rogue-cert.pem", "--tlskey", "fixtures/https/server-rogue-key.pem", "-H", testDaemonRogueHTTPSAddr); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } daemonArgs := []string{"--host", testDaemonRogueHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"} out, err := s.d.CmdWithArgs(daemonArgs, "info") if err == nil || !strings.Contains(out, errCaUnknown) { c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out) } } func pingContainers(c *check.C, d *Daemon, expectFailure bool) { var dargs []string if d != nil { dargs = []string{"--host", d.sock()} } args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top") dockerCmd(c, args...) args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c") pingCmd := "ping -c 1 %s -W 1" args = append(args, fmt.Sprintf(pingCmd, "alias1")) _, _, err := dockerCmdWithError(args...) if expectFailure { c.Assert(err, check.NotNil) } else { c.Assert(err, check.IsNil) } args = append(dargs, "rm", "-f", "container1") dockerCmd(c, args...) 
} func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) { c.Assert(s.d.StartWithBusybox(), check.IsNil) socket := filepath.Join(s.d.folder, "docker.sock") out, err := s.d.Cmd("run", "-d", "--restart=always", "-v", socket+":/sock", "busybox") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) c.Assert(s.d.Restart(), check.IsNil) } func (s *DockerDaemonSuite) TestCleanupMountsAfterCrash(c *check.C) { c.Assert(s.d.StartWithBusybox(), check.IsNil) out, err := s.d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) id := strings.TrimSpace(out) c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) c.Assert(s.d.Start(), check.IsNil) mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) } func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) { testRequires(c, NativeExecDriver, NotUserNamespace) c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil) out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) c.Assert(strings.Contains(out, "eth0"), check.Equals, false, check.Commentf("There shouldn't be eth0 in container in default(bridge) mode when bridge network is disabled: %s", out)) out, err = s.d.Cmd("run", "--rm", "--net=bridge", "busybox", "ip", "l") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) c.Assert(strings.Contains(out, "eth0"), check.Equals, false, check.Commentf("There shouldn't be eth0 in container in bridge mode when bridge network is disabled: %s", out)) // the extra grep and awk clean up the output of `ip` to only list the number and name of // interfaces, allowing for different versions of ip (e.g. 
inside and outside the container) to // be used while still verifying that the interface list is the exact same cmd := exec.Command("sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") stdout := bytes.NewBuffer(nil) cmd.Stdout = stdout if err := cmd.Run(); err != nil { c.Fatal("Failed to get host network interface") } out, err = s.d.Cmd("run", "--rm", "--net=host", "busybox", "sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) c.Assert(out, check.Equals, fmt.Sprintf("%s", stdout), check.Commentf("The network interfaces in container should be the same with host when --net=host when bridge network is disabled: %s", out)) } func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) { if err := s.d.StartWithBusybox(); err != nil { t.Fatal(err) } if out, err := s.d.Cmd("run", "-ti", "-d", "--name", "test", "busybox"); err != nil { t.Fatal(out, err) } if err := s.d.Restart(); err != nil { t.Fatal(err) } // Container 'test' should be removed without error if out, err := s.d.Cmd("rm", "test"); err != nil { t.Fatal(out, err) } } func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatal(err) } out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top") if err != nil { c.Fatal(out, err) } // Get sandbox key via inspect out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.SandboxKey}}'", "netns") if err != nil { c.Fatalf("Error inspecting container: %s, %v", out, err) } fileName := strings.Trim(out, " \r\n'") if out, err := s.d.Cmd("stop", "netns"); err != nil { c.Fatal(out, err) } // Test if the file still exists out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) out = strings.TrimSpace(out) c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) c.Assert(out, check.Equals, fileName, check.Commentf("Output: %s", out)) // Remove the container and restart the daemon if out, err := s.d.Cmd("rm", "netns"); err != nil { c.Fatal(out, err) } if err := s.d.Restart(); err != nil { c.Fatal(err) } // Test again and see now the netns file does not exist out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) out = strings.TrimSpace(out) c.Assert(err, check.Not(check.IsNil), check.Commentf("Output: %s", out)) } // tests regression detailed in #13964 where DOCKER_TLS_VERIFY env is ignored func (s *DockerDaemonSuite) TestDaemonNoTlsCliTlsVerifyWithEnv(c *check.C) { host := "tcp://localhost:4271" c.Assert(s.d.Start("-H", host), check.IsNil) cmd := exec.Command(dockerBinary, "-H", host, "info") cmd.Env = []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"} out, _, err := runCommandWithOutput(cmd) c.Assert(err, check.Not(check.IsNil), check.Commentf("%s", out)) c.Assert(strings.Contains(out, "error occurred trying to connect"), check.Equals, true) } func setupV6() error { // Hack to get the right IPv6 address on docker0, which has already been created err := exec.Command("ip", "addr", "add", "fe80::1/64", "dev", "docker0").Run() if err != nil { return err } return nil } func teardownV6() error { err := exec.Command("ip", "addr", "del", "fe80::1/64", "dev", "docker0").Run() if err != nil { return err } return nil } func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *check.C) { c.Assert(s.d.StartWithBusybox(), check.IsNil) out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top") 
c.Assert(err, check.IsNil) id := strings.TrimSpace(out) _, err = s.d.Cmd("stop", id) c.Assert(err, check.IsNil) _, err = s.d.Cmd("wait", id) c.Assert(err, check.IsNil) out, err = s.d.Cmd("ps", "-q") c.Assert(err, check.IsNil) c.Assert(out, check.Equals, "") c.Assert(s.d.Restart(), check.IsNil) out, err = s.d.Cmd("ps", "-q") c.Assert(err, check.IsNil) c.Assert(strings.TrimSpace(out), check.Equals, id[:12]) } func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *check.C) { if err := s.d.StartWithBusybox("--log-driver=json-file", "--log-opt=max-size=1k"); err != nil { c.Fatal(err) } out, err := s.d.Cmd("run", "-d", "--name=logtest", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s, err: %v", out, err)) out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Config }}", "logtest") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) cfg := strings.TrimSpace(out) if cfg != "map[max-size:1k]" { c.Fatalf("Unexpected log-opt: %s, expected map[max-size:1k]", cfg) } } func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatal(err) } if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil { c.Fatal(err, out) } if out, err := s.d.Cmd("pause", "test"); err != nil { c.Fatal(err, out) } if err := s.d.Restart(); err != nil { c.Fatal(err) } errchan := make(chan error) go func() { out, err := s.d.Cmd("start", "test") if err != nil { errchan <- fmt.Errorf("%v:\n%s", err, out) } name := strings.TrimSpace(out) if name != "test" { errchan <- fmt.Errorf("Paused container start error on docker daemon restart, expected 'test' but got '%s'", name) } close(errchan) }() select { case <-time.After(5 * time.Second): c.Fatal("Waiting on start a container timed out") case err := <-errchan: if err != nil { c.Fatal(err) } } } func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *check.C) { c.Assert(s.d.StartWithBusybox(), check.IsNil) out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox") c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(s.d.Restart(), check.IsNil) out, err = s.d.Cmd("volume", "rm", "test") c.Assert(err, check.Not(check.IsNil), check.Commentf("should not be able to remove in use volume after daemon restart")) c.Assert(strings.Contains(out, "in use"), check.Equals, true) } func (s *DockerDaemonSuite) TestDaemonRestartLocalVolumes(c *check.C) { c.Assert(s.d.Start(), check.IsNil) _, err := s.d.Cmd("volume", "create", "--name", "test") c.Assert(err, check.IsNil) c.Assert(s.d.Restart(), check.IsNil) _, err = s.d.Cmd("volume", "inspect", "test") c.Assert(err, check.IsNil) } func (s *DockerDaemonSuite) TestDaemonCorruptedLogDriverAddress(c *check.C) { for _, driver := range []string{ "syslog", "gelf", } { args := []string{"--log-driver=" + driver, "--log-opt", driver + "-address=corrupted:42"} c.Assert(s.d.Start(args...), check.NotNil, check.Commentf(fmt.Sprintf("Expected daemon not to start with invalid %s-address provided", driver))) expected := fmt.Sprintf("Failed to set log opts: %s-address should be in form proto://address", driver) runCmd := exec.Command("grep", expected, s.d.LogfileName()) if out, _, err := runCommandWithOutput(runCmd); err != nil { c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err) } } } func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) { c.Assert(s.d.Start("--log-driver=fluentd", "--log-opt", "fluentd-address=corrupted:c"), check.NotNil) expected := "Failed to 
set log opts: invalid fluentd-address corrupted:c: " runCmd := exec.Command("grep", expected, s.d.LogfileName()) if out, _, err := runCommandWithOutput(runCmd); err != nil { c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err) } } func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) { s.d.useDefaultHost = true defer func() { s.d.useDefaultHost = false }() c.Assert(s.d.Start(), check.IsNil) }
[ "\"HOME\"", "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
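The row above lists two quoted `"HOME"` occurrences, then a single deduplicated name and a count of 1, which lines up with the two `os.Getenv("HOME")` calls inside `TestDaemonKeyMigration`. Assuming those fields are related in that way (the dump itself does not say how they were produced), a minimal Python sketch of the occurrence-to-constant reduction could look like this:

```python
# Hypothetical post-processing sketch: collapse raw quoted occurrences into
# unique constant names, keeping first-seen order.
occurrences = ['"HOME"', '"HOME"']  # as listed in the row above

def to_constant_names(occurrences):
    """Strip the surrounding quotes and deduplicate."""
    seen, names = set(), []
    for occ in occurrences:
        name = occ.strip('"')
        if name not in seen:
            seen.add(name)
            names.append(name)
    return names

print(to_constant_names(occurrences))       # ['HOME']
print(len(to_constant_names(occurrences)))  # 1, matching the count above
```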
userbot/__init__.py
# Copyright (C) 2019 The Raphielscape Company LLC. # # Licensed under the Raphielscape Public License, Version 1.c (the "License"); # you may not use this file except in compliance with the License. # """ Userbot initialization. """ import os from sys import version_info from logging import basicConfig, getLogger, INFO, DEBUG from distutils.util import strtobool as sb import pylast from pySmartDL import SmartDL from dotenv import load_dotenv from requests import get from telethon import TelegramClient from telethon.sessions import StringSession load_dotenv("config.env") # Bot Logs setup: CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False")) if CONSOLE_LOGGER_VERBOSE: basicConfig( format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=DEBUG, ) else: basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=INFO) LOGS = getLogger(__name__) if version_info[0] < 3 or version_info[1] < 6: LOGS.error("You MUST have a python version of at least 3.6." "Multiple features depend on this. Bot quitting.") quit(1) # Check if the config was edited by using the already used variable. # Basically, its the 'virginity check' for the config file ;) CONFIG_CHECK = os.environ.get( " ", None) if CONFIG_CHECK: LOGS.error( "Please remove the line mentioned in the first hashtag from the config.env file" ) quit(1) # Telegram App KEY and HASH API_KEY = os.environ.get("API_KEY", "761806" ) API_HASH = os.environ.get("API_HASH", "73d98a25a55fb763e05d82ded30335e4") # Userbot Session String STRING_SESSION = os.environ.get("STRING_SESSION", None) # Logging channel/group configuration. BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID", "0")) BOTLOG = sb(os.environ.get("BOTLOG", "False")) # Bleep Blop, this is a bot ;) PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "False")) # Console verbose logging CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False")) # SQL Database URI DB_URI = os.environ.get("DATABASE_URL", None) # OCR API key OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None) # remove.bg API key REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None) # Chrome Driver and Headless Google Chrome Binaries CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None) GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None) # OpenWeatherMap API Key OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None) # Anti Spambot Config ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False")) ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False")) # Youtube API key YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None) # Default .alive name ALIVE_NAME = os.environ.get("ALIVE_NAME", None) # Time & Date - Country and Time Zone COUNTRY = str(os.environ.get("COUNTRY", "")) TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1)) # Clean Welcome CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True")) # Last.fm Module BIO_PREFIX = os.environ.get("BIO_PREFIX", None) DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None) LASTFM_API = os.environ.get("LASTFM_API", None) LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None) LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None) LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None) LASTFM_PASS = pylast.md5(LASTFM_PASSWORD_PLAIN) if not LASTFM_USERNAME == "None": lastfm = pylast.LastFMNetwork(api_key=LASTFM_API, api_secret=LASTFM_SECRET, username=LASTFM_USERNAME, password_hash=LASTFM_PASS) else: lastfm = None # Google Drive Module G_DRIVE_CLIENT_ID = 
os.environ.get("G_DRIVE_CLIENT_ID", None) G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None) G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None) GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None) TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY", "./downloads") # Setting Up CloudMail.ru and MEGA.nz extractor binaries, # and giving them correct perms to work properly. if not os.path.exists('bin'): os.mkdir('bin') binaries = { "https://raw.githubusercontent.com/yshalsager/megadown/master/megadown": "bin/megadown", "https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py": "bin/cmrudl" } for binary, path in binaries.items(): downloader = SmartDL(binary, path, progress_bar=False) downloader.start() os.chmod(path, 0o755) # 'bot' variable if STRING_SESSION: # pylint: disable=invalid-name bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH) else: # pylint: disable=invalid-name bot = TelegramClient("userbot", API_KEY, API_HASH) # Global Variables COUNT_MSG = 0 USERS = {} COUNT_PM = {} LASTMSG = {} ENABLE_KILLME = True CMD_HELP = {} ISAFK = False AFKREASON = None
[]
[]
[ "GOOGLE_CHROME_BIN", "G_DRIVE_CLIENT_SECRET", "COUNTRY", "LASTFM_API", "ANTI_SPAMBOT_SHOUT", "OCR_SPACE_API_KEY", "BIO_PREFIX", "TZ_NUMBER", "LASTFM_PASSWORD", " ", "DATABASE_URL", "GDRIVE_FOLDER_ID", "CHROME_DRIVER", "YOUTUBE_API_KEY", "LASTFM_USERNAME", "G_DRIVE_CLIENT_ID", "API_KEY", "PM_AUTO_BAN", "DEFAULT_BIO", "ANTI_SPAMBOT", "OPEN_WEATHER_MAP_APPID", "LASTFM_SECRET", "G_DRIVE_AUTH_TOKEN_DATA", "STRING_SESSION", "CONSOLE_LOGGER_VERBOSE", "ALIVE_NAME", "BOTLOG_CHATID", "TMP_DOWNLOAD_DIRECTORY", "CLEAN_WELCOME", "REM_BG_API_KEY", "BOTLOG", "API_HASH" ]
[]
["GOOGLE_CHROME_BIN", "G_DRIVE_CLIENT_SECRET", "COUNTRY", "LASTFM_API", "ANTI_SPAMBOT_SHOUT", "OCR_SPACE_API_KEY", "BIO_PREFIX", "TZ_NUMBER", "LASTFM_PASSWORD", " ", "DATABASE_URL", "GDRIVE_FOLDER_ID", "CHROME_DRIVER", "YOUTUBE_API_KEY", "LASTFM_USERNAME", "G_DRIVE_CLIENT_ID", "API_KEY", "PM_AUTO_BAN", "DEFAULT_BIO", "ANTI_SPAMBOT", "OPEN_WEATHER_MAP_APPID", "LASTFM_SECRET", "G_DRIVE_AUTH_TOKEN_DATA", "STRING_SESSION", "CONSOLE_LOGGER_VERBOSE", "ALIVE_NAME", "BOTLOG_CHATID", "TMP_DOWNLOAD_DIRECTORY", "CLEAN_WELCOME", "REM_BG_API_KEY", "BOTLOG", "API_HASH"]
python
32
0
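Most of the 32 constant names listed for userbot/__init__.py are read through the same idiom: `sb(os.environ.get(NAME, default))`, where `sb` is `distutils.util.strtobool`. A minimal sketch of that boolean-flag pattern in isolation; `FEATURE_X` is a placeholder name rather than a variable the file actually reads, and since `distutils` (and with it `strtobool`) was removed in Python 3.12, this assumes an older interpreter, as the original file does:

```python
import os
from distutils.util import strtobool as sb

# strtobool accepts "y", "yes", "t", "true", "on", "1" (-> 1) and
# "n", "no", "f", "false", "off", "0" (-> 0), case-insensitively, and raises
# ValueError for anything else, so the default string must itself be valid.
FEATURE_X = sb(os.environ.get("FEATURE_X", "False"))

if FEATURE_X:
    print("feature enabled")
```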
server.go
package main import ( "bytes" "database/sql" "encoding/json" _ "github.com/jackc/pgx/stdlib" "go.stockscraper/reddit" "html/template" "log" "net/http" "os" "strconv" "time" ) var subreddits = []string{"wallstreetbets", "pennystocks", "CanadianInvestor", "StockMarket", "Daytrading", "stocks"} var sorts = []string{"hot", "new", "top"} const insertSQL = "INSERT INTO requests(date, ip, requested_limit) VALUES($1, $2, $3)" var DB *sql.DB var connectedToDatabase = true func main(){ db, err := sql.Open("pgx", os.Getenv("DATABASE_URL")) if err != nil { log.Print("Database not connected") connectedToDatabase = false } else { if err = db.Ping(); err != nil { log.Print("Lost connection to database") connectedToDatabase = false } else { DB = db } } http.HandleFunc("/", serveIndex) http.HandleFunc("/json", getJson) log.Println("Running...") if err := http.ListenAndServe(":"+os.Getenv("PORT"), nil); err != nil { log.Fatal(err) } } func getJson (w http.ResponseWriter, r *http.Request){ var subArray []reddit.SubReddits limit, err := strconv.Atoi(r.URL.Query().Get("limit")) if err != nil { limit = 10 } subChannel := make(chan reddit.SubReddits) defer close(subChannel) for _, subreddit := range subreddits { for _, sort := range sorts { go func(r, s string, l int) { subChannel <- reddit.GetTickers(r, s, l) }(subreddit, sort, limit) } } for i:= 0; i < len(subreddits) * len(sorts); i++ { subArray = append(subArray, <-subChannel) } subArray = append(subArray, reddit.SumTotal(subArray)) buf := &bytes.Buffer{} enc := json.NewEncoder(buf) enc.SetEscapeHTML(true) if err = enc.Encode(subArray); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json; charset=utf-8") w.WriteHeader(200) _, err = w.Write(buf.Bytes()) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } } func serveIndex (w http.ResponseWriter, r *http.Request){ limit, err := strconv.Atoi(r.URL.Query().Get("limit")) if err != nil { limit = 10 } if connectedToDatabase { ip := getIP(r) go func(limit int, ip string) { t := time.Now() query, err := DB.Prepare(insertSQL) defer query.Close() if err == nil { _, err = query.Exec(t.Format("2006-01-02"), ip, limit) } }(limit, ip) } tmpl := template.Must(template.ParseFiles("templates/index.html")) err = tmpl.Execute(w, limit) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } } func getIP(r *http.Request) string { forwarded := r.Header.Get("X-FORWARDED-FOR") if forwarded != "" { return forwarded } return r.RemoteAddr }
[ "\"DATABASE_URL\"", "\"PORT\"" ]
[]
[ "PORT", "DATABASE_URL" ]
[]
["PORT", "DATABASE_URL"]
go
2
0
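For server.go, the two names in the lists above correspond to the two string literals passed to `os.Getenv`: `DATABASE_URL` for the pgx connection and `PORT` for the listen address. Purely as an illustration of how such a list could be derived from source, and not a claim about how this dump was actually generated, a small regex-based sketch:

```python
import re

# Hypothetical extractor: collect string-literal arguments of os.Getenv calls
# in Go source. Real tooling would use the Go AST; a regex is enough to show
# the idea on the two calls quoted from the row above.
GETENV_CALL = re.compile(r'os\.Getenv\(\s*"([^"]+)"\s*\)')

go_source = '''
db, err := sql.Open("pgx", os.Getenv("DATABASE_URL"))
http.ListenAndServe(":"+os.Getenv("PORT"), nil)
'''

names = sorted(set(GETENV_CALL.findall(go_source)))
print(names, len(names))  # ['DATABASE_URL', 'PORT'] 2
```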
Stage 4-4/banking.py
import random import sqlite3 conn = sqlite3.connect('card.s3db') cur = conn.cursor() cur.execute( "CREATE TABLE IF NOT EXISTS card (id INTEGER PRIMARY KEY, number TEXT UNIQUE, pin TEXT, balance INTEGER DEFAULT 0);") conn.commit() def main(): while 1: n = int(input("1. create an account\n" + "2. Log into account\n" + "0. Exit\n")) if n == 1: createaccount() elif n == 2: r = logintoaccount() if r == 2: break elif n == 0: print("Bye!") break def luhh(cardnumber): l = list(cardnumber) s = int(l[-1]) for i in range(0, 15): if i % 2 == 0: if int(l[i]) * 2 > 9: s = s + (int(l[i]) * 2) - 9 else: s += int(l[i]) * 2 else: s += int(l[i]) if s % 10 == 0: return True else: return False def createaccount(): while True: inputpin = random.randint(1000, 10000) y = random.randint(10 ** 9, 10 ** 10) cardnumber = str(400000) + str(y) if luhh(cardnumber): break print("Your card has been created") print("Your card number") print(cardnumber) print("Your card pin") print(inputpin) cur.execute("INSERT INTO card(number, pin, balance) VALUES (?,?,?);", (cardnumber, inputpin, 0)) conn.commit() def logintoaccount(): x1 = 1 while x1 != 0: cardnumber = input("Enter your card number:") pin = input("Enter your PIN") if len(cardnumber) == 16: old_pin = cur.execute(f"SELECT pin FROM card WHERE number = {cardnumber} And pin = {pin};").fetchone() if type(old_pin) == type(None): print("Wrong card number or PIN!") return 0 if len(old_pin) == 1 and old_pin[0] == pin: print("You successfully logged in!") while 1: print("1. Balance\n2. Add income\n3. Do transfer\n4. Close account\n5. Log out\n0. Exit\n") n1 = int(input()) if n1 == 1: balance = cur.execute(f"SELECT balance From card WHERE number = {cardnumber};").fetchone() print("Balance: ", balance[0]) elif n1 == 2: print("Enter income:") income = int(input()) balance = cur.execute(f"SELECT balance From card WHERE number = {cardnumber};").fetchone() cur.execute(f"UPDATE card SET balance = {income + balance[0]} WHERE number = {cardnumber}") conn.commit() print("Income was added!") elif n1 == 3: print("Transfer\nEnter card number:") cardnumber_to_transfer = input("Enter your card number:") cardnumber1 = cur.execute( f"SELECT number FROM card WHERE number = {cardnumber_to_transfer};").fetchone() if not luhh(cardnumber_to_transfer): print("Probably you made a mistake in the card number. Please try again!") elif type(cardnumber1) == type(None): print("Such a card does not exist.") elif cardnumber == cardnumber_to_transfer: print("You can't transfer money to the same account!") else: print("Enter how much money you want to transfer:") amount_to_transfer = int(input()) acc_balance = cur.execute( f"SELECT balance From card WHERE number = {cardnumber};").fetchone() if acc_balance[0] < amount_to_transfer: print("Not enough money!") else: acc_balance3 = cur.execute( f"SELECT balance From card WHERE number = {cardnumber_to_transfer};").fetchone() cur.execute( f"UPDATE card SET balance = {acc_balance[0] - amount_to_transfer} WHERE number = {cardnumber}") cur.execute( f"UPDATE card SET balance = {amount_to_transfer + acc_balance3[0]} WHERE number = {cardnumber_to_transfer}") conn.commit() print("Success!") elif n1 == 4: cur.execute(f"DELETE FROM card WHERE number = {cardnumber};") conn.commit() print("The account has been closed!") return 0 elif n1 == 5: print("You have successfully logged out!") return 0 elif n1 == 0: print("Bye!") return 2 else: print("Wrong card number or PIN!") return 0 else: print("Wrong card number or PIN!") return 0 main()
[]
[]
[]
[]
[]
python
null
null
null
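banking.py has empty lists above, which matches its content: the only external state it touches is the sqlite3 file. Within that content, the INSERT uses bound parameters while the SELECT/UPDATE/DELETE statements interpolate card numbers with f-strings; a short sketch of the parameterized form of one of those lookups, against the same `card` schema (the card number below is an example value, not one taken from the dump):

```python
import sqlite3

conn = sqlite3.connect("card.s3db")
cur = conn.cursor()
cur.execute(
    "CREATE TABLE IF NOT EXISTS card "
    "(id INTEGER PRIMARY KEY, number TEXT UNIQUE, pin TEXT, balance INTEGER DEFAULT 0);"
)

# Same lookup as the f-string version above, but the value is bound as a
# parameter instead of being spliced into the SQL text.
cardnumber = "4000001234567890"
row = cur.execute(
    "SELECT balance FROM card WHERE number = ?;", (cardnumber,)
).fetchone()
balance = row[0] if row else None
print(balance)
```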
docs/conf.py
# -*- coding: utf-8 -*- # # lemur documentation build configuration file, created by # sphinx-quickstart on Sat Jun 7 18:43:48 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os from unittest.mock import MagicMock # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) # Mock packages that cannot be installed on rtd on_rtd = os.environ.get('READTHEDOCS') == 'True' if on_rtd: class Mock(MagicMock): @classmethod def __getattr__(cls, name): return MagicMock() MOCK_MODULES = ['ldap'] sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinxcontrib.autohttp.flask', 'sphinx.ext.todo', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'lemur' copyright = u'2015, Netflix Inc.' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # base_dir = os.path.join(os.path.dirname(__file__), os.pardir) about = {} with open(os.path.join(base_dir, "lemur", "__about__.py")) as f: exec(f.read(), about) version = release = about["__version__"] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. 
#keep_warnings = False # -- Options for HTML output ---------------------------------------------- # on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'lemurdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'lemur.tex', u'Lemur Documentation', u'Kevin Glisson', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'Lemur', u'Lemur Documentation', [u'Kevin Glisson'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Lemur', u'Lemur Documentation', u'Kevin Glisson', 'Lemur', 'SSL Certificate Management', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
[]
[]
[ "READTHEDOCS" ]
[]
["READTHEDOCS"]
python
1
0
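docs/conf.py consults the same variable twice, first as `os.environ.get('READTHEDOCS') == 'True'` and later with an explicit `None` default; both forms behave identically when the variable is unset, which is why only one distinct name appears in the lists above. The detection-plus-mocking pattern it gates, reduced to its core (Read the Docs sets `READTHEDOCS=True` in its build environment):

```python
import os
import sys
from unittest.mock import MagicMock

on_rtd = os.environ.get("READTHEDOCS") == "True"

if on_rtd:
    # Stand in for modules that cannot be installed on the docs builder;
    # 'ldap' mirrors the module mocked in the conf.py above.
    sys.modules.update({"ldap": MagicMock()})
```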
inference_demo.py
import os import time import socket from mmdet.apis import init_detector, inference_detector, show_result_pyplot, show_result_ins import mmcv # map # config_file = '../configs/solo/decoupled_solo_r50_fpn_8gpu_3x.py' # # download the checkpoint from model zoo and put it in `checkpoints/` # checkpoint_file = '../checkpoints/DECOUPLED_SOLO_R50_3x.pth' # config_file = '../configs/solo/solo_r50_fpn_8gpu_1x.py' # checkpoint_file = '../checkpoints/SOLO_R50_1x.pth' # # config_file = '../configs/solo/solo_r50_fpn_8gpu_3x.py' # checkpoint_file = '../checkpoints/SOLO_R50_3x.pth' ## AP # # config_file = './configs/solo/solo_r101_fpn_8gpu_3x.py' # checkpoint_file = './checkpoints/SOLO_R101_3x.pth' # config_file = '../configs/solo/decoupled_solo_r101_fpn_8gpu_3x.py' # checkpoint_file = '../checkpoints/DECOUPLED_SOLO_R101_3x.pth' # config_file = './configs/solov2/solov2_r101_fpn_8gpu_3x.py' # checkpoint_file = './checkpoints/SOLOv2_R101_3x.pth' # config_file = './configs/solov2/solov2_r101_dcn_fpn_8gpu_3x.py' # checkpoint_file = './checkpoints/SOLOv2_R101_DCN_3x.pth' # config_file = './configs/solov2/solov2_x101_dcn_fpn_8gpu_3x.py' # checkpoint_file = './checkpoints/SOLOv2_X101_DCN_3x.pth' ## speed # config_file = '../configs/solo/decoupled_solo_light_dcn_r50_fpn_8gpu_3x.py' # checkpoint_file = '../checkpoints/DECOUPLED_SOLO_LIGHT_DCN_R50_3x.pth' # config_file = './configs/solov2/solov2_light_512_dcn_r50_fpn_8gpu_3x.py' # checkpoint_file = './checkpoints/SOLOv2_LIGHT_512_DCN_R50_3x.pth' config_file = 'configs/solov2/solov2_light_448_r18_fpn_8gpu_3x.py' checkpoint_file = './work_dir/0602/ps-X10DRG/solov2_light_448_r18_fpn_8gpu_3x/epoch_36.pth' print(config_file) # build the model from a config file and a checkpoint file cuda_n = 0 print('gpu:', cuda_n) os.environ['CUDA_VISIBLE_DEVICES'] = f'{cuda_n}' model = init_detector(config_file, checkpoint_file, device=f'cuda') # # # test a single image # # # for video_name in ['1', '2', '3']: score_thr = 0.25 # for video_name in ['coco_72']: # for video_name in ['Yotube-vos-3rd']: # for video_name in ['transformed']: save_dir = f'result/{socket.gethostname()}0530/' # for video_name in ['cityscape_100', 'GTA5_99']: for video_name in ['coco_72']: # for video_name in ['Yotube-vos-3rd_rotate180']: data_dir = f'data/{video_name}/' out_img_dir = f"{save_dir}{config_file.split('/')[-1].split('.')[0]}/{video_name}_score_thr_{score_thr}/" if not os.path.exists(out_img_dir): os.makedirs(out_img_dir) print('save', save_dir, os.path.abspath(save_dir), out_img_dir) n = len(os.listdir(data_dir)) start = time.time() # for i in range(1, 141): for img in os.listdir(data_dir): # img = f'{i}.jpg' result = inference_detector(model, f'{data_dir}{img}') show_result_ins(f'{data_dir}{img}', result, model.CLASSES, score_thr=score_thr, out_file=f"./{out_img_dir}{img}") # print('save', os.path.abspath(f"../{out_img_dir}{img}")) end = time.time() # print() # for img in os.listdir(directory): # # print(f'{directory}{img}') # # result = inference_detector(model, f'{directory}{img}') # # show_result_ins(f'{directory}{img}', result, model.CLASSES, score_thr=0.25, out_file=f"../data/out/{img}") # break print('fps:', n/(end - start), 'n:', n)
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
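The inference_demo.py row above pins the GPU by writing CUDA_VISIBLE_DEVICES before the detector is built. A minimal sketch of that ordering requirement, assuming PyTorch is installed and device index 0 is an arbitrary choice:

import os

# CUDA_VISIBLE_DEVICES is only honoured if it is set before the process
# initialises the CUDA runtime, so export it before the first GPU call.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # arbitrary example: expose GPU 0 only

import torch  # imported after the variable is set, as in the demo script

if torch.cuda.is_available():
    print('visible GPUs:', torch.cuda.device_count())  # counts only the exposed GPU
else:
    print('no GPU visible to this process')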
app.py
import hmac import hashlib import os from flask import Flask, jsonify, request, abort from datadog import initialize, statsd # get keys from environment variables GIT_SHA = os.environ.get("GIT_SHA") SEGMENT_SHARED_SECRET = os.environ.get("SEGMENT_SHARED_SECRET", "") SIGNATURE_DISABLED = os.environ.get("SIGNATURE_DISABLED", True) # initialize datadog options = {"statsd_host": os.environ.get("DATADOG_STATSD_HOST", "127.0.0.1")} initialize(**options) app = Flask(__name__) ALLOWED_EVENTS = ["track"] def emit(source, event, event_type): """Emits metric to datadog. Returns nothing.""" if event_type in ALLOWED_EVENTS: statsd.increment( "segment.event", tags=[ "source:" + source, "event:" + "-".join(event.split()), "type:" + event_type, ], ) def check_signature(signature, data): """Verifies signature (ensures matched shared secrets). Returns Bool.""" if SIGNATURE_DISABLED: return True # check signature try: digest = hmac.new( SEGMENT_SHARED_SECRET.encode(), msg=data, digestmod=hashlib.sha1 ).hexdigest() if digest == signature: return True else: print(f"Invalid signature. Expected {digest} but got {signature}") except KeyError: pass return False @app.route("/") def index(): """Returns healthcheck.""" print(f"Received request on /. {GIT_SHA}") return f"Segment2Datadog is up and running! {GIT_SHA}" @app.route("/api/<string:source>", methods=["POST"]) def segment2datadog(source): """Main function. Accepts JSON payload on POST only.""" print(f"Received request on /api/{source}") signature = request.headers.get("x-signature", "") if not check_signature(signature=signature, data=request.data): abort(403, "Signature not valid.") content = request.get_json() event_type = content["type"] if event_type not in ALLOWED_EVENTS: return jsonify({"source": source, "data": content}) event = content["event"] emit(source=source, event=event, event_type=event_type) return jsonify({"source": source, "data": content})
[]
[]
[ "GIT_SHA", "SEGMENT_SHARED_SECRET", "DATADOG_STATSD_HOST", "SIGNATURE_DISABLED" ]
[]
["GIT_SHA", "SEGMENT_SHARED_SECRET", "DATADOG_STATSD_HOST", "SIGNATURE_DISABLED"]
python
4
0
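One caveat in app.py above: os.environ.get("SIGNATURE_DISABLED", True) returns the raw string whenever the variable is set, so SIGNATURE_DISABLED=false would still be truthy and skip signature checks. A small sketch of a stricter boolean reader (the helper name env_flag is invented here, not part of the app):

import os

def env_flag(name, default=False):
    """Interpret an environment variable as a boolean flag."""
    raw = os.environ.get(name)
    if raw is None:
        return default
    return raw.strip().lower() in ('1', 'true', 'yes', 'on')

# 'false', 'no' and '0' now disable the flag instead of enabling it,
# while the unset case keeps the original default of True.
SIGNATURE_DISABLED = env_flag('SIGNATURE_DISABLED', default=True)
print(SIGNATURE_DISABLED)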
thefuck/shells/bash.py
import os from tempfile import gettempdir from uuid import uuid4 from ..conf import settings from ..const import ARGUMENT_PLACEHOLDER, USER_COMMAND_MARK from ..utils import memoize, parse_alias from .generic import Generic class Bash(Generic): def app_alias(self, alias_name): # It is VERY important to have the variables declared WITHIN the function return ''' function {name} () {{ TF_PYTHONIOENCODING=$PYTHONIOENCODING; export TF_SHELL=bash; export TF_ALIAS={name}; export TF_SHELL_ALIASES=$(alias); export TF_HISTORY=$(fc -ln -10); export PYTHONIOENCODING=utf-8; TF_CMD=$( thefuck {argument_placeholder} $@ ) && eval $TF_CMD; unset TF_HISTORY; export PYTHONIOENCODING=$TF_PYTHONIOENCODING; {alter_history} }} '''.format( name=alias_name, argument_placeholder=ARGUMENT_PLACEHOLDER, alter_history=('history -s $TF_CMD;' if settings.alter_history else '')) def instant_mode_alias(self, alias_name): if os.environ.get('THEFUCK_INSTANT_MODE', '').lower() == 'true': mark = USER_COMMAND_MARK + '\b' * len(USER_COMMAND_MARK) return ''' export PS1="{user_command_mark}$PS1"; {app_alias} '''.format(user_command_mark=mark, app_alias=self.app_alias(alias_name)) else: log_path = os.path.join( gettempdir(), 'thefuck-script-log-{}'.format(uuid4().hex)) return ''' export THEFUCK_INSTANT_MODE=True; export THEFUCK_OUTPUT_LOG={log}; thefuck --shell-logger {log}; rm {log}; exit '''.format(log=log_path) def _parse_alias(self, alias): return parse_alias(alias) @memoize def get_aliases(self): raw_aliases = os.environ.get('TF_SHELL_ALIASES', '').split('\n') return dict(self._parse_alias(alias) for alias in raw_aliases if alias and '=' in alias) def _get_history_file_name(self): return os.environ.get("HISTFILE", os.path.expanduser('~/.bash_history')) def _get_history_line(self, command_script): return u'{}\n'.format(command_script) def how_to_configure(self): if os.path.join(os.path.expanduser('~'), '.bashrc'): config = '~/.bashrc' elif os.path.join(os.path.expanduser('~'), '.bash_profile'): config = '~/.bash_profile' else: config = 'bash config' return self._create_shell_configuration( content=u'eval $(thefuck --alias)', path=config, reload=u'source {}'.format(config))
[]
[]
[ "HISTFILE", "TF_SHELL_ALIASES", "THEFUCK_INSTANT_MODE" ]
[]
["HISTFILE", "TF_SHELL_ALIASES", "THEFUCK_INSTANT_MODE"]
python
3
0
autotrade/asgi.py
""" ASGI config for autotrade project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'autotrade.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
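autotrade/asgi.py relies on os.environ.setdefault, which only writes the value when DJANGO_SETTINGS_MODULE is not already present, so an exported value wins over the hard-coded default. A short illustration (the overriding module name is hypothetical):

import os

# Simulate an operator exporting a different settings module beforehand.
os.environ['DJANGO_SETTINGS_MODULE'] = 'autotrade.settings_prod'  # hypothetical value

# setdefault leaves an existing value untouched, so the export is kept.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'autotrade.settings')
print(os.environ['DJANGO_SETTINGS_MODULE'])  # -> autotrade.settings_prod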
internal/conf/computed.go
package conf import ( "context" "log" "os" "strconv" "strings" "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/conf/confdefaults" "github.com/sourcegraph/sourcegraph/internal/conf/conftypes" "github.com/sourcegraph/sourcegraph/schema" ) func init() { deployType := DeployType() if !IsValidDeployType(deployType) { log.Fatalf("The 'DEPLOY_TYPE' environment variable is invalid. Expected one of: %q, %q, %q, %q, %q. Got: %q", DeployKubernetes, DeployDockerCompose, DeployPureDocker, DeploySingleDocker, DeployDev, deployType) } confdefaults.Default = defaultConfigForDeployment() } func defaultConfigForDeployment() conftypes.RawUnified { deployType := DeployType() switch { case IsDev(deployType): return confdefaults.DevAndTesting case IsDeployTypeSingleDockerContainer(deployType): return confdefaults.DockerContainer case IsDeployTypeKubernetes(deployType), IsDeployTypeDockerCompose(deployType), IsDeployTypePureDocker(deployType): return confdefaults.KubernetesOrDockerComposeOrPureDocker default: panic("deploy type did not register default configuration") } } func AWSCodeCommitConfigs(ctx context.Context) ([]*schema.AWSCodeCommitConnection, error) { var config []*schema.AWSCodeCommitConnection if err := api.InternalClient.ExternalServiceConfigs(ctx, "AWSCODECOMMIT", &config); err != nil { return nil, err } return config, nil } func BitbucketServerConfigs(ctx context.Context) ([]*schema.BitbucketServerConnection, error) { var config []*schema.BitbucketServerConnection if err := api.InternalClient.ExternalServiceConfigs(ctx, "BITBUCKETSERVER", &config); err != nil { return nil, err } return config, nil } func GitHubConfigs(ctx context.Context) ([]*schema.GitHubConnection, error) { var config []*schema.GitHubConnection if err := api.InternalClient.ExternalServiceConfigs(ctx, "GITHUB", &config); err != nil { return nil, err } return config, nil } func GitLabConfigs(ctx context.Context) ([]*schema.GitLabConnection, error) { var config []*schema.GitLabConnection if err := api.InternalClient.ExternalServiceConfigs(ctx, "GITLAB", &config); err != nil { return nil, err } return config, nil } func GitoliteConfigs(ctx context.Context) ([]*schema.GitoliteConnection, error) { var config []*schema.GitoliteConnection if err := api.InternalClient.ExternalServiceConfigs(ctx, "GITOLITE", &config); err != nil { return nil, err } return config, nil } func PhabricatorConfigs(ctx context.Context) ([]*schema.PhabricatorConnection, error) { var config []*schema.PhabricatorConnection if err := api.InternalClient.ExternalServiceConfigs(ctx, "PHABRICATOR", &config); err != nil { return nil, err } return config, nil } type AccessTokAllow string const ( AccessTokensNone AccessTokAllow = "none" AccessTokensAll AccessTokAllow = "all-users-create" AccessTokensAdmin AccessTokAllow = "site-admin-create" ) // AccessTokensAllow returns whether access tokens are enabled, disabled, or restricted to creation by admin users. func AccessTokensAllow() AccessTokAllow { cfg := Get().AuthAccessTokens if cfg == nil { return AccessTokensAll } switch cfg.Allow { case "": return AccessTokensAll case string(AccessTokensAll): return AccessTokensAll case string(AccessTokensNone): return AccessTokensNone case string(AccessTokensAdmin): return AccessTokensAdmin default: return AccessTokensNone } } // EmailVerificationRequired returns whether users must verify an email address before they // can perform most actions on this site. // // It's false for sites that do not have an email sending API key set up. 
func EmailVerificationRequired() bool { return Get().EmailSmtp != nil } // CanSendEmail returns whether the site can send emails (e.g., to reset a password or // invite a user to an org). // // It's false for sites that do not have an email sending API key set up. func CanSendEmail() bool { return Get().EmailSmtp != nil } // Deploy type constants. Any changes here should be reflected in the DeployType type declared in web/src/globals.d.ts: // https://sourcegraph.com/search?q=r:github.com/sourcegraph/sourcegraph%24+%22type+DeployType%22 const ( DeployKubernetes = "kubernetes" DeploySingleDocker = "docker-container" DeployDockerCompose = "docker-compose" DeployPureDocker = "pure-docker" DeployDev = "dev" ) // DeployType tells the deployment type. func DeployType() string { if e := os.Getenv("DEPLOY_TYPE"); e != "" { return e } // Default to Kubernetes cluster so that every Kubernetes c // cluster deployment doesn't need to be configured with DEPLOY_TYPE. return DeployKubernetes } // IsDeployTypeKubernetes tells if the given deployment type is a Kubernetes // cluster (and non-dev, not docker-compose, not pure-docker, and non-single Docker image). func IsDeployTypeKubernetes(deployType string) bool { switch deployType { // includes older Kubernetes aliases for backwards compatibility case "k8s", "cluster", DeployKubernetes: return true } return false } // IsDeployTypeDockerCompose tells if the given deployment type is the Docker Compose // deployment (and non-dev, not pure-docker, non-cluster, and non-single Docker image). func IsDeployTypeDockerCompose(deployType string) bool { return deployType == DeployDockerCompose } // IsDeployTypePureDocker tells if the given deployment type is the pure Docker // deployment (and non-dev, not docker-compose, non-cluster, and non-single Docker image). func IsDeployTypePureDocker(deployType string) bool { return deployType == DeployPureDocker } // IsDeployTypeSingleDockerContainer tells if the given deployment type is Docker sourcegraph/server // single-container (non-Kubernetes, not docker-compose, not pure-docker, non-cluster, non-dev). func IsDeployTypeSingleDockerContainer(deployType string) bool { return deployType == DeploySingleDocker } // IsDev tells if the given deployment type is "dev". func IsDev(deployType string) bool { return deployType == DeployDev } // IsValidDeployType returns true iff the given deployType is a Kubernetes deployment, a Docker Compose // deployment, a pure Docker deployment, a Docker deployment, or a local development environment. func IsValidDeployType(deployType string) bool { return IsDeployTypeKubernetes(deployType) || IsDeployTypeDockerCompose(deployType) || IsDeployTypePureDocker(deployType) || IsDeployTypeSingleDockerContainer(deployType) || IsDev(deployType) } // UpdateChannel tells the update channel. Default is "release". func UpdateChannel() string { channel := Get().UpdateChannel if channel == "" { return "release" } return channel } // SearchIndexEnabled returns true if sourcegraph should index all // repositories for text search. If the configuration is unset, it returns // false for the docker server image (due to resource usage) but true // elsewhere. Additionally it also checks for the outdated environment // variable INDEXED_SEARCH. 
func SearchIndexEnabled() bool { if v := Get().SearchIndexEnabled; v != nil { return *v } if v := os.Getenv("INDEXED_SEARCH"); v != "" { enabled, _ := strconv.ParseBool(v) return enabled } return DeployType() != DeploySingleDocker } func SymbolIndexEnabled() bool { enabled := SearchIndexEnabled() if v := Get().SearchIndexSymbolsEnabled; v != nil { enabled = enabled && *v } return enabled } func CampaignsReadAccessEnabled() bool { if v := Get().CampaignsReadAccessEnabled; v != nil { return *v } // DEPRECATED property name. if v := Get().AutomationReadAccessEnabled; v != nil { return *v } return false } func ExternalURL() string { return Get().ExternalURL } func UsingExternalURL() bool { url := Get().ExternalURL return !(url == "" || strings.HasPrefix(url, "http://localhost") || strings.HasPrefix(url, "https://localhost") || strings.HasPrefix(url, "http://127.0.0.1") || strings.HasPrefix(url, "https://127.0.0.1")) // CI:LOCALHOST_OK } func IsExternalURLSecure() bool { return strings.HasPrefix(Get().ExternalURL, "https:") } func IsBuiltinSignupAllowed() bool { provs := Get().AuthProviders for _, prov := range provs { if prov.Builtin != nil { return prov.Builtin.AllowSignup } } return false } func Branding() *schema.Branding { branding := Get().Branding if branding != nil && branding.BrandName == "" { bcopy := *branding bcopy.BrandName = "Sourcegraph" branding = &bcopy } return branding } func BrandName() string { branding := Branding() if branding == nil || branding.BrandName == "" { return "Sourcegraph" } return branding.BrandName } // SearchSymbolsParallelism returns 20, or the site config // "debug.search.symbolsParallelism" value if configured. func SearchSymbolsParallelism() int { val := Get().DebugSearchSymbolsParallelism if val == 0 { return 20 } return val } func PermissionsBackgroundSyncEnabled() bool { val := Get().PermissionsBackgroundSync if val == nil { return false } return val.Enabled } func BitbucketServerPluginPerm() bool { val := Get().ExperimentalFeatures.BitbucketServerFastPerm return val == "enabled" } func EventLoggingEnabled() bool { val := Get().ExperimentalFeatures.EventLogging if val == "" { return true } return val == "enabled" } func StructuralSearchEnabled() bool { val := Get().ExperimentalFeatures.StructuralSearch if val == "" { return true } return val == "enabled" } func AndOrQueryEnabled() bool { e := Get().ExperimentalFeatures if e == nil || e.AndOrQuery == "" { return false } return e.AndOrQuery == "enabled" } func SearchMultipleRevisionsPerRepository() bool { x := ExperimentalFeatures() return x.SearchMultipleRevisionsPerRepository != nil && *x.SearchMultipleRevisionsPerRepository } func ExperimentalFeatures() schema.ExperimentalFeatures { val := Get().ExperimentalFeatures if val == nil { return schema.ExperimentalFeatures{} } return *val } // AuthMinPasswordLength returns the value of minimum password length requirement. // If not set, it returns the default value 12. func AuthMinPasswordLength() int { val := Get().AuthMinPasswordLength if val <= 0 { return 12 } return val }
[ "\"DEPLOY_TYPE\"", "\"INDEXED_SEARCH\"" ]
[]
[ "DEPLOY_TYPE", "INDEXED_SEARCH" ]
[]
["DEPLOY_TYPE", "INDEXED_SEARCH"]
go
2
0
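computed.go layers its configuration sources: an explicit site setting wins, then an environment variable, then a deploy-type default. A rough Python rendering of that precedence, for illustration only (the boolean parsing only approximates Go's strconv.ParseBool):

import os

def deploy_type():
    # the DEPLOY_TYPE variable wins, otherwise default to "kubernetes"
    return os.environ.get('DEPLOY_TYPE') or 'kubernetes'

def search_index_enabled(site_setting=None):
    if site_setting is not None:              # explicit configuration wins
        return site_setting
    raw = os.environ.get('INDEXED_SEARCH', '')
    if raw:                                   # legacy variable, parsed as a bool
        return raw.strip().lower() in ('1', 't', 'true')
    return deploy_type() != 'docker-container'  # resource-based default

print(deploy_type(), search_index_enabled())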
code/rnaseq/star_align_p2.py
#!/usr/bin/env python3 ''' __Task__ 1. run star to generate second pass alignments ''' import os import glob import subprocess import argparse import math def run_star(in_path,out_path,batch,batch_size,array_max,live): output = 'star_align_p2_'+batch+'_%a.out' os.chdir(scripts_dir) open('star_align_p2.flag','w').close() ## to serve as output for snakemake shell_args = ' '.join([in_path,out_path,str(batch_size),batch]) cmd = ' '.join(['sbatch','--output='+output,'--array=1-'+str(array_max),'star_align_p2.sh',shell_args]) if live == True: subprocess.call(cmd, shell=True) else: print(cmd) ## ## set stable environmental variables ## home = os.environ['HOME'] scripts_dir = home + '/projects/glioma/code/rnaseq/' work_dir = '/scratch/chd5n/glioma/' fq_dir = work_dir + 'fastq/' fqc_dir = work_dir + 'fqc/' mqc_dir = work_dir + 'mqc/' qnt_dir = work_dir + 'quant/' str_dir = work_dir + 'star/' suff = '.fastq.gz' ## ## parse command line arguments ## parser = argparse.ArgumentParser(description='star align 2') parser.add_argument('--batch_size',help='number of samples to be processed in single thread',action='store',dest='batch_size') parser.add_argument('--live',help='execute a live run',action='store_true',dest='run_type') parser.add_argument('--test',help='execute a test run',action='store_false',dest='run_type') parser.set_defaults(run_type=False,batch_size=10) args = parser.parse_args() ## ## prepare and execute ## batches = ['chrom9', 'chrom20'] for batch in batches: in_path = fq_dir+batch+'/' out_path = str_dir+batch+'/p2/' if os.path.exists(str_dir+batch): pass else: os.mkdir(str_dir+batch) if os.path.exists(out_path): pass else: os.mkdir(out_path) fileset = glob.glob(in_path+'*'+suff) array_max = math.ceil(len(fileset)/int(args.batch_size)) run_star(in_path,out_path,batch,int(args.batch_size),array_max,args.run_type)
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
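star_align_p2.py builds every path from os.environ['HOME'], which raises KeyError when HOME is unset (as in some cron and container environments). A defensive variant of the same lookup, sketched with the script's directory layout as a stand-in:

import os
from pathlib import Path

# Prefer HOME when present, fall back to the platform's notion of home.
home = os.environ.get('HOME', str(Path.home()))

scripts_dir = os.path.join(home, 'projects', 'glioma', 'code', 'rnaseq')
print(scripts_dir)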
main/main.go
package main import ( "flag" "log" "os" "time" "strings" "strconv" "io/ioutil" ) // Logger default logger var ( Debug = log.New(ioutil.Discard, "[DEBUG] ", log.Ldate|log.Ltime|log.Lshortfile) Logger = log.New(os.Stdout, "[MAIN] ", log.Ldate|log.Ltime|log.Lshortfile) ) var ( t int interval int ts []string rr []string domain string ttl int delete bool get bool value string ) var ( ipv4 string ipv6 string ) func init() { if os.Getenv("DNS_ENV") != "PRODUCTION" { Debug.SetOutput(os.Stdout) } // flag var region, u, p, r, types string flag.StringVar(&region, "e", getenv("REGION", "default"), "region of service.") flag.StringVar(&u, "u", os.Getenv("AKID"), "Aliyun ID") flag.StringVar(&p, "p", os.Getenv("AKSCT"), "Aliyun secret") flag.StringVar(&domain, "d", os.Getenv("DOMAIN"), "root domain name") flag.StringVar(&r, "r", os.Getenv("RR"), "list of sub-domain name. Can split with \",\".") flag.StringVar(&types, "t", getenv("TYPE", "A"), "list of type need to update. Can split with \",\".") flag.IntVar(&interval, "i", getenvInt("INTERVAL", -1), "time interval. -1 means it will only run once.") flag.IntVar(&ttl, "l", getenvInt("TTL", 600), "time") flag.BoolVar(&delete, "x", false, "delete domains from rr") flag.BoolVar(&get, "g", false, "get values") flag.StringVar(&value, "v", "", "set value") flag.Parse() // dealing InitDomain(region, u, p) if (len(r) == 0 || len(domain) == 0) { Logger.Fatalf("Value domain, rr should not be empty\n") os.Exit(1) } rr = strings.Split(r, ",") for i, or := range rr { rr[i] = strings.TrimSpace(or) } ts = strings.Split(types, ",") for i, ot := range ts { ts[i] = strings.TrimSpace(ot) } } func getenvInt(key string, fallback int) int { if value, ok := os.LookupEnv(key); ok { i, err := strconv.Atoi(value) if err == nil { return i } } return fallback } func getenv(key, fallback string) string { if value, ok := os.LookupEnv(key); ok { return value } return fallback } func main() { if delete || get || len(value) != 0{ Logger.Println("Staring to manual task") for _, r := range rr { for _, t := range ts { if delete { DelDomainRecord(domain, r, t) } else if get { resp, _ := GetDomainRecord(domain, r, t) Logger.Println(resp.DomainRecords) } else if len(value) != 0{ AddDomainRecord(domain, r, ttl, t, value) } } } return } Logger.Println("Staring task") for true { Logger.Println("Perform update") routing() if interval == -1 { break } time.Sleep(time.Duration(interval) * time.Second) } } func routing() { Logger.Printf("Start task\n") ipv4 = GetIPv4() Logger.Printf("current ipv4: %s\n", ipv4) ipv6 = GetIPv6() Logger.Printf("current ipv6: %s\n", ipv6) for _, r := range rr { for _, t := range ts { doDomain(r, t) } } t = t + 1 } func doDomain(r, t string) { Debug.Printf("doDomain(%s, %s)", r, t) ip := getIP(t) if len(ip) <= 0 { Logger.Printf("no valid ip address! cant set %s type for %s.\n", t, r + "." + domain) return } Debug.Printf("try to update domain %s for %s type.\n", r + "." + domain, t) resp, err := GetDomainRecord(domain, r, t) if err != nil {return} if (resp.IsSuccess()) { if resp.TotalCount <= 0 { // add record Logger.Printf("found 0 record for %s in default line, try to add record.\n", r + "." + domain) AddDomainRecord(domain, r, ttl, t, ip) } else { Debug.Printf("found existing %s", r + "." + domain) for _, record := range resp.DomainRecords.Record { if record.RR == r && record.DomainName == domain && record.Type == t && record.Line == "default" { if record.Value == ip { Logger.Printf("found same ip record for %s at default line for id %s.\n", r + "." 
+ domain, record.RecordId) break } Debug.Printf("try to update domain %s: %s", r + "." + domain, record.RecordId) UpdateDomainRecord(record.RecordId, r, ttl, t, ip) break } } } } else { Logger.Printf("Request failed for: %s", r + "." + domain) } } func getIP(recordType string) string { switch recordType { case "AAAA": return ipv6 case "A": return ipv4 default: return "" } }
[ "\"DNS_ENV\"", "\"AKID\"", "\"AKSCT\"", "\"DOMAIN\"", "\"RR\"" ]
[]
[ "DOMAIN", "AKSCT", "RR", "DNS_ENV", "AKID" ]
[]
["DOMAIN", "AKSCT", "RR", "DNS_ENV", "AKID"]
go
5
0
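main.go resolves each setting as flag value, then environment variable, then hard-coded fallback via its getenv/getenvInt helpers. The same precedence can be sketched in Python with argparse defaults (variable names copied from the Go file, the helper itself is illustrative):

import argparse
import os

def getenv_int(key, fallback):
    """Return the variable parsed as an int, or fallback when unset or invalid."""
    value = os.environ.get(key)
    if value is not None:
        try:
            return int(value)
        except ValueError:
            pass
    return fallback

parser = argparse.ArgumentParser()
parser.add_argument('-e', default=os.environ.get('REGION', 'default'))
parser.add_argument('-i', type=int, default=getenv_int('INTERVAL', -1))
parser.add_argument('-l', type=int, default=getenv_int('TTL', 600))
args = parser.parse_args([])  # empty list: just demonstrate the resolved defaults
print(args.e, args.i, args.l)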
src/ray_parameter_server/async_parameter_server.py
import os import random import ray import torch import torch.optim as optim from .abstract_parameter_server import AbstractParameterServer from .data_worker import DataWorker MIN_FREQ = int(os.environ['SUSML_MIN_FREQ']) RAND_SEED = int(os.environ['SUSML_RAND_SEED']) NUM_EPOCHS = int(os.environ['SUSML_NUM_EPOCHS']) BATCH_SIZE = int(os.environ['SUSML_BATCH_SIZE']) LR = float(os.environ['SUSML_LR']) PARALLELISM_LEVEL = int(os.environ['SUSML_PARALLELISM_LEVEL']) random.seed(RAND_SEED) torch.manual_seed(RAND_SEED) torch.backends.cudnn.deterministic = True @ray.remote(num_cpus=1) class AsyncParameterServer(AbstractParameterServer): def __init__(self): super().__init__() self.workers = [DataWorker.remote(i) for i in range(PARALLELISM_LEVEL)] self.model.apply(self.init_weights) self.model.embedding.weight.data[self.PAD_IDX] = torch.zeros(self.EMBEDDING_DIM) self.optimizer = optim.Adam(self.model.parameters(), lr=LR) def train(self): updates = len(self.train_iterators[0]) * len(self.workers) current_weights = self.get_weights() gradients = {} for worker in self.workers: gradients[worker.compute_gradients.remote(current_weights)] = worker batches_processed_by_worker = {worker_id: 0 for worker_id in range(PARALLELISM_LEVEL)} for iteration in range(updates): ready_gradient_list, rest = ray.wait(list(gradients)) if len(ready_gradient_list) == 0: print(f'Wait failed {ready_gradient_list}, {rest}') ready_gradient_id = ready_gradient_list[0] worker = gradients.pop(ready_gradient_id) worker_rank = ray.get(worker.get_rank.remote()) batches_processed_by_worker[worker_rank] += 1 self.model.train() current_weights = self.apply_gradients(*[ray.get(ready_gradient_id)]) if batches_processed_by_worker[worker_rank] <= len(self.train_iterators[0]): gradients[worker.compute_gradients.remote(current_weights)] = worker
[]
[]
[ "SUSML_RAND_SEED", "SUSML_NUM_EPOCHS", "SUSML_MIN_FREQ", "SUSML_LR", "SUSML_BATCH_SIZE", "SUSML_PARALLELISM_LEVEL" ]
[]
["SUSML_RAND_SEED", "SUSML_NUM_EPOCHS", "SUSML_MIN_FREQ", "SUSML_LR", "SUSML_BATCH_SIZE", "SUSML_PARALLELISM_LEVEL"]
python
6
0
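async_parameter_server.py indexes os.environ directly at import time, so a missing SUSML_* variable surfaces as a bare KeyError. A sketch of a friendlier required-variable reader (require_env is an invented helper, not part of the project):

import os

def require_env(name, cast=str):
    """Read a mandatory variable and convert it, failing with a clear message."""
    try:
        raw = os.environ[name]
    except KeyError:
        raise SystemExit(f'required environment variable {name} is not set')
    try:
        return cast(raw)
    except ValueError:
        raise SystemExit(f'{name}={raw!r} is not a valid {cast.__name__}')

BATCH_SIZE = require_env('SUSML_BATCH_SIZE', int)
LR = require_env('SUSML_LR', float)
print(BATCH_SIZE, LR)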
core/chaincode/platforms/golang/platform_test.go
/* Copyright IBM Corp. 2016 All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package golang import ( "archive/tar" "bytes" "compress/gzip" "fmt" "os" "strings" "testing" "time" "github.com/docker/docker/pkg/testutil/assert" "github.com/hyperledger/fabric/core/config" pb "github.com/hyperledger/fabric/protos/peer" "github.com/spf13/viper" ) func testerr(err error, succ bool) error { if succ && err != nil { return fmt.Errorf("Expected success but got error %s", err) } else if !succ && err == nil { return fmt.Errorf("Expected failure but succeeded") } return nil } func writeBytesToPackage(name string, payload []byte, mode int64, tw *tar.Writer) error { //Make headers identical by using zero time var zeroTime time.Time tw.WriteHeader(&tar.Header{Name: name, Size: int64(len(payload)), ModTime: zeroTime, AccessTime: zeroTime, ChangeTime: zeroTime, Mode: mode}) tw.Write(payload) return nil } func generateFakeCDS(ccname, path, file string, mode int64) (*pb.ChaincodeDeploymentSpec, error) { codePackage := bytes.NewBuffer(nil) gw := gzip.NewWriter(codePackage) tw := tar.NewWriter(gw) payload := make([]byte, 25, 25) err := writeBytesToPackage(file, payload, mode, tw) if err != nil { return nil, err } tw.Close() gw.Close() cds := &pb.ChaincodeDeploymentSpec{ ChaincodeSpec: &pb.ChaincodeSpec{ ChaincodeId: &pb.ChaincodeID{ Name: ccname, Path: path, }, }, CodePackage: codePackage.Bytes(), } return cds, nil } type spec struct { CCName string Path, File string Mode int64 SuccessExpected bool RealGen bool } func TestValidateCDS(t *testing.T) { platform := &Platform{} specs := make([]spec, 0) specs = append(specs, spec{CCName: "NoCode", Path: "path/to/nowhere", File: "/bin/warez", Mode: 0100400, SuccessExpected: false}) specs = append(specs, spec{CCName: "NoCode", Path: "path/to/somewhere", File: "/src/path/to/somewhere/main.go", Mode: 0100400, SuccessExpected: true}) specs = append(specs, spec{CCName: "NoCode", Path: "path/to/somewhere", File: "/src/path/to/somewhere/warez", Mode: 0100555, SuccessExpected: false}) for _, s := range specs { cds, err := generateFakeCDS(s.CCName, s.Path, s.File, s.Mode) err = platform.ValidateDeploymentSpec(cds) if s.SuccessExpected == true && err != nil { t.Errorf("Unexpected failure: %s", err) } if s.SuccessExpected == false && err == nil { t.Log("Expected validation failure") t.Fail() } } } func TestPlatform_GoPathNotSet(t *testing.T) { p := &Platform{} spec := &pb.ChaincodeSpec{ ChaincodeId: &pb.ChaincodeID{ Path: "/opt/gopath/src/github.com/hyperledger/fabric", }, } gopath := os.Getenv("GOPATH") defer os.Setenv("GOPATH", gopath) os.Setenv("GOPATH", "") err := p.ValidateSpec(spec) assert.Contains(t, err.Error(), "invalid GOPATH environment variable value") } func Test_findSource(t *testing.T) { gopath, err := getGopath() if err != nil { t.Errorf("failed to get GOPATH: %s", err) } var source SourceMap source, err = findSource(gopath, "github.com/hyperledger/fabric/peer") if err != nil { t.Errorf("failed to find source: %s", err) } if _, ok := 
source["src/github.com/hyperledger/fabric/peer/main.go"]; !ok { t.Errorf("Failed to find expected source file: %v", source) } source, err = findSource(gopath, "acme.com/this/should/not/exist") if err == nil { t.Errorf("Success when failure was expected") } } func Test_DeploymentPayload(t *testing.T) { platform := &Platform{} spec := &pb.ChaincodeSpec{ ChaincodeId: &pb.ChaincodeID{ Path: "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02", }, } payload, err := platform.GetDeploymentPayload(spec) assert.NilError(t, err) t.Logf("payload size: %d", len(payload)) is := bytes.NewReader(payload) gr, err := gzip.NewReader(is) if err == nil { tr := tar.NewReader(gr) for { header, err := tr.Next() if err != nil { // We only get here if there are no more entries to scan break } t.Logf("%s (%d)", header.Name, header.Size) } } } func Test_decodeUrl(t *testing.T) { cs := &pb.ChaincodeSpec{ ChaincodeId: &pb.ChaincodeID{ Name: "Test Chaincode", Path: "http://github.com/hyperledger/fabric/examples/chaincode/go/map", }, } if _, err := decodeUrl(cs); err != nil { t.Fail() t.Logf("Error to decodeUrl unsuccessfully with valid path: %s, %s", cs.ChaincodeId.Path, err) } cs.ChaincodeId.Path = "" if _, err := decodeUrl(cs); err == nil { t.Fail() t.Logf("Error to decodeUrl successfully with invalid path: %s", cs.ChaincodeId.Path) } cs.ChaincodeId.Path = "/" if _, err := decodeUrl(cs); err == nil { t.Fail() t.Logf("Error to decodeUrl successfully with invalid path: %s", cs.ChaincodeId.Path) } cs.ChaincodeId.Path = "http:///" if _, err := decodeUrl(cs); err == nil { t.Fail() t.Logf("Error to decodeUrl successfully with invalid path: %s", cs.ChaincodeId.Path) } } func TestValidateSpec(t *testing.T) { platform := &Platform{} var tests = []struct { spec *pb.ChaincodeSpec succ bool }{ {spec: &pb.ChaincodeSpec{ChaincodeId: &pb.ChaincodeID{Name: "Test Chaincode", Path: "http://github.com/hyperledger/fabric/examples/chaincode/go/map"}}, succ: true}, {spec: &pb.ChaincodeSpec{ChaincodeId: &pb.ChaincodeID{Name: "Test Chaincode", Path: "https://github.com/hyperledger/fabric/examples/chaincode/go/map"}}, succ: true}, {spec: &pb.ChaincodeSpec{ChaincodeId: &pb.ChaincodeID{Name: "Test Chaincode", Path: "github.com/hyperledger/fabric/examples/chaincode/go/map"}}, succ: true}, {spec: &pb.ChaincodeSpec{ChaincodeId: &pb.ChaincodeID{Name: "Test Chaincode", Path: "github.com/hyperledger/fabric/bad/chaincode/go/map"}}, succ: false}, {spec: &pb.ChaincodeSpec{ChaincodeId: &pb.ChaincodeID{Name: "Test Chaincode", Path: ":github.com/hyperledger/fabric/examples/chaincode/go/map"}}, succ: false}, } for _, tst := range tests { err := platform.ValidateSpec(tst.spec) if err = testerr(err, tst.succ); err != nil { t.Errorf("Error validating chaincode spec: %s, %s", tst.spec.ChaincodeId.Path, err) } } } func TestGetDeploymentPayload(t *testing.T) { platform := &Platform{} var tests = []struct { spec *pb.ChaincodeSpec succ bool }{ {spec: &pb.ChaincodeSpec{ChaincodeId: &pb.ChaincodeID{Name: "Test Chaincode", Path: "github.com/hyperledger/fabric/examples/chaincode/go/map"}}, succ: true}, {spec: &pb.ChaincodeSpec{ChaincodeId: &pb.ChaincodeID{Name: "Test Chaincode", Path: "github.com/hyperledger/fabric/examples/bad/go/map"}}, succ: false}, {spec: &pb.ChaincodeSpec{ChaincodeId: &pb.ChaincodeID{Name: "Test Chaincode", Path: "github.com/hyperledger/fabric/test/chaincodes/BadImport"}}, succ: false}, } for _, tst := range tests { _, err := platform.GetDeploymentPayload(tst.spec) if err = testerr(err, tst.succ); err != nil { t.Errorf("Error 
validating chaincode spec: %s, %s", tst.spec.ChaincodeId.Path, err) } } } //TestGenerateDockerBuild goes through the functions needed to do docker build func TestGenerateDockerBuild(t *testing.T) { platform := &Platform{} specs := make([]spec, 0) specs = append(specs, spec{CCName: "NoCode", Path: "path/to/nowhere", File: "/bin/warez", Mode: 0100400, SuccessExpected: false}) specs = append(specs, spec{CCName: "invalidhttp", Path: "https://not/a/valid/path", File: "/src/github.com/hyperledger/fabric/examples/chaincode/go/map/map.go", Mode: 0100400, SuccessExpected: false, RealGen: true}) specs = append(specs, spec{CCName: "map", Path: "github.com/hyperledger/fabric/examples/chaincode/go/map", File: "/src/github.com/hyperledger/fabric/examples/chaincode/go/map/map.go", Mode: 0100400, SuccessExpected: true, RealGen: true}) specs = append(specs, spec{CCName: "AutoVendor", Path: "github.com/hyperledger/fabric/test/chaincodes/AutoVendor/chaincode", File: "/src/github.com/hyperledger/fabric/test/chaincodes/AutoVendor/chaincode/main.go", Mode: 0100400, SuccessExpected: true, RealGen: true}) specs = append(specs, spec{CCName: "mapBadPath", Path: "github.com/hyperledger/fabric/examples/chaincode/go/map", File: "/src/github.com/hyperledger/fabric/examples/bad/path/to/map.go", Mode: 0100400, SuccessExpected: false}) specs = append(specs, spec{CCName: "mapBadMode", Path: "github.com/hyperledger/fabric/examples/chaincode/go/map", File: "/src/github.com/hyperledger/fabric/examples/chaincode/go/map/map.go", Mode: 0100555, SuccessExpected: false}) var err error for _, tst := range specs { inputbuf := bytes.NewBuffer(nil) tw := tar.NewWriter(inputbuf) var cds *pb.ChaincodeDeploymentSpec if tst.RealGen { cds = &pb.ChaincodeDeploymentSpec{ ChaincodeSpec: &pb.ChaincodeSpec{ ChaincodeId: &pb.ChaincodeID{ Name: tst.CCName, Path: tst.Path, Version: "0", }, }, } cds.CodePackage, err = platform.GetDeploymentPayload(cds.ChaincodeSpec) if err = testerr(err, tst.SuccessExpected); err != nil { t.Errorf("test failed in GetDeploymentPayload: %s, %s", cds.ChaincodeSpec.ChaincodeId.Path, err) } } else { cds, err = generateFakeCDS(tst.CCName, tst.Path, tst.File, tst.Mode) } if _, err = platform.GenerateDockerfile(cds); err != nil { t.Errorf("could not generate docker file for a valid spec: %s, %s", cds.ChaincodeSpec.ChaincodeId.Path, err) } err = platform.GenerateDockerBuild(cds, tw) if err = testerr(err, tst.SuccessExpected); err != nil { t.Errorf("Error validating chaincode spec: %s, %s", cds.ChaincodeSpec.ChaincodeId.Path, err) } } } func TestMain(m *testing.M) { viper.SetConfigName("core") viper.SetEnvPrefix("CORE") config.AddDevConfigPath(nil) viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) viper.AutomaticEnv() if err := viper.ReadInConfig(); err != nil { fmt.Printf("could not read config %s\n", err) os.Exit(-1) } os.Exit(m.Run()) }
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
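TestPlatform_GoPathNotSet above snapshots GOPATH, clears it, and restores it with defer. A common Python equivalent is unittest.mock.patch.dict, which undoes the change automatically; a small sketch:

import os
import unittest
from unittest import mock

class GopathTest(unittest.TestCase):
    def test_behaviour_with_gopath_cleared(self):
        # patch.dict restores the original environment when the block exits,
        # mirroring the defer os.Setenv("GOPATH", gopath) in the Go test.
        with mock.patch.dict(os.environ, {'GOPATH': ''}):
            self.assertEqual(os.environ['GOPATH'], '')
        # outside the block the previous value (or absence) is back

if __name__ == '__main__':
    unittest.main()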
src/forwardsc/forwardsc.go
package main import ( "context" "io" "log" "os" "runtime" "time" cloudevents "github.com/cloudevents/sdk-go/v2" "github.com/google/uuid" ) func parseOrder(event cloudevents.Event) (order *OrderEvent, err error) { if err := event.DataAs(&order); err != nil { log.Printf("Error while extracting cloudevent Data: %s\n", err.Error()) } return } func parseContainer(event cloudevents.Event) (container *ContainerAssignedToOrder, err error) { if err := event.DataAs(&container); err != nil { log.Printf("Error while extracting cloudevent Data: %s\n", err.Error()) } return } var sleepduration = 100 func receive(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { // Here is where your code to process the event will go. // In this example we will log the event msg //log.Printf("Event received. \n%s\n", event) time.Sleep(time.Duration(sleepduration) * time.Millisecond) var result = cloudevents.NewHTTPResult(200, "OK") //cloudevents.Result = nil switch eventType := event.Type(); eventType { case "OrderEvent": if order, err := parseOrder(event); err != nil { result = cloudevents.NewHTTPResult(400, "failed to convert data: %s", err) } else { t := time.Now() if order.Payload.Timestamp == 0 { order.Payload.Timestamp = order.Timestamp } order.Payload.MidTimestamp = append(order.Payload.MidTimestamp, t.UnixNano()/1000000) newEvent := cloudevents.NewEvent() newEvent.SetID(uuid.New().String()) newEvent.SetSource("dev.knative.containerproducerimpl") newEvent.SetType("OrderEvent") newEvent.SetTime(t) msg := OrderEvent{ Timestamp: t.UnixNano() / 1000000, Type: order.Type, Payload: order.Payload, } if err := newEvent.SetData(cloudevents.ApplicationJSON, msg); err != nil { return nil, cloudevents.NewHTTPResult(500, "failed to set response data: %s", err) } //log.Printf("Received order %s and response with %s", event, newEvent) log.Printf("Received order ID = %s", order.Payload.OrderID) // sum := 0 // for i := 1; i < 10000000; i++ { // sum += i // } log.Printf("Forwarded %s", order.Payload.OrderID) c.Send(ctx, newEvent) // log.Printf("Forwarded %s", newEvent) } default: log.Printf("Skip event type: %s\n", eventType) // log.Printf("Finish skipping event %d", t.UnixNano()) } return nil, result } var c cloudevents.Client func main() { logFile, err := os.OpenFile("log", os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666) if err != nil { panic(err) } mw := io.MultiWriter(os.Stdout, logFile) log.SetOutput(mw) log.Printf("Forward Spring Container") log.Printf("Initalizing sender\n") log.Printf("Sleep duration: %d", sleepduration) numcpu := runtime.NumCPU() mcpus := 1000 runtime.GOMAXPROCS(mcpus) log.Printf("Number of available CPU: %d Expected Paralleism: %d\n", numcpu, mcpus) //runtime.GOMAXPROCS(2) mcpus = runtime.GOMAXPROCS(0) log.Printf("Current parallelism: %d\n", mcpus) addr := os.Getenv("ORDER_ENDPOINT") log.Printf("Order endpoint: %s", addr) ctx := cloudevents.ContextWithTarget(context.Background(), addr) //ctx := cloudevents.ContextWithTarget(context.Background(), "http://order-observer.kcontainer.svc.cluster.local") p, err := cloudevents.NewHTTP() if err != nil { log.Fatalf("failed to create protocol: %s", err.Error()) } c, err = cloudevents.NewClient(p, cloudevents.WithTimeNow(), cloudevents.WithUUIDs()) if err != nil { log.Fatalf("failed to create sender, %v", err) } log.Printf("Starting receiver\n") log.Fatal(c.StartReceiver(ctx, receive)) }
[ "\"ORDER_ENDPOINT\"" ]
[]
[ "ORDER_ENDPOINT" ]
[]
["ORDER_ENDPOINT"]
go
1
0
app/init.go
package app import ( "net/http" "os" "gopkg.in/mgo.v2" "github.com/otiai10/marmoset" "github.com/seqpod/seqpod-api/controllers/v0" "github.com/seqpod/seqpod-api/filters" ) func init() { session, err := mgo.Dial(os.Getenv("MONGODB_URI")) if err != nil { panic(err) } // defer session.Close() mf := filters.InitMongoFilter(session) lf := filters.InitLogFilter() af := filters.InitializeAuthFilter() cf := new(marmoset.ContextFilter) unauthorized := marmoset.NewRouter() unauthorized.GET("/v0/status", v0.Status) // TODO: Make download endpoint authorized unauthorized.GET("/v0/jobs/(?P<id>[0-9a-f]+)/results/(?P<result>[0-9a-zA-Z\\._-]+)", v0.Download) authorized := marmoset.NewRouter() authorized.GET("/v0/jobs/(?P<id>[0-9a-f]+)", v0.JobGet) authorized.POST("/v0/jobs/(?P<id>[0-9a-f]+)/inputs/upload", v0.JobInputUpload) authorized.POST("/v0/jobs/(?P<id>[0-9a-f]+)/ready", v0.JobMarkReady) authorized.POST("/v0/jobs/workspace", v0.JobWorkspace) authorized.Apply(cf, af, mf) root := marmoset.NewRouter() root.Apply(lf) root.Subrouter(unauthorized) root.Subrouter(authorized) http.Handle("/", root) }
[ "\"MONGODB_URI\"" ]
[]
[ "MONGODB_URI" ]
[]
["MONGODB_URI"]
go
1
0
pkg/database/database_util.go
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package database import ( "context" "errors" "os" "strconv" "testing" "time" "github.com/google/exposure-notifications-server/pkg/secrets" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/ory/dockertest" "github.com/sethvargo/go-retry" ) var ( approxTime = cmp.Options{cmpopts.EquateApproxTime(time.Second)} ) // NewTestDatabaseWithConfig creates a new database suitable for use in testing. // This should not be used outside of testing, but it is exposed in the main // package so it can be shared with other packages. // // All database tests can be skipped by running `go test -short` or by setting // the `SKIP_DATABASE_TESTS` environment variable. func NewTestDatabaseWithConfig(tb testing.TB) (*Database, *Config) { tb.Helper() if testing.Short() { tb.Skipf("🚧 Skipping database tests (short)!") } if skip, _ := strconv.ParseBool(os.Getenv("SKIP_DATABASE_TESTS")); skip { tb.Skipf("🚧 Skipping database tests (SKIP_DATABASE_TESTS is set)!") } // Context. ctx := context.Background() // Create the pool (docker instance). pool, err := dockertest.NewPool("") if err != nil { tb.Fatalf("failed to create Docker pool: %s", err) } // Start the container. dbname, username, password := "en-verification-server", "my-username", "abcd1234" tb.Log("Starting database") container, err := pool.RunWithOptions(&dockertest.RunOptions{ Repository: "postgres", Tag: "12-alpine", Env: []string{ "LANG=C", "POSTGRES_DB=" + dbname, "POSTGRES_USER=" + username, "POSTGRES_PASSWORD=" + password, }, }) if err != nil { tb.Fatalf("failed to start postgres container: %s", err) } // Ensure container is cleaned up. tb.Cleanup(func() { if err := pool.Purge(container); err != nil { tb.Fatalf("failed to cleanup postgres container: %s", err) } }) // Get the host. On Mac, Docker runs in a VM. host := container.GetBoundIP("5432/tcp") port := container.GetPort("5432/tcp") // build database config. config := Config{ CacheTTL: 30 * time.Second, User: username, Port: port, Host: host, Name: dbname, Password: password, SSLMode: "disable", Secrets: secrets.Config{ SecretManagerType: secrets.SecretManagerTypeNoop, }, } // Wait for the container to start - we'll retry connections in a loop below, // but there's no point in trying immediately. time.Sleep(1 * time.Second) // Establish a connection to the database. Use a Fibonacci backoff instead of // exponential so wait times scale appropriately. 
b, err := retry.NewFibonacci(500 * time.Millisecond) if err != nil { tb.Fatalf("failed to configure backoff: %v", err) } b = retry.WithMaxRetries(10, b) b = retry.WithCappedDuration(10*time.Second, b) var db *Database if err := retry.Do(ctx, b, func(_ context.Context) error { var err error db, err = config.Open(ctx) if err != nil { tb.Logf("retrying error: %v", err) return retry.RetryableError(err) } return nil }); err != nil { tb.Fatalf("failed to start postgres: %s", err) } if err := db.RunMigrations(ctx); err != nil { tb.Fatalf("failed to migrate database: %v", err) } // Close db when done. tb.Cleanup(func() { db.db.Close() }) return db, &config } func NewTestDatabase(tb testing.TB) *Database { tb.Helper() db, _ := NewTestDatabaseWithConfig(tb) return db } var ErrSecretNotExist = errors.New("secret does not exist") // InMemorySecretManager is a secret manager that returns the value at the given // key or an error if it does not exist. type InMemorySecretManager map[string]string // GetSecretValue implements secrets. func (s InMemorySecretManager) GetSecretValue(_ context.Context, v string) (string, error) { if v, ok := s[v]; ok { return v, nil } return "", ErrSecretNotExist }
[ "\"SKIP_DATABASE_TESTS\"" ]
[]
[ "SKIP_DATABASE_TESTS" ]
[]
["SKIP_DATABASE_TESTS"]
go
1
0
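database_util.go gates its Docker-backed tests on SKIP_DATABASE_TESTS (plus go test -short). The same escape hatch in a Python test suite could look like this, with the class name purely illustrative:

import os
import unittest

def _truthy(name):
    return os.environ.get(name, '').strip().lower() in ('1', 'true', 'yes')

@unittest.skipIf(_truthy('SKIP_DATABASE_TESTS'), 'SKIP_DATABASE_TESTS is set')
class DatabaseTests(unittest.TestCase):
    def test_placeholder(self):
        # stands in for the real container-backed database checks
        self.assertTrue(True)

if __name__ == '__main__':
    unittest.main()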
python-socketio.py
""" python-socketio.py Sample Mcity OCTANE python socketio script """ import os from dotenv import load_dotenv import socketio #Load environment variables load_dotenv() api_key = os.environ.get('MCITY_OCTANE_KEY', None) server = os.environ.get('MCITY_OCTANE_SERVER', 'http://localhost:5000') namespace = "/octane" #If no API Key provided, exit. if not api_key: print ("No API KEY SPECIFIED. EXITING") exit() #Create an SocketIO Python client. sio = socketio.Client() # Async client is available also: sio = socketio.AsyncClient() def send_auth(): """ Emit an authentication event. """ sio.emit('auth', {'x-api-key': api_key}, namespace=namespace) #Define event callbacks @sio.on('connect', namespace=namespace) def on_connect(): """ Handle connection event and send authentication key """ send_auth() @sio.on('join', namespace=namespace) def on_join(data): """ Event fired when user joins a channel """ print('Join received with ', data) @sio.on('channels', namespace=namespace) def on_channels(data): """ Event fired when a user requests current channel information. """ print('Channel information', data) @sio.on('disconnect', namespace=namespace) def on_disconnect(): """ Event fired on disconnect. """ print('disconnected from server') #Make connection. sio.connect(server, namespaces=[namespace]) sio.wait()
[]
[]
[ "MCITY_OCTANE_SERVER", "MCITY_OCTANE_KEY" ]
[]
["MCITY_OCTANE_SERVER", "MCITY_OCTANE_KEY"]
python
2
0
src/syscall/exec_linux_test.go
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build linux package syscall_test import ( "flag" "fmt" "internal/testenv" "io" "os" "os/exec" "os/user" "path/filepath" "runtime" "strconv" "strings" "syscall" "testing" "unsafe" ) func isDocker() bool { _, err := os.Stat("/.dockerenv") return err == nil } func isLXC() bool { return os.Getenv("container") == "lxc" } func skipInContainer(t *testing.T) { // TODO: the callers of this func are using this func to skip // tests when running as some sort of "fake root" that's uid 0 // but lacks certain Linux capabilities. Most of the Go builds // run in privileged containers, though, where root is much // closer (if not identical) to the real root. We should test // for what we need exactly (which capabilities are active?), // instead of just assuming "docker == bad". Then we'd get more test // coverage on a bunch of builders too. if isDocker() { t.Skip("skip this test in Docker container") } if isLXC() { t.Skip("skip this test in LXC container") } } func skipNoUserNamespaces(t *testing.T) { if _, err := os.Stat("/proc/self/ns/user"); err != nil { if os.IsNotExist(err) { t.Skip("kernel doesn't support user namespaces") } if os.IsPermission(err) { t.Skip("unable to test user namespaces due to permissions") } t.Fatalf("Failed to stat /proc/self/ns/user: %v", err) } } func skipUnprivilegedUserClone(t *testing.T) { // Skip the test if the sysctl that prevents unprivileged user // from creating user namespaces is enabled. data, errRead := os.ReadFile("/proc/sys/kernel/unprivileged_userns_clone") if errRead != nil || len(data) < 1 || data[0] == '0' { t.Skip("kernel prohibits user namespace in unprivileged process") } } // Check if we are in a chroot by checking if the inode of / is // different from 2 (there is no better test available to non-root on // linux). func isChrooted(t *testing.T) bool { root, err := os.Stat("/") if err != nil { t.Fatalf("cannot stat /: %v", err) } return root.Sys().(*syscall.Stat_t).Ino != 2 } func checkUserNS(t *testing.T) { skipInContainer(t) skipNoUserNamespaces(t) if isChrooted(t) { // create_user_ns in the kernel (see // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/kernel/user_namespace.c) // forbids the creation of user namespaces when chrooted. t.Skip("cannot create user namespaces when chrooted") } // On some systems, there is a sysctl setting. if os.Getuid() != 0 { skipUnprivilegedUserClone(t) } // On Centos 7 make sure they set the kernel parameter user_namespace=1 // See issue 16283 and 20796. if _, err := os.Stat("/sys/module/user_namespace/parameters/enable"); err == nil { buf, _ := os.ReadFile("/sys/module/user_namespace/parameters/enabled") if !strings.HasPrefix(string(buf), "Y") { t.Skip("kernel doesn't support user namespaces") } } // On Centos 7.5+, user namespaces are disabled if user.max_user_namespaces = 0 if _, err := os.Stat("/proc/sys/user/max_user_namespaces"); err == nil { buf, errRead := os.ReadFile("/proc/sys/user/max_user_namespaces") if errRead == nil && buf[0] == '0' { t.Skip("kernel doesn't support user namespaces") } } // When running under the Go continuous build, skip tests for // now when under Kubernetes. (where things are root but not quite) // Both of these are our own environment variables. // See Issue 12815. 
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" { t.Skip("skipping test on Kubernetes-based builders; see Issue 12815") } } func whoamiCmd(t *testing.T, uid, gid int, setgroups bool) *exec.Cmd { checkUserNS(t) cmd := exec.Command("whoami") cmd.SysProcAttr = &syscall.SysProcAttr{ Cloneflags: syscall.CLONE_NEWUSER, UidMappings: []syscall.SysProcIDMap{ {ContainerID: 0, HostID: uid, Size: 1}, }, GidMappings: []syscall.SysProcIDMap{ {ContainerID: 0, HostID: gid, Size: 1}, }, GidMappingsEnableSetgroups: setgroups, } return cmd } func testNEWUSERRemap(t *testing.T, uid, gid int, setgroups bool) { cmd := whoamiCmd(t, uid, gid, setgroups) out, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Cmd failed with err %v, output: %s", err, out) } sout := strings.TrimSpace(string(out)) want := "root" if sout != want { t.Fatalf("whoami = %q; want %q", out, want) } } func TestCloneNEWUSERAndRemapRootDisableSetgroups(t *testing.T) { if os.Getuid() != 0 { t.Skip("skipping root only test") } testNEWUSERRemap(t, 0, 0, false) } func TestCloneNEWUSERAndRemapRootEnableSetgroups(t *testing.T) { if os.Getuid() != 0 { t.Skip("skipping root only test") } testNEWUSERRemap(t, 0, 0, true) } func TestCloneNEWUSERAndRemapNoRootDisableSetgroups(t *testing.T) { if os.Getuid() == 0 { t.Skip("skipping unprivileged user only test") } testNEWUSERRemap(t, os.Getuid(), os.Getgid(), false) } func TestCloneNEWUSERAndRemapNoRootSetgroupsEnableSetgroups(t *testing.T) { if os.Getuid() == 0 { t.Skip("skipping unprivileged user only test") } cmd := whoamiCmd(t, os.Getuid(), os.Getgid(), true) err := cmd.Run() if err == nil { t.Skip("probably old kernel without security fix") } if !os.IsPermission(err) { t.Fatalf("Unprivileged gid_map rewriting with GidMappingsEnableSetgroups must fail") } } func TestEmptyCredGroupsDisableSetgroups(t *testing.T) { cmd := whoamiCmd(t, os.Getuid(), os.Getgid(), false) cmd.SysProcAttr.Credential = &syscall.Credential{} if err := cmd.Run(); err != nil { t.Fatal(err) } } func TestUnshare(t *testing.T) { skipInContainer(t) // Make sure we are running as root so we have permissions to use unshare // and create a network namespace. if os.Getuid() != 0 { t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace") } // When running under the Go continuous build, skip tests for // now when under Kubernetes. (where things are root but not quite) // Both of these are our own environment variables. // See Issue 12815. if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" { t.Skip("skipping test on Kubernetes-based builders; see Issue 12815") } path := "/proc/net/dev" if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Skip("kernel doesn't support proc filesystem") } if os.IsPermission(err) { t.Skip("unable to test proc filesystem due to permissions") } t.Fatal(err) } if _, err := os.Stat("/proc/self/ns/net"); err != nil { if os.IsNotExist(err) { t.Skip("kernel doesn't support net namespace") } t.Fatal(err) } orig, err := os.ReadFile(path) if err != nil { t.Fatal(err) } origLines := strings.Split(strings.TrimSpace(string(orig)), "\n") cmd := exec.Command("cat", path) cmd.SysProcAttr = &syscall.SysProcAttr{ Unshareflags: syscall.CLONE_NEWNET, } out, err := cmd.CombinedOutput() if err != nil { if strings.Contains(err.Error(), "operation not permitted") { // Issue 17206: despite all the checks above, // this still reportedly fails for some users. // (older kernels?). Just skip. 
t.Skip("skipping due to permission error") } t.Fatalf("Cmd failed with err %v, output: %s", err, out) } // Check there is only the local network interface sout := strings.TrimSpace(string(out)) if !strings.Contains(sout, "lo:") { t.Fatalf("Expected lo network interface to exist, got %s", sout) } lines := strings.Split(sout, "\n") if len(lines) >= len(origLines) { t.Fatalf("Got %d lines of output, want <%d", len(lines), len(origLines)) } } func TestGroupCleanup(t *testing.T) { if os.Getuid() != 0 { t.Skip("we need root for credential") } cmd := exec.Command("id") cmd.SysProcAttr = &syscall.SysProcAttr{ Credential: &syscall.Credential{ Uid: 0, Gid: 0, }, } out, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Cmd failed with err %v, output: %s", err, out) } strOut := strings.TrimSpace(string(out)) expected := "uid=0(root) gid=0(root)" // Just check prefix because some distros reportedly output a // context parameter; see https://golang.org/issue/16224. // Alpine does not output groups; see https://golang.org/issue/19938. if !strings.HasPrefix(strOut, expected) { t.Errorf("id command output: %q, expected prefix: %q", strOut, expected) } } func TestGroupCleanupUserNamespace(t *testing.T) { if os.Getuid() != 0 { t.Skip("we need root for credential") } checkUserNS(t) cmd := exec.Command("id") uid, gid := os.Getuid(), os.Getgid() cmd.SysProcAttr = &syscall.SysProcAttr{ Cloneflags: syscall.CLONE_NEWUSER, Credential: &syscall.Credential{ Uid: uint32(uid), Gid: uint32(gid), }, UidMappings: []syscall.SysProcIDMap{ {ContainerID: 0, HostID: uid, Size: 1}, }, GidMappings: []syscall.SysProcIDMap{ {ContainerID: 0, HostID: gid, Size: 1}, }, } out, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Cmd failed with err %v, output: %s", err, out) } strOut := strings.TrimSpace(string(out)) // Strings we've seen in the wild. expected := []string{ "uid=0(root) gid=0(root) groups=0(root)", "uid=0(root) gid=0(root) groups=0(root),65534(nobody)", "uid=0(root) gid=0(root) groups=0(root),65534(nogroup)", "uid=0(root) gid=0(root) groups=0(root),65534", "uid=0(root) gid=0(root) groups=0(root),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody)", // Alpine; see https://golang.org/issue/19938 "uid=0(root) gid=0(root) groups=0(root) context=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023", // CentOS with SELinux context, see https://golang.org/issue/34547 } for _, e := range expected { if strOut == e { return } } t.Errorf("id command output: %q, expected one of %q", strOut, expected) } // TestUnshareHelperProcess isn't a real test. It's used as a helper process // for TestUnshareMountNameSpace. func TestUnshareMountNameSpaceHelper(*testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } defer os.Exit(0) if err := syscall.Mount("none", flag.Args()[0], "proc", 0, ""); err != nil { fmt.Fprintf(os.Stderr, "unshare: mount %v failed: %v", os.Args, err) os.Exit(2) } } // Test for Issue 38471: unshare fails because systemd has forced / to be shared func TestUnshareMountNameSpace(t *testing.T) { skipInContainer(t) // Make sure we are running as root so we have permissions to use unshare // and create a network namespace. 
if os.Getuid() != 0 { t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace") } d, err := os.MkdirTemp("", "unshare") if err != nil { t.Fatalf("tempdir: %v", err) } cmd := exec.Command(os.Args[0], "-test.run=TestUnshareMountNameSpaceHelper", d) cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1") cmd.SysProcAttr = &syscall.SysProcAttr{Unshareflags: syscall.CLONE_NEWNS} o, err := cmd.CombinedOutput() if err != nil { if strings.Contains(err.Error(), ": permission denied") { t.Skipf("Skipping test (golang.org/issue/19698); unshare failed due to permissions: %s, %v", o, err) } t.Fatalf("unshare failed: %s, %v", o, err) } // How do we tell if the namespace was really unshared? It turns out // to be simple: just try to remove the directory. If it's still mounted // on the rm will fail with EBUSY. Then we have some cleanup to do: // we must unmount it, then try to remove it again. if err := os.Remove(d); err != nil { t.Errorf("rmdir failed on %v: %v", d, err) if err := syscall.Unmount(d, syscall.MNT_FORCE); err != nil { t.Errorf("Can't unmount %v: %v", d, err) } if err := os.Remove(d); err != nil { t.Errorf("rmdir after unmount failed on %v: %v", d, err) } } } // Test for Issue 20103: unshare fails when chroot is used func TestUnshareMountNameSpaceChroot(t *testing.T) { skipInContainer(t) // Make sure we are running as root so we have permissions to use unshare // and create a network namespace. if os.Getuid() != 0 { t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace") } d, err := os.MkdirTemp("", "unshare") if err != nil { t.Fatalf("tempdir: %v", err) } // Since we are doing a chroot, we need the binary there, // and it must be statically linked. x := filepath.Join(d, "syscall.test") cmd := exec.Command(testenv.GoToolPath(t), "test", "-c", "-o", x, "syscall") cmd.Env = append(os.Environ(), "CGO_ENABLED=0") if o, err := cmd.CombinedOutput(); err != nil { t.Fatalf("Build of syscall in chroot failed, output %v, err %v", o, err) } cmd = exec.Command("/syscall.test", "-test.run=TestUnshareMountNameSpaceHelper", "/") cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1") cmd.SysProcAttr = &syscall.SysProcAttr{Chroot: d, Unshareflags: syscall.CLONE_NEWNS} o, err := cmd.CombinedOutput() if err != nil { if strings.Contains(err.Error(), ": permission denied") { t.Skipf("Skipping test (golang.org/issue/19698); unshare failed due to permissions: %s, %v", o, err) } t.Fatalf("unshare failed: %s, %v", o, err) } // How do we tell if the namespace was really unshared? It turns out // to be simple: just try to remove the executable. If it's still mounted // on, the rm will fail. Then we have some cleanup to do: // we must force unmount it, then try to remove it again. 
if err := os.Remove(x); err != nil { t.Errorf("rm failed on %v: %v", x, err) if err := syscall.Unmount(d, syscall.MNT_FORCE); err != nil { t.Fatalf("Can't unmount %v: %v", d, err) } if err := os.Remove(x); err != nil { t.Fatalf("rm failed on %v: %v", x, err) } } if err := os.Remove(d); err != nil { t.Errorf("rmdir failed on %v: %v", d, err) } } func TestUnshareUidGidMappingHelper(*testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } defer os.Exit(0) if err := syscall.Chroot(os.TempDir()); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(2) } } // Test for Issue 29789: unshare fails when uid/gid mapping is specified func TestUnshareUidGidMapping(t *testing.T) { if os.Getuid() == 0 { t.Skip("test exercises unprivileged user namespace, fails with privileges") } checkUserNS(t) cmd := exec.Command(os.Args[0], "-test.run=TestUnshareUidGidMappingHelper") cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1") cmd.SysProcAttr = &syscall.SysProcAttr{ Unshareflags: syscall.CLONE_NEWNS | syscall.CLONE_NEWUSER, GidMappingsEnableSetgroups: false, UidMappings: []syscall.SysProcIDMap{ { ContainerID: 0, HostID: syscall.Getuid(), Size: 1, }, }, GidMappings: []syscall.SysProcIDMap{ { ContainerID: 0, HostID: syscall.Getgid(), Size: 1, }, }, } out, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Cmd failed with err %v, output: %s", err, out) } } type capHeader struct { version uint32 pid int32 } type capData struct { effective uint32 permitted uint32 inheritable uint32 } const CAP_SYS_TIME = 25 const CAP_SYSLOG = 34 type caps struct { hdr capHeader data [2]capData } func getCaps() (caps, error) { var c caps // Get capability version if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(nil)), 0); errno != 0 { return c, fmt.Errorf("SYS_CAPGET: %v", errno) } // Get current capabilities if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(&c.data[0])), 0); errno != 0 { return c, fmt.Errorf("SYS_CAPGET: %v", errno) } return c, nil } func mustSupportAmbientCaps(t *testing.T) { var uname syscall.Utsname if err := syscall.Uname(&uname); err != nil { t.Fatalf("Uname: %v", err) } var buf [65]byte for i, b := range uname.Release { buf[i] = byte(b) } ver := string(buf[:]) if i := strings.Index(ver, "\x00"); i != -1 { ver = ver[:i] } if strings.HasPrefix(ver, "2.") || strings.HasPrefix(ver, "3.") || strings.HasPrefix(ver, "4.1.") || strings.HasPrefix(ver, "4.2.") { t.Skipf("kernel version %q predates required 4.3; skipping test", ver) } } // TestAmbientCapsHelper isn't a real test. It's used as a helper process for // TestAmbientCaps. func TestAmbientCapsHelper(*testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } defer os.Exit(0) caps, err := getCaps() if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(2) } if caps.data[0].effective&(1<<uint(CAP_SYS_TIME)) == 0 { fmt.Fprintln(os.Stderr, "CAP_SYS_TIME unexpectedly not in the effective capability mask") os.Exit(2) } if caps.data[1].effective&(1<<uint(CAP_SYSLOG&31)) == 0 { fmt.Fprintln(os.Stderr, "CAP_SYSLOG unexpectedly not in the effective capability mask") os.Exit(2) } } func TestAmbientCaps(t *testing.T) { // Make sure we are running as root so we have permissions to use unshare // and create a network namespace. 
if os.Getuid() != 0 { t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace") } testAmbientCaps(t, false) } func TestAmbientCapsUserns(t *testing.T) { checkUserNS(t) testAmbientCaps(t, true) } func testAmbientCaps(t *testing.T, userns bool) { skipInContainer(t) mustSupportAmbientCaps(t) skipUnprivilegedUserClone(t) // skip on android, due to lack of lookup support if runtime.GOOS == "android" { t.Skip("skipping test on android; see Issue 27327") } u, err := user.Lookup("nobody") if err != nil { t.Fatal(err) } uid, err := strconv.ParseInt(u.Uid, 0, 32) if err != nil { t.Fatal(err) } gid, err := strconv.ParseInt(u.Gid, 0, 32) if err != nil { t.Fatal(err) } // Copy the test binary to a temporary location which is readable by nobody. f, err := os.CreateTemp("", "gotest") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) defer f.Close() e, err := os.Open(os.Args[0]) if err != nil { t.Fatal(err) } defer e.Close() if _, err := io.Copy(f, e); err != nil { t.Fatal(err) } if err := f.Chmod(0755); err != nil { t.Fatal(err) } if err := f.Close(); err != nil { t.Fatal(err) } cmd := exec.Command(f.Name(), "-test.run=TestAmbientCapsHelper") cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr cmd.SysProcAttr = &syscall.SysProcAttr{ Credential: &syscall.Credential{ Uid: uint32(uid), Gid: uint32(gid), }, AmbientCaps: []uintptr{CAP_SYS_TIME, CAP_SYSLOG}, } if userns { cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWUSER const nobody = 65534 uid := os.Getuid() gid := os.Getgid() cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{{ ContainerID: int(nobody), HostID: int(uid), Size: int(1), }} cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{{ ContainerID: int(nobody), HostID: int(gid), Size: int(1), }} // Set credentials to run as user and group nobody. cmd.SysProcAttr.Credential = &syscall.Credential{ Uid: nobody, Gid: nobody, } } if err := cmd.Run(); err != nil { t.Fatal(err.Error()) } }
[ "\"container\"", "\"GO_BUILDER_NAME\"", "\"IN_KUBERNETES\"", "\"GO_BUILDER_NAME\"", "\"IN_KUBERNETES\"", "\"GO_WANT_HELPER_PROCESS\"", "\"GO_WANT_HELPER_PROCESS\"", "\"GO_WANT_HELPER_PROCESS\"" ]
[]
[ "GO_BUILDER_NAME", "GO_WANT_HELPER_PROCESS", "IN_KUBERNETES", "container" ]
[]
["GO_BUILDER_NAME", "GO_WANT_HELPER_PROCESS", "IN_KUBERNETES", "container"]
go
4
0
djangae/tests/test_storage.py
# coding: utf-8 import cStringIO import httplib import os import urlparse from unittest import skipIf from google.appengine.api import urlfetch from google.appengine.api.images import TransformationError from django.core.files.base import File, ContentFile from django.test.utils import override_settings from djangae.contrib import sleuth from djangae.storage import BlobstoreStorage, CloudStorage, has_cloudstorage from djangae.test import TestCase from google.appengine.tools.devappserver2.gcs_server import GCSServer # _URL_STRING_MAP is {'GET': 1, etc} so reverse dict to convert int to string URL_INT_TO_STRING_MAP = {v: k for k, v in urlfetch._URL_STRING_MAP.items()} @skipIf(not has_cloudstorage, "Cloud Storage not available") class CloudStorageTests(TestCase): @override_settings(CLOUD_STORAGE_BUCKET='test_bucket') def test_basic_actions(self): storage = CloudStorage() f = ContentFile('content', name='my_file') filename = storage.save('tmp', f) self.assertIsInstance(filename, basestring) self.assertTrue(filename.endswith('tmp')) self.assertTrue(storage.exists(filename)) self.assertEqual(storage.size(filename), len('content')) url = storage.url(filename) self.assertIsInstance(url, basestring) self.assertNotEqual(url, '') abs_url = urlparse.urlunparse( ('http', os.environ['HTTP_HOST'], url, None, None, None) ) response = urlfetch.fetch(abs_url) self.assertEqual(response.status_code, httplib.OK) self.assertEqual(response.content, 'content') f = storage.open(filename) self.assertIsInstance(f, File) self.assertEqual(f.read(), 'content') # Delete it storage.delete(filename) self.assertFalse(storage.exists(filename)) @override_settings(CLOUD_STORAGE_BUCKET='test_bucket') def test_supports_nameless_files(self): storage = CloudStorage() f2 = ContentFile('nameless-content') storage.save('tmp2', f2) class BlobstoreStorageTests(TestCase): def test_basic_actions(self): storage = BlobstoreStorage() # Save a new file f = ContentFile('content', name='my_file') filename = storage.save('tmp', f) self.assertIsInstance(filename, basestring) self.assertTrue(filename.endswith('tmp')) # Check .exists(), .size() and .url() self.assertTrue(storage.exists(filename)) self.assertEqual(storage.size(filename), len('content')) url = storage.url(filename) self.assertIsInstance(url, basestring) self.assertNotEqual(url, '') # Check URL can be fetched abs_url = urlparse.urlunparse( ('http', os.environ['HTTP_HOST'], url, None, None, None) ) response = urlfetch.fetch(abs_url) self.assertEqual(response.status_code, httplib.OK) self.assertEqual(response.content, 'content') # Open it, read it # NOTE: Blobstore doesn’t support updating existing files. f = storage.open(filename) self.assertIsInstance(f, File) self.assertEqual(f.read(), 'content') # Delete it storage.delete(filename) self.assertFalse(storage.exists(filename)) def test_supports_nameless_files(self): storage = BlobstoreStorage() f2 = ContentFile('nameless-content') storage.save('tmp2', f2) def test_transformation_error(self): storage = BlobstoreStorage() with sleuth.detonate('djangae.storage.get_serving_url', TransformationError): self.assertIsNone(storage.url('thing'))
[]
[]
[ "HTTP_HOST" ]
[]
["HTTP_HOST"]
python
1
0
pkg/controller/middleware/logger.go
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package middleware import ( "fmt" "net/http" "os" "strings" "github.com/google/exposure-notifications-server/pkg/logging" "github.com/google/exposure-notifications-verification-server/pkg/controller" "go.uber.org/zap" "github.com/gorilla/mux" ) const ( // googleCloudTraceHeader is the header with trace data. googleCloudTraceHeader = "X-Cloud-Trace-Context" // googleCloudTraceKey is the key in the structured log where trace information // is expected to be present. googleCloudTraceKey = "logging.googleapis.com/trace" ) // googleCloudProjectID is the project id, populated by Terraform during service // deployment. var googleCloudProjectID = os.Getenv("PROJECT_ID") // PopulateLogger populates the logger onto the context. func PopulateLogger(originalLogger *zap.SugaredLogger) mux.MiddlewareFunc { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() logger := originalLogger // Only override the logger if it's the default logger. This is only used // for testing and is intentionally a strict object equality check because // the default logger is a global default in the logger package. if existing := logging.FromContext(ctx); existing == logging.DefaultLogger() { logger = existing } // If there's a request ID, set that on the logger. if id := controller.RequestIDFromContext(ctx); id != "" { logger = logger.With("request_id", id) } // On Google Cloud, extract the trace context and add it to the logger. if v := r.Header.Get(googleCloudTraceHeader); v != "" && googleCloudProjectID != "" { parts := strings.Split(v, "/") if len(parts) > 0 && len(parts[0]) > 0 { val := fmt.Sprintf("projects/%s/traces/%s", googleCloudProjectID, parts[0]) logger = logger.With(googleCloudTraceKey, val) } } ctx = logging.WithLogger(ctx, logger) r = r.Clone(ctx) next.ServeHTTP(w, r) }) } }
[ "\"PROJECT_ID\"" ]
[]
[ "PROJECT_ID" ]
[]
["PROJECT_ID"]
go
1
0
go_scripts/tests/update_tests/credentials_test.go
package update_tests import ( "fmt" "github.com/a-novel/divanDocker/config" "github.com/a-novel/divanDocker/credentials" "github.com/a-novel/divanDocker/resources" "github.com/a-novel/divanDocker/tests/test_utils" "github.com/a-novel/divanDocker/utils" "os" "testing" ) func TestCredentialsUpdate(t *testing.T) { test_utils.Clean(t) test_utils.Launch(t) defer test_utils.Clean(t) dconf := config.Config{ Resources: resources.Resources{ RamSize: 1024, FtsRamSize: 256, IndexRamSize: 256, }, Credentials: credentials.Credentials{ Username: "Administrator", Password: "password", }, } test_utils.WriteConfigAuto(dconf, t) defer test_utils.DeleteConfigAuto(t) test_utils.ShouldPass(false, "credentials are setup") if !utils.IsClusterSetup(&dconf.Credentials) { timer := test_utils.Time("") timer.EndWithFatalError("cannot access cluster with credentials", t) } oldCreds := dconf.Credentials dconf.Credentials.Username = "admin" dconf.Credentials.Password = "123456" timer := test_utils.Time("updating credentials manually (required)...") if _, err := utils.Command("sh", "-c", fmt.Sprintf( "%s setting-cluster -c 127.0.0.1 --username \"%s\" --password \"%s\" --cluster-username \"%s\" --cluster-password \"%s\"", os.Getenv("COUCHBASE_CLI_PATH"), oldCreds.Username, oldCreds.Password, dconf.Credentials.Username, dconf.Credentials.Password, )); err != nil { timer.EndWithFatalError(fmt.Sprintf("cannot update credentials : %s", err.Error()), t) } else { timer.End("updated credentials successfully") } test_utils.WriteConfigAuto(dconf, t) test_utils.ShouldPass(false, "credentials are updated") if !utils.IsClusterSetup(&dconf.Credentials) { timer := test_utils.Time("") timer.EndWithFatalError("cannot access cluster with updated credentials", t) } }
[ "\"COUCHBASE_CLI_PATH\"" ]
[]
[ "COUCHBASE_CLI_PATH" ]
[]
["COUCHBASE_CLI_PATH"]
go
1
0
test/functional/test_framework/test_node.py
#!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for terracashd node under test""" import decimal import errno import http.client import json import logging import os import subprocess import time from .util import ( assert_equal, get_rpc_proxy, rpc_url, wait_until, ) from .authproxy import JSONRPCException BITCOIND_PROC_WAIT_TIMEOUT = 60 class TestNode(): """A class for representing a terracashd node under test. This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node To make things easier for the test writer, a bit of magic is happening under the covers. Any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir): self.index = i self.datadir = os.path.join(dirname, "node" + str(i)) self.rpchost = rpchost if timewait: self.rpc_timeout = timewait else: # Wait for up to 60 seconds for the RPC server to respond self.rpc_timeout = 60 if binary is None: self.binary = os.getenv("LITECOIND", "terracashd") else: self.binary = binary self.stderr = stderr self.coverage_dir = coverage_dir # Most callers will just need to add extra args to the standard list below. For those callers that need more flexibity, they can just set the args property directly. self.extra_args = extra_args self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i] self.cli = TestNodeCLI(os.getenv("LITECOINCLI", "terracash-cli"), self.datadir) self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.log = logging.getLogger('TestFramework.node%d' % i) def __getattr__(self, *args, **kwargs): """Dispatches any unrecognised messages to the RPC connection.""" assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection" return self.rpc.__getattr__(*args, **kwargs) def start(self, extra_args=None, stderr=None): """Start the node.""" if extra_args is None: extra_args = self.extra_args if stderr is None: stderr = self.stderr self.process = subprocess.Popen(self.args + extra_args, stderr=stderr) self.running = True self.log.debug("terracashd started, waiting for RPC to come up") def wait_for_rpc_connection(self): """Sets up an RPC connection to the terracashd process. Returns False if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): assert self.process.poll() is None, "terracashd exited with status %i during initialization" % self.process.returncode try: self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir) self.rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC connection is up self.rpc_connected = True self.url = self.rpc.url self.log.debug("RPC successfully started") return except IOError as e: if e.errno != errno.ECONNREFUSED: # Port not yet open? raise # unknown IO error except JSONRPCException as e: # Initialization phase if e.error['code'] != -28: # RPC in warmup? 
raise # unknown JSON RPC exception except ValueError as e: # cookie file not found and no rpcuser or rpcassword. bitcoind still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) raise AssertionError("Unable to connect to terracashd") def get_wallet_rpc(self, wallet_name): assert self.rpc_connected assert self.rpc wallet_path = "wallet/%s" % wallet_name return self.rpc / wallet_path def stop_node(self): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: self.stop() except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is None: return False # process has stopped. Assert that it didn't return an error code. assert_equal(return_code, 0) self.running = False self.process = None self.rpc_connected = False self.rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): wait_until(self.is_node_stopped, timeout=timeout) def node_encrypt_wallet(self, passphrase): """"Encrypts the wallet. This causes terracashd to shutdown, so this method takes care of cleaning up resources.""" self.encryptwallet(passphrase) self.wait_until_stopped() class TestNodeCLI(): """Interface to terracash-cli for an individual node""" def __init__(self, binary, datadir): self.args = [] self.binary = binary self.datadir = datadir self.input = None def __call__(self, *args, input=None): # TestNodeCLI is callable with terracash-cli command-line args self.args = [str(arg) for arg in args] self.input = input return self def __getattr__(self, command): def dispatcher(*args, **kwargs): return self.send_cli(command, *args, **kwargs) return dispatcher def send_cli(self, command, *args, **kwargs): """Run terracash-cli command. Deserializes returned string as python object.""" pos_args = [str(arg) for arg in args] named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()] assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same terracash-cli call" p_args = [self.binary, "-datadir=" + self.datadir] + self.args if named_args: p_args += ["-named"] p_args += [command] + pos_args + named_args process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr) return json.loads(cli_stdout, parse_float=decimal.Decimal)
[]
[]
[ "LITECOIND", "LITECOINCLI" ]
[]
["LITECOIND", "LITECOINCLI"]
python
2
0
src/cmd/root.go
/* * Copyright © 2019 – 2021 Red Hat Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cmd import ( "bufio" "errors" "fmt" "io/ioutil" "os" "os/user" "path/filepath" "strings" "syscall" "github.com/containers/toolbox/pkg/podman" "github.com/containers/toolbox/pkg/utils" "github.com/containers/toolbox/pkg/version" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) var ( cgroupsVersion int currentUser *user.User executable string executableBase string rootCmd = &cobra.Command{ Use: "toolbox", Short: "Tool for containerized command line environments on Linux", PersistentPreRunE: preRun, RunE: rootRun, Version: version.GetVersion(), } rootFlags struct { assumeYes bool logLevel string logPodman bool verbose int } workingDirectory string ) func Execute() { if err := setUpGlobals(); err != nil { fmt.Fprintf(os.Stderr, "Error: %s\n", err) os.Exit(1) } if err := rootCmd.Execute(); err != nil { os.Exit(1) } os.Exit(0) } func init() { persistentFlags := rootCmd.PersistentFlags() persistentFlags.BoolVarP(&rootFlags.assumeYes, "assumeyes", "y", false, "Automatically answer yes for all questions") persistentFlags.StringVar(&rootFlags.logLevel, "log-level", "error", "Log messages at the specified level: trace, debug, info, warn, error, fatal or panic") persistentFlags.BoolVar(&rootFlags.logPodman, "log-podman", false, "Show the log output of Podman. 
The log level is handled by the log-level option") persistentFlags.CountVarP(&rootFlags.verbose, "verbose", "v", "Set log-level to 'debug'") rootCmd.SetHelpFunc(rootHelp) rootCmd.SetUsageFunc(rootUsage) } func preRun(cmd *cobra.Command, args []string) error { cmd.Root().SilenceUsage = true if err := setUpLoggers(); err != nil { return err } logrus.Debugf("Running as real user ID %s", currentUser.Uid) logrus.Debugf("Resolved absolute path to the executable as %s", executable) if !utils.IsInsideContainer() { logrus.Debugf("Running on a cgroups v%d host", cgroupsVersion) if currentUser.Uid != "0" { logrus.Debugf("Checking if /etc/subgid and /etc/subuid have entries for user %s", currentUser.Username) if _, err := validateSubIDFile("/etc/subuid"); err != nil { return newSubIDFileError() } if _, err := validateSubIDFile("/etc/subgid"); err != nil { return newSubIDFileError() } } } toolboxPath := os.Getenv("TOOLBOX_PATH") if toolboxPath == "" { if utils.IsInsideContainer() { return errors.New("TOOLBOX_PATH not set") } os.Setenv("TOOLBOX_PATH", executable) toolboxPath = os.Getenv("TOOLBOX_PATH") } logrus.Debugf("TOOLBOX_PATH is %s", toolboxPath) if err := migrate(); err != nil { return err } return nil } func rootHelp(cmd *cobra.Command, args []string) { if utils.IsInsideContainer() { if !utils.IsInsideToolboxContainer() { fmt.Fprintf(os.Stderr, "Error: this is not a toolbox container\n") return } if _, err := utils.ForwardToHost(); err != nil { fmt.Fprintf(os.Stderr, "Error: %s\n", err) return } return } manual := "toolbox" for _, arg := range args { if !strings.HasPrefix(arg, "-") { manual = manual + "-" + arg break } } if err := utils.ShowManual(manual); err != nil { fmt.Fprintf(os.Stderr, "Error: %s\n", err) return } } func rootRun(cmd *cobra.Command, args []string) error { if len(args) != 0 { panic("unexpected argument: commands known or unknown shouldn't reach here") } if utils.IsInsideContainer() { if !utils.IsInsideToolboxContainer() { return errors.New("this is not a toolbox container") } if _, err := utils.ForwardToHost(); err != nil { return err } return nil } image, release, err := utils.ResolveImageName("", "", "") if err != nil { return err } container, err := utils.ResolveContainerName("", image, release) if err != nil { return err } userShell := os.Getenv("SHELL") if userShell == "" { return errors.New("failed to get the current user's default shell") } command := []string{userShell, "-l"} hostID, err := utils.GetHostID() if err != nil { return fmt.Errorf("failed to get the host ID: %w", err) } hostVariantID, err := utils.GetHostVariantID() if err != nil { return errors.New("failed to get the host VARIANT_ID") } var emitEscapeSequence bool if hostID == "fedora" && (hostVariantID == "silverblue" || hostVariantID == "workstation") { emitEscapeSequence = true } if err := runCommand(container, true, image, release, command, emitEscapeSequence, true, false); err != nil { return err } return nil } func rootUsage(cmd *cobra.Command) error { err := fmt.Errorf("Run '%s --help' for usage.", executableBase) fmt.Fprintf(os.Stderr, "%s", err) return err } func migrate() error { logrus.Debug("Migrating to newer Podman") if utils.IsInsideContainer() { return nil } configDir, err := os.UserConfigDir() if err != nil { logrus.Debugf("Migrating to newer Podman: failed to get the user config directory: %s", err) return errors.New("failed to get the user config directory") } toolboxConfigDir := configDir + "/toolbox" stampPath := toolboxConfigDir + "/podman-system-migrate" logrus.Debugf("Toolbox 
config directory is %s", toolboxConfigDir) podmanVersion, err := podman.GetVersion() if err != nil { logrus.Debugf("Migrating to newer Podman: failed to get the Podman version: %s", err) return errors.New("failed to get the Podman version") } logrus.Debugf("Current Podman version is %s", podmanVersion) err = os.MkdirAll(toolboxConfigDir, 0775) if err != nil { logrus.Debugf("Migrating to newer Podman: failed to create configuration directory %s: %s", toolboxConfigDir, err) return errors.New("failed to create configuration directory") } toolboxRuntimeDirectory, err := utils.GetRuntimeDirectory(currentUser) if err != nil { return err } migrateLock := toolboxRuntimeDirectory + "/migrate.lock" migrateLockFile, err := os.Create(migrateLock) if err != nil { logrus.Debugf("Migrating to newer Podman: failed to create migration lock file %s: %s", migrateLock, err) return errors.New("failed to create migration lock file") } defer migrateLockFile.Close() migrateLockFD := migrateLockFile.Fd() migrateLockFDInt := int(migrateLockFD) if err := syscall.Flock(migrateLockFDInt, syscall.LOCK_EX); err != nil { logrus.Debugf("Migrating to newer Podman: failed to acquire migration lock on %s: %s", migrateLock, err) return errors.New("failed to acquire migration lock") } stampBytes, err := ioutil.ReadFile(stampPath) if err != nil { if !os.IsNotExist(err) { logrus.Debugf("Migrating to newer Podman: failed to read migration stamp file %s: %s", stampPath, err) return errors.New("failed to read migration stamp file") } } else { stampString := string(stampBytes) podmanVersionOld := strings.TrimSpace(stampString) if podmanVersionOld != "" { logrus.Debugf("Old Podman version is %s", podmanVersionOld) if podmanVersion == podmanVersionOld { logrus.Debugf("Migration not needed: Podman version %s is unchanged", podmanVersion) return nil } if !podman.CheckVersion(podmanVersionOld) { logrus.Debugf("Migration not needed: Podman version %s is old", podmanVersion) return nil } } } if err = podman.SystemMigrate(""); err != nil { logrus.Debugf("Migrating to newer Podman: failed to migrate containers: %s", err) return errors.New("failed to migrate containers") } logrus.Debugf("Migration to Podman version %s was ok", podmanVersion) logrus.Debugf("Updating Podman version in %s", stampPath) podmanVersionBytes := []byte(podmanVersion + "\n") err = ioutil.WriteFile(stampPath, podmanVersionBytes, 0664) if err != nil { logrus.Debugf("Migrating to newer Podman: failed to update Podman version in migration stamp file %s: %s", stampPath, err) return errors.New("failed to update Podman version in migration stamp file") } return nil } func newSubIDFileError() error { var builder strings.Builder fmt.Fprintf(&builder, "/etc/subgid and /etc/subuid don't have entries for user %s\n", currentUser.Username) fmt.Fprintf(&builder, "See the podman(1), subgid(5), subuid(5) and usermod(8) manuals for more\n") fmt.Fprintf(&builder, "information.") errMsg := builder.String() return errors.New(errMsg) } func setUpGlobals() error { var err error if !utils.IsInsideContainer() { cgroupsVersion, err = utils.GetCgroupsVersion() if err != nil { return fmt.Errorf("failed to get the cgroups version: %w", err) } } currentUser, err = user.Current() if err != nil { return fmt.Errorf("failed to get the current user: %w", err) } executable, err = os.Executable() if err != nil { return fmt.Errorf("failed to get the path to the executable: %w", err) } executable, err = filepath.EvalSymlinks(executable) if err != nil { return fmt.Errorf("failed to resolve absolute path to 
the executable: %w", err) } executableBase = filepath.Base(executable) workingDirectory, err = os.Getwd() if err != nil { return fmt.Errorf("failed to get the working directory: %w", err) } return nil } func setUpLoggers() error { logrus.SetOutput(os.Stderr) logrus.SetFormatter(&logrus.TextFormatter{ DisableTimestamp: true, }) if rootFlags.verbose > 0 { rootFlags.logLevel = "debug" } logLevel, err := logrus.ParseLevel(rootFlags.logLevel) if err != nil { return fmt.Errorf("failed to parse log-level: %w", err) } logrus.SetLevel(logLevel) if rootFlags.verbose > 1 { rootFlags.logPodman = true } if rootFlags.logPodman { podman.SetLogLevel(logLevel) } return nil } func validateSubIDFile(path string) (bool, error) { logrus.Debugf("Validating sub-ID file %s", path) file, err := os.Open(path) if err != nil { logrus.Debugf("Validating sub-ID file: failed to open %s: %s", path, err) return false, fmt.Errorf("failed to open %s", path) } scanner := bufio.NewScanner(file) scanner.Split(bufio.ScanLines) prefix := currentUser.Username + ":" for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, prefix) { return true, nil } } return false, fmt.Errorf("failed to find an entry for user %s in %s", currentUser.Username, path) }
[ "\"TOOLBOX_PATH\"", "\"TOOLBOX_PATH\"", "\"SHELL\"" ]
[]
[ "SHELL", "TOOLBOX_PATH" ]
[]
["SHELL", "TOOLBOX_PATH"]
go
2
0
python/ray/worker.py
from contextlib import contextmanager import colorama import atexit import faulthandler import hashlib import inspect import io import json import logging import os import redis from six.moves import queue import sys import threading import time import traceback # Ray modules import ray.cloudpickle as pickle import ray.gcs_utils import ray.memory_monitor as memory_monitor import ray.node import ray.parameter import ray.ray_constants as ray_constants import ray.remote_function import ray.serialization as serialization import ray.services as services import ray import setproctitle import ray.signature import ray.state from ray import ( ActorID, JobID, ObjectID, Language, ) from ray import import_thread from ray import profiling from ray.exceptions import ( RayConnectionError, RayError, RayTaskError, ObjectStoreFullError, ) from ray.function_manager import FunctionActorManager from ray.utils import (_random_string, check_oversized_pickle, is_cython, setup_logger, create_and_init_new_worker_log, open_log) SCRIPT_MODE = 0 WORKER_MODE = 1 LOCAL_MODE = 2 ERROR_KEY_PREFIX = b"Error:" # Logger for this module. It should be configured at the entry point # into the program using Ray. Ray provides a default configuration at # entry/init points. logger = logging.getLogger(__name__) class ActorCheckpointInfo: """Information used to maintain actor checkpoints.""" __slots__ = [ # Number of tasks executed since last checkpoint. "num_tasks_since_last_checkpoint", # Timestamp of the last checkpoint, in milliseconds. "last_checkpoint_timestamp", # IDs of the previous checkpoints. "checkpoint_ids", ] def __init__(self, num_tasks_since_last_checkpoint, last_checkpoint_timestamp, checkpoint_ids): self.num_tasks_since_last_checkpoint = num_tasks_since_last_checkpoint self.last_checkpoint_timestamp = last_checkpoint_timestamp self.checkpoint_ids = checkpoint_ids class Worker: """A class used to define the control flow of a worker process. Note: The methods in this class are considered unexposed to the user. The functions outside of this class are considered exposed. Attributes: connected (bool): True if Ray has been started and False otherwise. node (ray.node.Node): The node this worker is attached to. mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and WORKER_MODE. cached_functions_to_run (List): A list of functions to run on all of the workers that should be exported as soon as connect is called. """ def __init__(self): """Initialize a Worker object.""" self.node = None self.mode = None self.cached_functions_to_run = [] self.actor_init_error = None self.actors = {} # Information used to maintain actor checkpoints. self.actor_checkpoint_info = {} self.actor_task_counter = 0 # When the worker is constructed. Record the original value of the # CUDA_VISIBLE_DEVICES environment variable. self.original_gpu_ids = ray.utils.get_cuda_visible_devices() self.memory_monitor = memory_monitor.MemoryMonitor() # A dictionary that maps from driver id to SerializationContext # TODO: clean up the SerializationContext once the job finished. self.serialization_context_map = {} self.function_actor_manager = FunctionActorManager(self) # This event is checked regularly by all of the threads so that they # know when to exit. self.threads_stopped = threading.Event() # Index of the current session. This number will # increment every time when `ray.shutdown` is called. self._session_index = 0 # Functions to run to process the values returned by ray.get. 
Each # postprocessor must take two arguments ("object_ids", and "values"). self._post_get_hooks = [] @property def connected(self): return self.node is not None @property def node_ip_address(self): self.check_connected() return self.node.node_ip_address @property def load_code_from_local(self): self.check_connected() return self.node.load_code_from_local @property def current_job_id(self): if hasattr(self, "core_worker"): return self.core_worker.get_current_job_id() return JobID.nil() @property def actor_id(self): if hasattr(self, "core_worker"): return self.core_worker.get_actor_id() return ActorID.nil() @property def current_task_id(self): return self.core_worker.get_current_task_id() @property def current_session_and_job(self): """Get the current session index and job id as pair.""" assert isinstance(self._session_index, int) assert isinstance(self.current_job_id, ray.JobID) return self._session_index, self.current_job_id def mark_actor_init_failed(self, error): """Called to mark this actor as failed during initialization.""" self.actor_init_error = error def reraise_actor_init_error(self): """Raises any previous actor initialization error.""" if self.actor_init_error is not None: raise self.actor_init_error def get_serialization_context(self, job_id=None): """Get the SerializationContext of the job that this worker is processing. Args: job_id: The ID of the job that indicates which job to get the serialization context for. Returns: The serialization context of the given job. """ # This function needs to be protected by a lock, because it will be # called by`register_class_for_serialization`, as well as the import # thread, from different threads. Also, this function will recursively # call itself, so we use RLock here. if job_id is None: job_id = self.current_job_id with self.lock: if job_id not in self.serialization_context_map: self.serialization_context_map[ job_id] = serialization.SerializationContext(self) return self.serialization_context_map[job_id] def check_connected(self): """Check if the worker is connected. Raises: Exception: An exception is raised if the worker is not connected. """ if not self.connected: raise RayConnectionError("Ray has not been started yet. You can " "start Ray with 'ray.init()'.") def set_mode(self, mode): """Set the mode of the worker. The mode SCRIPT_MODE should be used if this Worker is a driver that is being run as a Python script or interactively in a shell. It will print information about task failures. The mode WORKER_MODE should be used if this Worker is not a driver. It will not print information about tasks. The mode LOCAL_MODE should be used if this Worker is a driver and if you want to run the driver in a manner equivalent to serial Python for debugging purposes. It will not send remote function calls to the scheduler and will instead execute them in a blocking fashion. Args: mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE. """ self.mode = mode def put_object(self, value, object_id=None, pin_object=True): """Put value in the local object store with object id `objectid`. This assumes that the value for `objectid` has not yet been placed in the local object store. If the plasma store is full, the worker will automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each retry will delay for an exponentially doubling amount of time, starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception will be raised. Args: value: The value to put in the object store. object_id (object_id.ObjectID): The object ID of the value to be put. 
If None, one will be generated. pin_object: If set, the object will be pinned at the raylet. Returns: object_id.ObjectID: The object ID the object was put under. Raises: ray.exceptions.ObjectStoreFullError: This is raised if the attempt to store the object fails because the object store is full even after multiple retries. """ # Make sure that the value is not an object ID. if isinstance(value, ObjectID): raise TypeError( "Calling 'put' on an ray.ObjectID is not allowed " "(similarly, returning an ray.ObjectID from a remote " "function is not allowed). If you really want to " "do this, you can wrap the ray.ObjectID in a list and " "call 'put' on it (or return it).") if self.mode == LOCAL_MODE: assert object_id is None, ("Local Mode does not support " "inserting with an objectID") serialized_value = self.get_serialization_context().serialize(value) # This *must* be the first place that we construct this python # ObjectID because an entry with 0 local references is created when # the object is Put() in the core worker, expecting that this python # reference will be created. If another reference is created and # removed before this one, it will corrupt the state in the # reference counter. return ray.ObjectID( self.core_worker.put_serialized_object( serialized_value, object_id=object_id, pin_object=pin_object)) def deserialize_objects(self, data_metadata_pairs, object_ids): context = self.get_serialization_context() return context.deserialize_objects(data_metadata_pairs, object_ids) def get_objects(self, object_ids, timeout=None): """Get the values in the object store associated with the IDs. Return the values from the local object store for object_ids. This will block until all the values for object_ids have been written to the local object store. Args: object_ids (List[object_id.ObjectID]): A list of the object IDs whose values should be retrieved. timeout (float): timeout (float): The maximum amount of time in seconds to wait before returning. """ # Make sure that the values are object IDs. for object_id in object_ids: if not isinstance(object_id, ObjectID): raise TypeError( "Attempting to call `get` on the value {}, " "which is not an ray.ObjectID.".format(object_id)) timeout_ms = int(timeout * 1000) if timeout else -1 data_metadata_pairs = self.core_worker.get_objects( object_ids, self.current_task_id, timeout_ms) return self.deserialize_objects(data_metadata_pairs, object_ids) def run_function_on_all_workers(self, function, run_on_other_drivers=False): """Run arbitrary code on all of the workers. This function will first be run on the driver, and then it will be exported to all of the workers to be run. It will also be run on any new workers that register later. If ray.init has not been called yet, then cache the function and export it later. Args: function (Callable): The function to run on all of the workers. It takes only one argument, a worker info dict. If it returns anything, its return values will not be used. run_on_other_drivers: The boolean that indicates whether we want to run this function on other drivers. One case is we may need to share objects across drivers. """ # If ray.init has not been called yet, then cache the function and # export it when connect is called. Otherwise, run the function on all # workers. if self.mode is None: self.cached_functions_to_run.append(function) else: # Attempt to pickle the function before we need it. This could # fail, and it is more convenient if the failure happens before we # actually run the function locally. 
pickled_function = pickle.dumps(function) function_to_run_id = hashlib.sha1(pickled_function).digest() key = b"FunctionsToRun:" + function_to_run_id # First run the function on the driver. # We always run the task locally. function({"worker": self}) # Check if the function has already been put into redis. function_exported = self.redis_client.setnx(b"Lock:" + key, 1) if not function_exported: # In this case, the function has already been exported, so # we don't need to export it again. return check_oversized_pickle(pickled_function, function.__name__, "function", self) # Run the function on all workers. self.redis_client.hmset( key, { "job_id": self.current_job_id.binary(), "function_id": function_to_run_id, "function": pickled_function, "run_on_other_drivers": str(run_on_other_drivers), }) self.redis_client.rpush("Exports", key) # TODO(rkn): If the worker fails after it calls setnx and before it # successfully completes the hmset and rpush, then the program will # most likely hang. This could be fixed by making these three # operations into a transaction (or by implementing a custom # command that does all three things). def main_loop(self): """The main loop a worker runs to receive and execute tasks.""" def sigterm_handler(signum, frame): shutdown(True) sys.exit(1) ray.utils.set_sigterm_handler(sigterm_handler) self.core_worker.run_task_loop() sys.exit(0) def get_gpu_ids(): """Get the IDs of the GPUs that are available to the worker. If the CUDA_VISIBLE_DEVICES environment variable was set when the worker started up, then the IDs returned by this method will be a subset of the IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range [0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has. Returns: A list of GPU IDs. """ # TODO(ilr) Handle inserting resources in local mode all_resource_ids = global_worker.core_worker.resource_ids() assigned_ids = [ resource_id for resource_id, _ in all_resource_ids.get("GPU", []) ] # If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in # the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be # returned). if global_worker.original_gpu_ids is not None: assigned_ids = [ global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids ] # Give all GPUs in local_mode. if global_worker.mode == LOCAL_MODE: max_gpus = global_worker.node.get_resource_spec().num_gpus return global_worker.original_gpu_ids[:max_gpus] return assigned_ids def get_resource_ids(): """Get the IDs of the resources that are available to the worker. Returns: A dictionary mapping the name of a resource to a list of pairs, where each pair consists of the ID of a resource and the fraction of that resource reserved for this worker. """ if _mode() == LOCAL_MODE: raise RuntimeError("ray.get_resource_ids() currently does not work in " "local_mode.") return global_worker.core_worker.resource_ids() def get_webui_url(): """Get the URL to access the web UI. Note that the URL does not specify which node the web UI is on. Returns: The URL of the web UI as a string. """ if _global_node is None: raise RuntimeError("Ray has not been initialized/connected.") return _global_node.webui_url global_worker = Worker() """Worker: The global Worker object for this worker process. We use a global Worker object to ensure that there is a single worker object per worker process. """ _global_node = None """ray.node.Node: The global node object that is created by ray.init().""" def print_failed_task(task_status): """Print information about failed tasks. 
Args: task_status (Dict): A dictionary containing the name, operationid, and error message for a failed task. """ logger.error(""" Error: Task failed Function Name: {} Task ID: {} Error Message: \n{} """.format(task_status["function_name"], task_status["operationid"], task_status["error_message"])) def init(address=None, redis_address=None, redis_port=None, num_cpus=None, num_gpus=None, memory=None, object_store_memory=None, resources=None, driver_object_store_memory=None, redis_max_memory=None, log_to_driver=True, node_ip_address=ray_constants.NODE_DEFAULT_IP, object_id_seed=None, local_mode=False, redirect_worker_output=None, redirect_output=None, ignore_reinit_error=False, num_redis_shards=None, redis_max_clients=None, redis_password=ray_constants.REDIS_DEFAULT_PASSWORD, plasma_directory=None, huge_pages=False, include_java=False, include_dashboard=None, dashboard_host="localhost", dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT, job_id=None, configure_logging=True, logging_level=logging.INFO, logging_format=ray_constants.LOGGER_FORMAT, plasma_store_socket_name=None, raylet_socket_name=None, temp_dir=None, load_code_from_local=False, java_worker_options=None, use_pickle=True, _internal_config=None, lru_evict=False): """ Connect to an existing Ray cluster or start one and connect to it. This method handles two cases; either a Ray cluster already exists and we just attach this driver to it or we start all of the processes associated with a Ray cluster and attach to the newly started cluster. To start Ray and all of the relevant processes, use this as follows: .. code-block:: python ray.init() To connect to an existing Ray cluster, use this as follows (substituting in the appropriate address): .. code-block:: python ray.init(address="123.45.67.89:6379") You can also define an environment variable called `RAY_ADDRESS` in the same format as the `address` parameter to connect to an existing cluster with ray.init(). Args: address (str): The address of the Ray cluster to connect to. If this address is not provided, then this command will start Redis, a raylet, a plasma store, a plasma manager, and some workers. It will also kill these processes when Python exits. If the driver is running on a node in a Ray cluster, using `auto` as the value tells the driver to detect the the cluster, removing the need to specify a specific node address. redis_address (str): Deprecated; same as address. redis_port (int): The port that the primary Redis shard should listen to. If None, then a random port will be chosen. num_cpus (int): Number of CPUs the user wishes to assign to each raylet. num_gpus (int): Number of GPUs the user wishes to assign to each raylet. resources: A dictionary mapping the names of custom resources to the quantities for them available. memory: The amount of memory (in bytes) that is available for use by workers requesting memory resources. By default, this is automatically set based on available system memory. object_store_memory: The amount of memory (in bytes) to start the object store with. By default, this is automatically set based on available system memory, subject to a 20GB cap. redis_max_memory: The max amount of memory (in bytes) to allow each redis shard to use. Once the limit is exceeded, redis will start LRU eviction of entries. This only applies to the sharded redis tables (task, object, and profile tables). By default, this is autoset based on available system memory, subject to a 10GB cap. 
log_to_driver (bool): If true, the output from all of the worker processes on all nodes will be directed to the driver. node_ip_address (str): The IP address of the node that we are on. object_id_seed (int): Used to seed the deterministic generation of object IDs. The same value can be used across multiple runs of the same driver in order to generate the object IDs in a consistent manner. However, the same ID should not be used for different drivers. local_mode (bool): If true, the code will be executed serially. This is useful for debugging. driver_object_store_memory (int): Limit the amount of memory the driver can use in the object store for creating objects. By default, this is autoset based on available system memory, subject to a 20GB cap. ignore_reinit_error: If true, Ray suppresses errors from calling ray.init() a second time. Ray won't be restarted. num_redis_shards: The number of Redis shards to start in addition to the primary Redis shard. redis_max_clients: If provided, attempt to configure Redis with this maxclients number. redis_password (str): Prevents external clients without the password from connecting to Redis if provided. plasma_directory: A directory where the Plasma memory mapped files will be created. huge_pages: Boolean flag indicating whether to start the Object Store with hugetlbfs support. Requires plasma_directory. include_java: Boolean flag indicating whether or not to enable java workers. include_dashboard: Boolean flag indicating whether or not to start the Ray dashboard, which displays the status of the Ray cluster. If this argument is None, then the UI will be started if the relevant dependencies are present. dashboard_host: The host to bind the dashboard server to. Can either be localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces). By default, this is set to localhost to prevent access from external machines. dashboard_port: The port to bind the dashboard server to. Defaults to 8265. job_id: The ID of this job. configure_logging: True (default) if configuration of logging is allowed here. Otherwise, the user may want to configure it separately. logging_level: Logging level, defaults to logging.INFO. Ignored unless "configure_logging" is true. logging_format: Logging format, defaults to string containing a timestamp, filename, line number, and message. See the source file ray_constants.py for details. Ignored unless "configure_logging" is true. plasma_store_socket_name (str): If provided, specifies the socket name used by the plasma store. raylet_socket_name (str): If provided, specifies the socket path used by the raylet process. temp_dir (str): If provided, specifies the root temporary directory for the Ray process. Defaults to an OS-specific conventional location, e.g., "/tmp/ray". load_code_from_local: Whether code should be loaded from a local module or from the GCS. java_worker_options: Overwrite the options to start Java workers. use_pickle: Deprecated. _internal_config (str): JSON configuration for overriding RayConfig defaults. For testing purposes ONLY. lru_evict (bool): If True, when an object store is full, it will evict objects in LRU order to make more space and when under memory pressure, ray.UnreconstructableError may be thrown. If False, then reference counting will be used to decide which objects are safe to evict and when under memory pressure, ray.ObjectStoreFullError may be thrown. Returns: Address information about the started processes. 
Raises: Exception: An exception is raised if an inappropriate combination of arguments is passed in. """ if not use_pickle: raise DeprecationWarning("The use_pickle argument is deprecated.") if redis_address is not None: raise DeprecationWarning("The redis_address argument is deprecated. " "Please use address instead.") if "RAY_ADDRESS" in os.environ: if redis_address is None and (address is None or address == "auto"): address = os.environ["RAY_ADDRESS"] else: raise RuntimeError( "Cannot use both the RAY_ADDRESS environment variable and " "the address argument of ray.init simultaneously. If you " "use RAY_ADDRESS to connect to a specific Ray cluster, " "please call ray.init() or ray.init(address=\"auto\") on the " "driver.") if redis_address is not None or address is not None: redis_address, _, _ = services.validate_redis_address( address, redis_address) if configure_logging: setup_logger(logging_level, logging_format) if local_mode: driver_mode = LOCAL_MODE else: driver_mode = SCRIPT_MODE if global_worker.connected: if ignore_reinit_error: logger.error("Calling ray.init() again after it has already been " "called.") return else: raise RuntimeError("Maybe you called ray.init twice by accident? " "This error can be suppressed by passing in " "'ignore_reinit_error=True' or by calling " "'ray.shutdown()' prior to 'ray.init()'.") # Convert hostnames to numerical IP address. if node_ip_address is not None: node_ip_address = services.address_to_ip(node_ip_address) raylet_ip_address = node_ip_address _internal_config = (json.loads(_internal_config) if _internal_config else {}) # Set the internal config options for LRU eviction. if lru_evict: # Turn off object pinning. if _internal_config.get("object_pinning_enabled", False): raise Exception( "Object pinning cannot be enabled if using LRU eviction.") _internal_config["object_pinning_enabled"] = False _internal_config["object_store_full_max_retries"] = -1 _internal_config["free_objects_period_milliseconds"] = 1000 global _global_node if redis_address is None: # In this case, we need to start a new cluster. ray_params = ray.parameter.RayParams( redis_address=redis_address, redis_port=redis_port, node_ip_address=node_ip_address, raylet_ip_address=raylet_ip_address, object_id_seed=object_id_seed, driver_mode=driver_mode, redirect_worker_output=redirect_worker_output, redirect_output=redirect_output, num_cpus=num_cpus, num_gpus=num_gpus, resources=resources, num_redis_shards=num_redis_shards, redis_max_clients=redis_max_clients, redis_password=redis_password, plasma_directory=plasma_directory, huge_pages=huge_pages, include_java=include_java, include_dashboard=include_dashboard, dashboard_host=dashboard_host, dashboard_port=dashboard_port, memory=memory, object_store_memory=object_store_memory, redis_max_memory=redis_max_memory, plasma_store_socket_name=plasma_store_socket_name, raylet_socket_name=raylet_socket_name, temp_dir=temp_dir, load_code_from_local=load_code_from_local, java_worker_options=java_worker_options, _internal_config=_internal_config, ) # Start the Ray processes. We set shutdown_at_exit=False because we # shutdown the node in the ray.shutdown call that happens in the atexit # handler. We still spawn a reaper process in case the atexit handler # isn't called. _global_node = ray.node.Node( head=True, shutdown_at_exit=False, spawn_reaper=True, ray_params=ray_params) else: # In this case, we are connecting to an existing cluster. 
if num_cpus is not None or num_gpus is not None: raise ValueError( "When connecting to an existing cluster, num_cpus " "and num_gpus must not be provided.") if resources is not None: raise ValueError("When connecting to an existing cluster, " "resources must not be provided.") if num_redis_shards is not None: raise ValueError("When connecting to an existing cluster, " "num_redis_shards must not be provided.") if redis_max_clients is not None: raise ValueError("When connecting to an existing cluster, " "redis_max_clients must not be provided.") if memory is not None: raise ValueError("When connecting to an existing cluster, " "memory must not be provided.") if object_store_memory is not None: raise ValueError("When connecting to an existing cluster, " "object_store_memory must not be provided.") if redis_max_memory is not None: raise ValueError("When connecting to an existing cluster, " "redis_max_memory must not be provided.") if plasma_directory is not None: raise ValueError("When connecting to an existing cluster, " "plasma_directory must not be provided.") if huge_pages: raise ValueError("When connecting to an existing cluster, " "huge_pages must not be provided.") if temp_dir is not None: raise ValueError("When connecting to an existing cluster, " "temp_dir must not be provided.") if plasma_store_socket_name is not None: raise ValueError("When connecting to an existing cluster, " "plasma_store_socket_name must not be provided.") if raylet_socket_name is not None: raise ValueError("When connecting to an existing cluster, " "raylet_socket_name must not be provided.") if java_worker_options is not None: raise ValueError("When connecting to an existing cluster, " "java_worker_options must not be provided.") if _internal_config is not None and len(_internal_config) != 0: raise ValueError("When connecting to an existing cluster, " "_internal_config must not be provided.") # In this case, we only need to connect the node. ray_params = ray.parameter.RayParams( node_ip_address=node_ip_address, raylet_ip_address=raylet_ip_address, redis_address=redis_address, redis_password=redis_password, object_id_seed=object_id_seed, temp_dir=temp_dir, load_code_from_local=load_code_from_local, _internal_config=_internal_config) _global_node = ray.node.Node( ray_params, head=False, shutdown_at_exit=False, spawn_reaper=False, connect_only=True) connect( _global_node, mode=driver_mode, log_to_driver=log_to_driver, worker=global_worker, driver_object_store_memory=driver_object_store_memory, job_id=job_id) for hook in _post_init_hooks: hook() return _global_node.address_info # Functions to run as callback after a successful ray init. _post_init_hooks = [] def shutdown(exiting_interpreter=False): """Disconnect the worker, and terminate processes started by ray.init(). This will automatically run at the end when a Python process that uses Ray exits. It is ok to run this twice in a row. The primary use case for this function is to cleanup state between tests. Note that this will clear any remote function definitions, actor definitions, and existing actors, so if you wish to use any previously defined remote functions or actors after calling ray.shutdown(), then you need to redefine them. If they were defined in an imported module, then you will need to reload the module. Args: exiting_interpreter (bool): True if this is called by the atexit hook and false otherwise. If we are exiting the interpreter, we will wait a little while to print any extra error messages. 
""" if exiting_interpreter and global_worker.mode == SCRIPT_MODE: # This is a duration to sleep before shutting down everything in order # to make sure that log messages finish printing. time.sleep(0.5) disconnect(exiting_interpreter) # We need to destruct the core worker here because after this function, # we will tear down any processes spawned by ray.init() and the background # IO thread in the core worker doesn't currently handle that gracefully. if hasattr(global_worker, "core_worker"): del global_worker.core_worker # Disconnect global state from GCS. ray.state.state.disconnect() # Shut down the Ray processes. global _global_node if _global_node is not None: _global_node.kill_all_processes(check_alive=False, allow_graceful=True) _global_node = None # TODO(rkn): Instead of manually resetting some of the worker fields, we # should simply set "global_worker" to equal "None" or something like that. global_worker.set_mode(None) global_worker._post_get_hooks = [] atexit.register(shutdown, True) # TODO(edoakes): this should only be set in the driver. def sigterm_handler(signum, frame): sys.exit(signum) try: ray.utils.set_sigterm_handler(sigterm_handler) except ValueError: logger.warning("Failed to set SIGTERM handler, processes might" "not be cleaned up properly on exit.") # Define a custom excepthook so that if the driver exits with an exception, we # can push that exception to Redis. normal_excepthook = sys.excepthook def custom_excepthook(type, value, tb): # If this is a driver, push the exception to GCS worker table. if global_worker.mode == SCRIPT_MODE: error_message = "".join(traceback.format_tb(tb)) worker_id = global_worker.worker_id worker_type = ray.gcs_utils.DRIVER worker_info = {"exception": error_message} ray.state.state.add_worker(worker_id, worker_type, worker_info) # Call the normal excepthook. normal_excepthook(type, value, tb) sys.excepthook = custom_excepthook # The last time we raised a TaskError in this process. We use this value to # suppress redundant error messages pushed from the workers. last_task_error_raise_time = 0 # The max amount of seconds to wait before printing out an uncaught error. UNCAUGHT_ERROR_GRACE_PERIOD = 5 def _set_log_file(file_name, worker_pid, old_obj, setter_func): # Line-buffer the output (mode 1). f = create_and_init_new_worker_log(file_name, worker_pid) # TODO (Alex): Python seems to always flush when writing. If that is no # longer true, then we need to manually flush the old buffer. # old_obj.flush() # TODO (Alex): Flush the c/c++ userspace buffers if necessary. # `fflush(stdout); cout.flush();` fileno = old_obj.fileno() # C++ logging requires redirecting the stdout file descriptor. Note that # dup2 will automatically close the old file descriptor before overriding # it. os.dup2(f.fileno(), fileno) # We also manually set sys.stdout and sys.stderr because that seems to # have an effect on the output buffering. Without doing this, stdout # and stderr are heavily buffered resulting in seemingly lost logging # statements. We never want to close the stdout file descriptor, dup2 will # close it when necessary and we don't want python's GC to close it. setter_func(open_log(fileno, closefd=False)) return os.path.abspath(f.name) def set_log_file(stdout_name, stderr_name): """Sets up logging for the current worker, creating the (fd backed) file and flushing buffers as is necessary. Args: stdout_name (str): The file name that stdout should be written to. stderr_name(str): The file name that stderr should be written to. 
Returns: (tuple) The absolute paths of the files that stdout and stderr will be written to. """ stdout_path = "" stderr_path = "" worker_pid = os.getpid() # lambda cannot contain assignment def stdout_setter(x): sys.stdout = x def stderr_setter(x): sys.stderr = x if stdout_name: _set_log_file(stdout_name, worker_pid, sys.stdout, stdout_setter) # The stderr case should be analogous to the stdout case if stderr_name: _set_log_file(stderr_name, worker_pid, sys.stderr, stderr_setter) return stdout_path, stderr_path def print_logs(redis_client, threads_stopped, job_id): """Prints log messages from workers on all of the nodes. Args: redis_client: A client to the primary Redis shard. threads_stopped (threading.Event): A threading event used to signal to the thread that it should exit. job_id (JobID): The id of the driver's job """ pubsub_client = redis_client.pubsub(ignore_subscribe_messages=True) pubsub_client.subscribe(ray.gcs_utils.LOG_FILE_CHANNEL) localhost = services.get_node_ip_address() try: # Keep track of the number of consecutive log messages that have been # received with no break in between. If this number grows continually, # then the worker is probably not able to process the log messages as # rapidly as they are coming in. num_consecutive_messages_received = 0 while True: # Exit if we received a signal that we should stop. if threads_stopped.is_set(): return msg = pubsub_client.get_message() if msg is None: num_consecutive_messages_received = 0 threads_stopped.wait(timeout=0.01) continue num_consecutive_messages_received += 1 if (num_consecutive_messages_received % 100 == 0 and num_consecutive_messages_received > 0): logger.warning( "The driver may not be able to keep up with the " "stdout/stderr of the workers. To avoid forwarding logs " "to the driver, use 'ray.init(log_to_driver=False)'.") data = json.loads(ray.utils.decode(msg["data"])) # Don't show logs from other drivers. if data["job"] and ray.utils.binary_to_hex( job_id.binary()) != data["job"]: continue def color_for(data): if data["pid"] == "raylet": return colorama.Fore.YELLOW else: return colorama.Fore.CYAN if data["ip"] == localhost: for line in data["lines"]: print("{}{}(pid={}){} {}".format( colorama.Style.DIM, color_for(data), data["pid"], colorama.Style.RESET_ALL, line)) else: for line in data["lines"]: print("{}{}(pid={}, ip={}){} {}".format( colorama.Style.DIM, color_for(data), data["pid"], data["ip"], colorama.Style.RESET_ALL, line)) except (OSError, redis.exceptions.ConnectionError) as e: logger.error("print_logs: {}".format(e)) finally: # Close the pubsub client to avoid leaking file descriptors. pubsub_client.close() def print_error_messages_raylet(task_error_queue, threads_stopped): """Prints message received in the given output queue. This checks periodically if any un-raised errors occured in the background. Args: task_error_queue (queue.Queue): A queue used to receive errors from the thread that listens to Redis. threads_stopped (threading.Event): A threading event used to signal to the thread that it should exit. """ while True: # Exit if we received a signal that we should stop. if threads_stopped.is_set(): return try: error, t = task_error_queue.get(block=False) except queue.Empty: threads_stopped.wait(timeout=0.01) continue # Delay errors a little bit of time to attempt to suppress redundant # messages originating from the worker. 
while t + UNCAUGHT_ERROR_GRACE_PERIOD > time.time(): threads_stopped.wait(timeout=1) if threads_stopped.is_set(): break if t < last_task_error_raise_time + UNCAUGHT_ERROR_GRACE_PERIOD: logger.debug("Suppressing error from worker: {}".format(error)) else: logger.error( "Possible unhandled error from worker: {}".format(error)) def listen_error_messages_raylet(worker, task_error_queue, threads_stopped): """Listen to error messages in the background on the driver. This runs in a separate thread on the driver and pushes (error, time) tuples to the output queue. Args: worker: The worker class that this thread belongs to. task_error_queue (queue.Queue): A queue used to communicate with the thread that prints the errors found by this thread. threads_stopped (threading.Event): A threading event used to signal to the thread that it should exit. """ worker.error_message_pubsub_client = worker.redis_client.pubsub( ignore_subscribe_messages=True) # Exports that are published after the call to # error_message_pubsub_client.subscribe and before the call to # error_message_pubsub_client.listen will still be processed in the loop. # Really we should just subscribe to the errors for this specific job. # However, currently all errors seem to be published on the same channel. error_pubsub_channel = str( ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB")).encode("ascii") worker.error_message_pubsub_client.subscribe(error_pubsub_channel) # worker.error_message_pubsub_client.psubscribe("*") try: # Get the errors that occurred before the call to subscribe. error_messages = ray.errors() for error_message in error_messages: logger.error(error_message) while True: # Exit if we received a signal that we should stop. if threads_stopped.is_set(): return msg = worker.error_message_pubsub_client.get_message() if msg is None: threads_stopped.wait(timeout=0.01) continue gcs_entry = ray.gcs_utils.GcsEntry.FromString(msg["data"]) assert len(gcs_entry.entries) == 1 error_data = ray.gcs_utils.ErrorTableData.FromString( gcs_entry.entries[0]) job_id = error_data.job_id if job_id not in [ worker.current_job_id.binary(), JobID.nil().binary(), ]: continue error_message = error_data.error_message if (error_data.type == ray_constants.TASK_PUSH_ERROR): # Delay it a bit to see if we can suppress it task_error_queue.put((error_message, time.time())) else: logger.warning(error_message) except (OSError, redis.exceptions.ConnectionError) as e: logger.error("listen_error_messages_raylet: {}".format(e)) finally: # Close the pubsub client to avoid leaking file descriptors. worker.error_message_pubsub_client.close() def is_initialized(): """Check if ray.init has been called yet. Returns: True if ray.init has already been called and false otherwise. """ return ray.worker.global_worker.connected def connect(node, mode=WORKER_MODE, log_to_driver=False, worker=global_worker, driver_object_store_memory=None, job_id=None): """Connect this worker to the raylet, to Plasma, and to Redis. Args: node (ray.node.Node): The node to connect. mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE. log_to_driver (bool): If true, then output from all of the worker processes on all nodes will be directed to the driver. worker: The ray.Worker instance. driver_object_store_memory: Limit the amount of memory the driver can use in the object store when creating objects. job_id: The ID of job. If it's None, then we will generate one. """ # Do some basic checking to make sure we didn't call ray.init twice. 
error_message = "Perhaps you called ray.init twice by accident?" assert not worker.connected, error_message assert worker.cached_functions_to_run is not None, error_message # Enable nice stack traces on SIGSEGV etc. try: if not faulthandler.is_enabled(): faulthandler.enable(all_threads=False) except io.UnsupportedOperation: pass # ignore # Create a Redis client to primary. # The Redis client can safely be shared between threads. However, # that is not true of Redis pubsub clients. See the documentation at # https://github.com/andymccurdy/redis-py#thread-safety. worker.redis_client = node.create_redis_client() # Initialize some fields. if mode is WORKER_MODE: # We should not specify the job_id if it's `WORKER_MODE`. assert job_id is None job_id = JobID.nil() # TODO(qwang): Rename this to `worker_id_str` or type to `WorkerID` worker.worker_id = _random_string() else: # This is the code path of driver mode. if job_id is None: # TODO(qwang): use `GcsClient::GenerateJobId()` here. job_id = JobID.from_int( int(worker.redis_client.incr("JobCounter"))) # When tasks are executed on remote workers in the context of multiple # drivers, the current job ID is used to keep track of which job is # responsible for the task so that error messages will be propagated to # the correct driver. worker.worker_id = ray.utils.compute_driver_id_from_job( job_id).binary() if mode is not SCRIPT_MODE and setproctitle: setproctitle.setproctitle("ray::IDLE") if not isinstance(job_id, JobID): raise TypeError("The type of given job id must be JobID.") # All workers start out as non-actors. A worker can be turned into an actor # after it is created. worker.node = node worker.set_mode(mode) # For driver's check that the version information matches the version # information that the Ray cluster was started with. try: ray.services.check_version_info(worker.redis_client) except Exception as e: if mode == SCRIPT_MODE: raise e elif mode == WORKER_MODE: traceback_str = traceback.format_exc() ray.utils.push_error_to_driver_through_redis( worker.redis_client, ray_constants.VERSION_MISMATCH_PUSH_ERROR, traceback_str, job_id=None) worker.lock = threading.RLock() driver_name = "" log_stdout_file_path = "" log_stderr_file_path = "" if mode == SCRIPT_MODE: import __main__ as main driver_name = (main.__file__ if hasattr(main, "__file__") else "INTERACTIVE MODE") elif mode == WORKER_MODE: # Check the RedirectOutput key in Redis and based on its value redirect # worker output and error to their own files. # This key is set in services.py when Redis is started. redirect_worker_output_val = worker.redis_client.get("RedirectOutput") if (redirect_worker_output_val is not None and int(redirect_worker_output_val) == 1): log_stdout_file_name, log_stderr_file_name = ( node.get_job_redirected_log_file(worker.worker_id)) try: log_stdout_file_path, log_stderr_file_path = \ set_log_file(log_stdout_file_name, log_stderr_file_name) except IOError: raise IOError( "Workers must be able to redirect their output at" "the file descriptor level.") elif not LOCAL_MODE: raise ValueError( "Invalid worker mode. Expected DRIVER, WORKER or LOCAL.") # TODO (Alex): `current_logging_job` tracks the current job so that we know # when to switch log files. If all logging functionaility was moved to c++, # the functionaility in `_raylet.pyx::switch_worker_log_if_necessary` could # be moved to `CoreWorker::SetCurrentTaskId()`. 
worker.current_logging_job_id = None redis_address, redis_port = node.redis_address.split(":") gcs_options = ray._raylet.GcsClientOptions( redis_address, int(redis_port), node.redis_password, ) worker.core_worker = ray._raylet.CoreWorker( (mode == SCRIPT_MODE or mode == LOCAL_MODE), node.plasma_store_socket_name, node.raylet_socket_name, job_id, gcs_options, node.get_logs_dir_path(), node.node_ip_address, node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE), driver_name, log_stdout_file_path, log_stderr_file_path, ) # Create an object for interfacing with the global state. # Note, global state should be intialized after `CoreWorker`, because it # will use glog, which is intialized in `CoreWorker`. ray.state.state._initialize_global_state( node.redis_address, redis_password=node.redis_password) if driver_object_store_memory is not None: worker.core_worker.set_object_store_client_options( "ray_driver_{}".format(os.getpid()), driver_object_store_memory) # Put something in the plasma store so that subsequent plasma store # accesses will be faster. Currently the first access is always slow, and # we don't want the user to experience this. if mode != LOCAL_MODE: temporary_object_id = ray.ObjectID.from_random() worker.put_object(1, object_id=temporary_object_id) ray.internal.free([temporary_object_id]) # Start the import thread worker.import_thread = import_thread.ImportThread(worker, mode, worker.threads_stopped) worker.import_thread.start() # If this is a driver running in SCRIPT_MODE, start a thread to print error # messages asynchronously in the background. Ideally the scheduler would # push messages to the driver's worker service, but we ran into bugs when # trying to properly shutdown the driver's worker service, so we are # temporarily using this implementation which constantly queries the # scheduler for new error messages. if mode == SCRIPT_MODE: q = queue.Queue() worker.listener_thread = threading.Thread( target=listen_error_messages_raylet, name="ray_listen_error_messages", args=(worker, q, worker.threads_stopped)) worker.printer_thread = threading.Thread( target=print_error_messages_raylet, name="ray_print_error_messages", args=(q, worker.threads_stopped)) worker.listener_thread.daemon = True worker.listener_thread.start() worker.printer_thread.daemon = True worker.printer_thread.start() if log_to_driver: worker.logger_thread = threading.Thread( target=print_logs, name="ray_print_logs", args=(worker.redis_client, worker.threads_stopped, job_id)) worker.logger_thread.daemon = True worker.logger_thread.start() if mode == SCRIPT_MODE: # Add the directory containing the script that is running to the Python # paths of the workers. Also add the current directory. Note that this # assumes that the directory structures on the machines in the clusters # are the same. script_directory = os.path.abspath(os.path.dirname(sys.argv[0])) current_directory = os.path.abspath(os.path.curdir) worker.run_function_on_all_workers( lambda worker_info: sys.path.insert(1, script_directory)) worker.run_function_on_all_workers( lambda worker_info: sys.path.insert(1, current_directory)) # TODO(rkn): Here we first export functions to run, then remote # functions. The order matters. For example, one of the functions to # run may set the Python path, which is needed to import a module used # to define a remote function. We may want to change the order to # simply be the order in which the exports were defined on the driver. 
# In addition, we will need to retain the ability to decide what the # first few exports are (mostly to set the Python path). Additionally, # note that the first exports to be defined on the driver will be the # ones defined in separate modules that are imported by the driver. # Export cached functions_to_run. for function in worker.cached_functions_to_run: worker.run_function_on_all_workers(function) worker.cached_functions_to_run = None def disconnect(exiting_interpreter=False): """Disconnect this worker from the raylet and object store.""" # Reset the list of cached remote functions and actors so that if more # remote functions or actors are defined and then connect is called again, # the remote functions will be exported. This is mostly relevant for the # tests. worker = global_worker if worker.connected: # Shutdown all of the threads that we've started. TODO(rkn): This # should be handled cleanly in the worker object's destructor and not # in this disconnect method. worker.threads_stopped.set() if hasattr(worker, "import_thread"): worker.import_thread.join_import_thread() if hasattr(worker, "listener_thread"): worker.listener_thread.join() if hasattr(worker, "printer_thread"): worker.printer_thread.join() if hasattr(worker, "logger_thread"): worker.logger_thread.join() worker.threads_stopped.clear() worker._session_index += 1 worker.node = None # Disconnect the worker from the node. worker.cached_functions_to_run = [] worker.serialization_context_map.clear() try: ray_actor = ray.actor except AttributeError: ray_actor = None # This can occur during program termination if ray_actor is not None: ray_actor.ActorClassMethodMetadata.reset_cache() @contextmanager def _changeproctitle(title, next_title): setproctitle.setproctitle(title) try: yield finally: setproctitle.setproctitle(next_title) def register_custom_serializer(cls, serializer, deserializer, use_pickle=False, use_dict=False, class_id=None): """Registers custom functions for efficient object serialization. The serializer and deserializer are used when transferring objects of `cls` across processes and nodes. This can be significantly faster than the Ray default fallbacks. Wraps `register_custom_serializer` underneath. Args: cls (type): The class that ray should use this custom serializer for. serializer: The custom serializer that takes in a cls instance and outputs a serialized representation. use_pickle and use_dict must be False if provided. deserializer: The custom deserializer that takes in a serialized representation of the cls and outputs a cls instance. use_pickle and use_dict must be False if provided. use_pickle: Deprecated. use_dict: Deprecated. class_id (str): Unique ID of the class. Autogenerated if None. """ worker = global_worker worker.check_connected() if use_pickle: raise DeprecationWarning( "`use_pickle` is no longer a valid parameter and will be removed " "in future versions of Ray. If this breaks your application, " "see `SerializationContext.register_custom_serializer`.") if use_dict: raise DeprecationWarning( "`use_pickle` is no longer a valid parameter and will be removed " "in future versions of Ray. If this breaks your application, " "see `SerializationContext.register_custom_serializer`.") assert serializer is not None and deserializer is not None context = global_worker.get_serialization_context() context.register_custom_serializer( cls, serializer, deserializer, class_id=class_id) def show_in_webui(message, key="", dtype="text"): """Display message in dashboard. 
Display message for the current task or actor in the dashboard. For example, this can be used to display the status of a long-running computation. Args: message (str): Message to be displayed. key (str): The key name for the message. Multiple message under different keys will be displayed at the same time. Messages under the same key will be overriden. data_type (str): The type of message for rendering. One of the following: text, html. """ worker = global_worker worker.check_connected() acceptable_dtypes = {"text", "html"} assert dtype in acceptable_dtypes, "dtype accepts only: {}".format( acceptable_dtypes) message_wrapped = {"message": message, "dtype": dtype} message_encoded = json.dumps(message_wrapped).encode() worker.core_worker.set_webui_display(key.encode(), message_encoded) # Global varaible to make sure we only send out the warning once blocking_get_inside_async_warned = False def get(object_ids, timeout=None): """Get a remote object or a list of remote objects from the object store. This method blocks until the object corresponding to the object ID is available in the local object store. If this object is not in the local object store, it will be shipped from an object store that has it (once the object has been created). If object_ids is a list, then the objects corresponding to each object in the list will be returned. This method will issue a warning if it's running inside async context, you can use ``await object_id`` instead of ``ray.get(object_id)``. For a list of object ids, you can use ``await asyncio.gather(*object_ids)``. Args: object_ids: Object ID of the object to get or a list of object IDs to get. timeout (Optional[float]): The maximum amount of time in seconds to wait before returning. Returns: A Python object or a list of Python objects. Raises: RayTimeoutError: A RayTimeoutError is raised if a timeout is set and the get takes longer than timeout to return. Exception: An exception is raised if the task that created the object or that created one of the objects raised an exception. """ worker = global_worker worker.check_connected() if hasattr( worker, "core_worker") and worker.core_worker.current_actor_is_asyncio(): global blocking_get_inside_async_warned if not blocking_get_inside_async_warned: logger.debug("Using blocking ray.get inside async actor. " "This blocks the event loop. Please use `await` " "on object id with asyncio.gather if you want to " "yield execution to the event loop instead.") blocking_get_inside_async_warned = True with profiling.profile("ray.get"): is_individual_id = isinstance(object_ids, ray.ObjectID) if is_individual_id: object_ids = [object_ids] if not isinstance(object_ids, list): raise ValueError("'object_ids' must either be an object ID " "or a list of object IDs.") global last_task_error_raise_time # TODO(ujvl): Consider how to allow user to retrieve the ready objects. values = worker.get_objects(object_ids, timeout=timeout) for i, value in enumerate(values): if isinstance(value, RayError): last_task_error_raise_time = time.time() if isinstance(value, ray.exceptions.UnreconstructableError): worker.core_worker.dump_object_store_memory_usage() if isinstance(value, RayTaskError): raise value.as_instanceof_cause() else: raise value # Run post processors. for post_processor in worker._post_get_hooks: values = post_processor(object_ids, values) if is_individual_id: values = values[0] return values def put(value, weakref=False): """Store an object in the object store. 
The object may not be evicted while a reference to the returned ID exists. Args: value: The Python object to be stored. weakref: If set, allows the object to be evicted while a reference to the returned ID exists. You might want to set this if putting a lot of objects that you might not need in the future. It allows Ray to more aggressively reclaim memory. Returns: The object ID assigned to this value. """ worker = global_worker worker.check_connected() with profiling.profile("ray.put"): try: object_id = worker.put_object(value, pin_object=not weakref) except ObjectStoreFullError: logger.info( "Put failed since the value was either too large or the " "store was full of pinned objects.") raise return object_id # Global variable to make sure we only send out the warning once. blocking_wait_inside_async_warned = False def wait(object_ids, num_returns=1, timeout=None): """Return a list of IDs that are ready and a list of IDs that are not. If timeout is set, the function returns either when the requested number of IDs are ready or when the timeout is reached, whichever occurs first. If it is not set, the function simply waits until that number of objects is ready and returns that exact number of object IDs. This method returns two lists. The first list consists of object IDs that correspond to objects that are available in the object store. The second list corresponds to the rest of the object IDs (which may or may not be ready). Ordering of the input list of object IDs is preserved. That is, if A precedes B in the input list, and both are in the ready list, then A will precede B in the ready list. This also holds true if A and B are both in the remaining list. This method will issue a warning if it's running inside an async context. Instead of ``ray.wait(object_ids)``, you can use ``await asyncio.wait(object_ids)``. Args: object_ids (List[ObjectID]): List of object IDs for objects that may or may not be ready. Note that these IDs must be unique. num_returns (int): The number of object IDs that should be returned. timeout (float): The maximum amount of time in seconds to wait before returning. Returns: A list of object IDs that are ready and a list of the remaining object IDs. """ worker = global_worker if hasattr(worker, "core_worker") and worker.core_worker.current_actor_is_asyncio( ) and timeout != 0: global blocking_wait_inside_async_warned if not blocking_wait_inside_async_warned: logger.debug("Using blocking ray.wait inside async method. " "This blocks the event loop. Please use `await` " "on object id with asyncio.wait. ") blocking_wait_inside_async_warned = True if isinstance(object_ids, ObjectID): raise TypeError("wait() expected a list of ray.ObjectID, got a single " "ray.ObjectID") if not isinstance(object_ids, list): raise TypeError( "wait() expected a list of ray.ObjectID, got {}".format( type(object_ids))) if timeout is not None and timeout < 0: raise ValueError("The 'timeout' argument must be nonnegative. " "Received {}".format(timeout)) for object_id in object_ids: if not isinstance(object_id, ObjectID): raise TypeError("wait() expected a list of ray.ObjectID, " "got list containing {}".format(type(object_id))) worker.check_connected() # TODO(swang): Check main thread. with profiling.profile("ray.wait"): # TODO(rkn): This is a temporary workaround for # https://github.com/ray-project/ray/issues/997. However, it should be # fixed in Arrow instead of here. 
if len(object_ids) == 0: return [], [] if len(object_ids) != len(set(object_ids)): raise ValueError("Wait requires a list of unique object IDs.") if num_returns <= 0: raise ValueError( "Invalid number of objects to return %d." % num_returns) if num_returns > len(object_ids): raise ValueError("num_returns cannot be greater than the number " "of objects provided to ray.wait.") timeout = timeout if timeout is not None else 10**6 timeout_milliseconds = int(timeout * 1000) ready_ids, remaining_ids = worker.core_worker.wait( object_ids, num_returns, timeout_milliseconds, worker.current_task_id, ) return ready_ids, remaining_ids def get_actor(name): """Get a handle to a detached actor. Gets a handle to a detached actor with the given name. The actor must have been created with Actor.options(name="name").remote(). Returns: ActorHandle to the actor. Raises: ValueError if the named actor does not exist. """ return ray.util.named_actors._get_actor(name) def kill(actor, no_restart=True): """Kill an actor forcefully. This will interrupt any running tasks on the actor, causing them to fail immediately. Any atexit handlers installed in the actor will still be run. If you want to kill the actor but let pending tasks finish, you can call ``actor.__ray_terminate__.remote()`` instead to queue a termination task. If the actor is a detached actor, subsequent calls to get its handle via ray.get_actor will fail. Args: actor (ActorHandle): Handle to the actor to kill. no_restart (bool): Whether or not this actor should be restarted if it's a restartable actor. """ if not isinstance(actor, ray.actor.ActorHandle): raise ValueError("ray.kill() only supported for actors. " "Got: {}.".format(type(actor))) worker = ray.worker.global_worker worker.check_connected() worker.core_worker.kill_actor(actor._ray_actor_id, no_restart) def cancel(object_id, force=False): """Cancels a task according to the following conditions. If the specified task is pending execution, it will not be executed. If the task is currently executing, the behavior depends on the ``force`` flag. When ``force=False``, a KeyboardInterrupt will be raised in Python and when ``force=True``, the executing the task will immediately exit. If the task is already finished, nothing will happen. Only non-actor tasks can be canceled. Canceled tasks will not be retried (max_retries will not be respected). Calling ray.get on a canceled task will raise a RayCancellationError. Args: object_id (ObjectID): ObjectID returned by the task that should be canceled. force (boolean): Whether to force-kill a running task by killing the worker that is running the task. Raises: TypeError: This is also raised for actor tasks. """ worker = ray.worker.global_worker worker.check_connected() if not isinstance(object_id, ray.ObjectID): raise TypeError( "ray.cancel() only supported for non-actor object IDs. " "Got: {}.".format(type(object_id))) return worker.core_worker.cancel_task(object_id, force) def _mode(worker=global_worker): """This is a wrapper around worker.mode. We use this wrapper so that in the remote decorator, we can call _mode() instead of worker.mode. The difference is that when we attempt to serialize remote functions, we don't attempt to serialize the worker object, which cannot be serialized. 
""" return worker.mode def make_decorator(num_return_vals=None, num_cpus=None, num_gpus=None, memory=None, object_store_memory=None, resources=None, max_calls=None, max_retries=None, max_restarts=None, max_task_retries=None, worker=None): def decorator(function_or_class): if (inspect.isfunction(function_or_class) or is_cython(function_or_class)): # Set the remote function default resources. if max_restarts is not None: raise ValueError("The keyword 'max_restarts' is not " "allowed for remote functions.") if max_task_retries is not None: raise ValueError("The keyword 'max_task_retries' is not " "allowed for remote functions.") return ray.remote_function.RemoteFunction( Language.PYTHON, function_or_class, None, num_cpus, num_gpus, memory, object_store_memory, resources, num_return_vals, max_calls, max_retries) if inspect.isclass(function_or_class): if num_return_vals is not None: raise TypeError("The keyword 'num_return_vals' is not " "allowed for actors.") if max_calls is not None: raise TypeError("The keyword 'max_calls' is not " "allowed for actors.") return ray.actor.make_actor(function_or_class, num_cpus, num_gpus, memory, object_store_memory, resources, max_restarts, max_task_retries) raise TypeError("The @ray.remote decorator must be applied to " "either a function or to a class.") return decorator def remote(*args, **kwargs): """Define a remote function or an actor class. This can be used with no arguments to define a remote function or actor as follows: .. code-block:: python @ray.remote def f(): return 1 @ray.remote class Foo: def method(self): return 1 It can also be used with specific keyword arguments: * **num_return_vals:** This is only for *remote functions*. It specifies the number of object IDs returned by the remote function invocation. * **num_cpus:** The quantity of CPU cores to reserve for this task or for the lifetime of the actor. * **num_gpus:** The quantity of GPUs to reserve for this task or for the lifetime of the actor. * **resources:** The quantity of various custom resources to reserve for this task or for the lifetime of the actor. This is a dictionary mapping strings (resource names) to numbers. * **max_calls:** Only for *remote functions*. This specifies the maximum number of times that a given worker can execute the given remote function before it must exit (this can be used to address memory leaks in third-party libraries or to reclaim resources that cannot easily be released, e.g., GPU memory that was acquired by TensorFlow). By default this is infinite. * **max_restarts**: Only for *actors*. This specifies the maximum number of times that the actor should be restarted when it dies unexpectedly. The minimum valid value is 0 (default), which indicates that the actor doesn't need to be restarted. A value of -1 indicates that an actor should be restarted indefinitely. * **max_task_retries**: Only for *actors*. How many times to retry an actor task if the task fails due to a system error, e.g., the actor has died. If set to -1, the system will retry the failed task until the task succeeds, or the actor has reached its max_restarts limit. If set to n > 0, the system will retry the failed task up to n times, after which the task will throw a `RayActorError` exception upon `ray.get`. Note that Python exceptions are not considered system errors and will not trigger retries. * **max_retries**: Only for *remote functions*. This specifies the maximum number of times that the remote function should be rerun when the worker process executing it crashes unexpectedly. 
The minimum valid value is 0, the default is 4 (default), and a value of -1 indicates infinite retries. This can be done as follows: .. code-block:: python @ray.remote(num_gpus=1, max_calls=1, num_return_vals=2) def f(): return 1, 2 @ray.remote(num_cpus=2, resources={"CustomResource": 1}) class Foo: def method(self): return 1 Remote task and actor objects returned by @ray.remote can also be dynamically modified with the same arguments as above using ``.options()`` as follows: .. code-block:: python @ray.remote(num_gpus=1, max_calls=1, num_return_vals=2) def f(): return 1, 2 g = f.options(num_gpus=2, max_calls=None) @ray.remote(num_cpus=2, resources={"CustomResource": 1}) class Foo: def method(self): return 1 Bar = Foo.options(num_cpus=1, resources=None) Running remote actors will be terminated when the actor handle to them in Python is deleted, which will cause them to complete any outstanding work and then shut down. If you want to kill them immediately, you can also call ``ray.kill(actor)``. """ worker = global_worker if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): # This is the case where the decorator is just @ray.remote. return make_decorator(worker=worker)(args[0]) # Parse the keyword arguments from the decorator. error_string = ("The @ray.remote decorator must be applied either " "with no arguments and no parentheses, for example " "'@ray.remote', or it must be applied using some of " "the arguments 'num_return_vals', 'num_cpus', 'num_gpus', " "'memory', 'object_store_memory', 'resources', " "'max_calls', or 'max_restarts', like " "'@ray.remote(num_return_vals=2, " "resources={\"CustomResource\": 1})'.") assert len(args) == 0 and len(kwargs) > 0, error_string for key in kwargs: assert key in [ "num_return_vals", "num_cpus", "num_gpus", "memory", "object_store_memory", "resources", "max_calls", "max_restarts", "max_task_retries", "max_retries", ], error_string num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None resources = kwargs.get("resources") if not isinstance(resources, dict) and resources is not None: raise TypeError("The 'resources' keyword argument must be a " "dictionary, but received type {}.".format( type(resources))) if resources is not None: assert "CPU" not in resources, "Use the 'num_cpus' argument." assert "GPU" not in resources, "Use the 'num_gpus' argument." # Handle other arguments. num_return_vals = kwargs.get("num_return_vals") max_calls = kwargs.get("max_calls") max_restarts = kwargs.get("max_restarts") max_task_retries = kwargs.get("max_task_retries") memory = kwargs.get("memory") object_store_memory = kwargs.get("object_store_memory") max_retries = kwargs.get("max_retries") return make_decorator( num_return_vals=num_return_vals, num_cpus=num_cpus, num_gpus=num_gpus, memory=memory, object_store_memory=object_store_memory, resources=resources, max_calls=max_calls, max_restarts=max_restarts, max_task_retries=max_task_retries, max_retries=max_retries, worker=worker)
[]
[]
[ "RAY_ADDRESS" ]
[]
["RAY_ADDRESS"]
python
1
0
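The record above lists RAY_ADDRESS as the environment variable this worker.py reads. Below is a minimal driver sketch, not part of the file itself, showing the connect-to-an-existing-cluster path that the validation block at the start of this section guards; it assumes the public ray.init(address=...) entry point of this Ray era and that RAY_ADDRESS holds either a redis address such as "host:6379" or "auto". Note that num_cpus, num_gpus, and resources must be omitted when attaching to a running cluster.

import os
import ray

# Attach to an already-running cluster; the address comes from the environment.
ray.init(address=os.environ.get("RAY_ADDRESS", "auto"))

@ray.remote
def square(x):
    return x * x

# ray.get blocks until the objects are available in the local object store.
print(ray.get([square.remote(i) for i in range(4)]))  # [0, 1, 4, 9]

ray.shutdown()  # safe to call twice; also runs automatically via the atexit hook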
tutorials/streamlit_notebooks/healthcare/sparknlp_ner_playground.py
import streamlit as st import pandas as pd import base64 from sparknlp_display import NerVisualizer st.sidebar.image('https://nlp.johnsnowlabs.com/assets/images/logo.png', use_column_width=True) HTML_WRAPPER = """<div style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem">{}</div>""" st.title("Spark NLP Clinical NER Playground") import json import os from pyspark.ml import Pipeline,PipelineModel from pyspark.sql import SparkSession from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.base import * import sparknlp_jsl import sparknlp import json spark = sparknlp_jsl.start(os.environ['SECRET']) print ("Spark NLP Version :", sparknlp.version()) print ("Spark NLP_JSL Version :", sparknlp_jsl.version()) @st.cache(allow_output_mutation=True, suppress_st_warning=True) def load_sparknlp_models(): print ('loading pretrained models') sentenceDetector = SentenceDetectorDLModel.pretrained("sentence_detector_dl_healthcare","en","clinical/models")\ .setInputCols(["document"])\ .setOutputCol("sentence") embeddings_clinical = WordEmbeddingsModel.pretrained("embeddings_clinical","en","clinical/models")\ .setInputCols(["sentence","token"])\ .setOutputCol("embeddings") biobert_embeddings = BertEmbeddings.pretrained("biobert_pubmed_base_cased").setInputCols(["sentence", "token"]).setOutputCol("embeddings") embeddings model_dict = { 'sentenceDetector': sentenceDetector, 'embeddings_clinical':embeddings_clinical } for ner_model in ner_models_clinical: try: model_dict[ner_model] = MedicalNerModel.pretrained(ner_model,"en","clinical/models")\ .setInputCols(["sentence","token","embeddings"])\ .setOutputCol("ner") except: pass #st.write ('model name is wrong > ', ner_model) print ('models loaded !') return model_dict @st.cache(allow_output_mutation=True, suppress_st_warning=True) def load_sparknlp_models_biobert(): print ('loading pretrained models') sentenceDetector = SentenceDetectorDLModel.pretrained("sentence_detector_dl_healthcare","en","clinical/models")\ .setInputCols(["document"])\ .setOutputCol("sentence") embeddings_biobert = BertEmbeddings.pretrained("biobert_pubmed_base_cased").setInputCols(["sentence", "token"]).setOutputCol("embeddings") model_dict = { 'sentenceDetector': sentenceDetector, 'embeddings_biobert':embeddings_biobert } for ner_model in ner_models_biobert : try: model_dict[ner_model] = MedicalNerModel.pretrained(ner_model,"en","clinical/models")\ .setInputCols(["sentence","token","embeddings"])\ .setOutputCol("ner") except: pass #st.write ('model name is wrong > ', ner_model) print ('models loaded !') return model_dict import subprocess subprocess.run(["wget", "https://nlp.johnsnowlabs.com/models.json"]) with open('/content/models.json') as f: model_master_list = json.load(f) ner_models_biobert = list(set([x['name'] for x in model_master_list if x['task']=="Named Entity Recognition" and x['edition'].startswith('Spark NLP for Healthcare') and 'biobert' in x['name'] and x['edition'].split()[-1]>='3.0'])) ner_models_clinical = list(set([x['name'] for x in model_master_list if x['task']=="Named Entity Recognition" and x['edition'].startswith('Spark NLP for Healthcare') and 'biobert' not in x['name'] and 'healthcare' not in x['name'] and x['edition'].split()[-1]>='3.0'])) model_dict_1 = load_sparknlp_models() model_dict_2 = load_sparknlp_models_biobert() if not st.sidebar.checkbox('with BioBert Embeddings'): emb = 'clinical' model_dict = model_dict_1 else: model_dict = model_dict_2 emb = 'biobert' def 
display_time(start_tm): end_tm = time.time() diff = end_tm - start_tm st.write('<span style="color:red">{} sec</span>'.format(round(diff,4)), unsafe_allow_html=True) def viz (annotated_text, chunk_col): raw_html = NerVisualizer().display(annotated_text, chunk_col, return_html=True) sti = raw_html.find('<style>') ste = raw_html.find('</style>')+8 st.markdown(raw_html[sti:ste], unsafe_allow_html=True) st.write(HTML_WRAPPER.format(raw_html[ste:]), unsafe_allow_html=True) def get_table_download_link(df): """Generates a link allowing the data in a given panda dataframe to be downloaded in: dataframe out: href string """ csv = df.to_csv(index=False) b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here href = f'<a href="data:file/csv;base64,{b64}">Download table as csv file</a>' st.write('') st.markdown(href, unsafe_allow_html=True) def build_dynamic_pipeline(payload, embeddings_name='embeddings_clinical'): document = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentence = model_dict['sentenceDetector'] token = Tokenizer()\ .setInputCols(['sentence'])\ .setOutputCol('token') embeddings = model_dict[embeddings_name] st.write() ner_pipe = [] for ner, entities in payload.items(): first = len(ner_pipe) == 0 ner_pipe.append(model_dict[ner]\ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("{}_tags".format(ner)) ) ner_pipe.append(NerConverter()\ .setInputCols(["sentence", "token", "{}_tags".format(ner)])\ .setOutputCol("{}_chunks".format(ner))\ .setWhiteList(entities) ) if not first: ner_pipe.append(ChunkMergeApproach().setInputCols(prev, "{}_chunks".format(ner)).\ setOutputCol("{}_chunks".format(ner))) prev = "{}_chunks".format(ner) ner_pipeline = Pipeline( stages = [ document, sentence, token, embeddings]+ner_pipe) return ner_pipeline, prev st.sidebar.header('Select pretrained NER Model(s)') st.sidebar.write('') def get_labels(model): m = set(list([c.split('-')[1] for c in model.getClasses() if len(c)>1])) return list(m) def get_payload(): ner_list = [i for i in model_dict.keys() if 'ner' in i] ner_payload =dict() for ner in ner_list: if ner=='clinical_ner': st.sidebar.checkbox(ner, value=True) if st.sidebar.checkbox(ner): classes = get_labels(model_dict[ner]) concepts = st.sidebar.multiselect("entities in {}".format(ner), options=classes, default=classes) ner_payload[ner] = concepts return ner_payload from sparknlp_display import NerVisualizer def get_entities (ner_pipeline, text): empty_data = spark.createDataFrame([[""]]).toDF("text") ner_model = ner_pipeline.fit(empty_data) light_model = LightPipeline(ner_model) full_annotated_text = light_model.fullAnnotate(text)[0] st.write('') st.subheader('Entities') chunks=[] entities=[] for n in full_annotated_text[chunk_col]: chunks.append(n.result) entities.append(n.metadata['entity']) df = pd.DataFrame({'chunks':chunks, 'entities':entities}) #show_html_spacy(full_annotated_text, chunk_col) viz (full_annotated_text, chunk_col) st.table(df) return df ner_list = [i for i in model_dict.keys() if 'ner' in i.lower()] sorted(ner_list) if st.sidebar.checkbox('Run all NERs'): st.sidebar.markdown("---") ner_payload = dict() concepts = [] for ner in ner_list: classes = get_labels(model_dict[ner]) ner_concepts = st.sidebar.multiselect("entities in {}".format(ner), options=classes, default=classes) ner_payload[ner] = ner_concepts concepts.extend(ner_concepts) else: ner_payload = dict() for ner in ner_list: if st.sidebar.checkbox(ner): classes = get_labels(model_dict[ner]) 
ner_concepts = st.sidebar.multiselect("entities in {}".format(ner), options=classes, default=classes) ner_payload[ner] = ner_concepts ner_text = st.text_area('NER Input Text', 'A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation , associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting. The patient was prescribed 1 capsule of Advil 10 mg for 5 days and magnesium hydroxide 100mg/1ml suspension PO. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day .') import time start_time = time.time() if len(ner_payload)!=0: st.header("***chunks will be merged if multiple models selected***") if emb=='clinical': ner_pipeline, chunk_col = build_dynamic_pipeline (ner_payload) else: ner_pipeline, chunk_col = build_dynamic_pipeline (ner_payload, embeddings_name='embeddings_biobert') entities_df = get_entities (ner_pipeline, ner_text) get_table_download_link(entities_df ) display_time(start_time)
[]
[]
[ "SECRET" ]
[]
["SECRET"]
python
1
0
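The playground above builds its "Download table as csv file" link by base64-encoding a dataframe. A self-contained sketch of that helper, following the same logic as get_table_download_link but without the Streamlit rendering calls:

import base64
import pandas as pd

def csv_download_link(df: pd.DataFrame) -> str:
    # Serialize to CSV, base64-encode, and wrap in a data-URI anchor that
    # Streamlit can render with st.markdown(..., unsafe_allow_html=True).
    b64 = base64.b64encode(df.to_csv(index=False).encode()).decode()
    return f'<a href="data:file/csv;base64,{b64}">Download table as csv file</a>'

print(csv_download_link(pd.DataFrame({"chunks": ["diabetes"], "entities": ["PROBLEM"]})))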
tests/test_trapd_get_cbs_config.py
# ============LICENSE_START======================================================= # Copyright (c) 2017-2021 AT&T Intellectual Property. All rights reserved. # Copyright (c) 2019 Pantheon.tech. All rights reserved. # Copyright (c) 2021 Fujitsu Ltd. # ================================================================================ # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============LICENSE_END========================================================= import pytest import unittest import os from miss_htbt_service.mod import trapd_get_cbs_config class test_get_cbs_config(unittest.TestCase): """ Test the trapd_get_cbs_config mod """ pytest_json_data = '{ "heartbeat_config": { "vnfs": [{ "eventName": "Heartbeat_vDNS", "heartbeatcountmissed": 3, "heartbeatinterval": 60, "closedLoopControlName": "ControlLoopEvent1", "policyVersion": "1.0.0.5", "policyName": "vFireWall", "policyScope": "resource=sampleResource,type=sampletype,CLName=sampleCLName", "target_type": "VNF", "target": "genVnfName", "version": "1.0" }, { "eventName": "Heartbeat_vFW", "heartbeatcountmissed": 3, "heartbeatinterval": 60, "closedLoopControlName": "ControlLoopEvent1", "policyVersion": "1.0.0.5", "policyName": "vFireWall", "policyScope": "resource=sampleResource,type=sampletype,CLName=sampleCLName", "target_type": "VNF", "target": "genVnfName", "version": "1.0" }, { "eventName": "Heartbeat_xx", "heartbeatcountmissed": 3, "heartbeatinterval": 60, "closedLoopControlName": "ControlLoopEvent1", "policyVersion": "1.0.0.5", "policyName": "vFireWall", "policyScope": "resource=sampleResource,type=sampletype,CLName=sampleCLName", "target_type": "VNF", "target": "genVnfName", "version": "1.0" } ] }, "streams_publishes": { "ves_heartbeat": { "dmaap_info": { "topic_url": "http://message-router:3904/events/unauthenticated.DCAE_CL_OUTPUT/" }, "type": "message_router" } }, "streams_subscribes": { "ves_heartbeat": { "dmaap_info": { "topic_url": "http://message-router:3904/events/unauthenticated.SEC_HEARTBEAT_INPUT/" }, "type": "message_router" } } }' # create copy of snmptrapd.json for pytest pytest_json_config = "/tmp/opt/app/miss_htbt_service/etc/config.json" with open(pytest_json_config, "w") as outfile: outfile.write(pytest_json_data) def test_cbs_env_present(self): """ Test that CONSUL_HOST env variable exists but fails to respond """ with pytest.raises(Exception) as pytest_wrapped_sys_exit: result = trapd_get_cbs_config.get_cbs_config() assert pytest_wrapped_sys_exit.type == SystemExit def test_cbs_fallback_env_present(self): """ Test that CBS fallback env variable exists and we can get config from fallback env var """ os.environ.update(CBS_HTBT_JSON="/tmp/opt/app/miss_htbt_service/etc/config.json") result = True print("result: %s" % result) self.assertEqual(result, True)
[]
[]
[]
[]
[]
python
0
0
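The second test case above points CBS_HTBT_JSON at a local copy of config.json before exercising the module under test. A hypothetical sketch of the fallback path it relies on; the real implementation lives in miss_htbt_service.mod.trapd_get_cbs_config and is not shown here, so the helper name and default path below are illustrative only.

import json
import os

def load_fallback_config():
    # Assumption: when the config binding service is unreachable, configuration is
    # read from the JSON file named by the CBS_HTBT_JSON environment variable.
    path = os.environ.get("CBS_HTBT_JSON", "/tmp/opt/app/miss_htbt_service/etc/config.json")
    with open(path) as fh:
        return json.load(fh)

config = load_fallback_config()
print(len(config["heartbeat_config"]["vnfs"]))  # the test fixture defines 3 VNF entries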
src/borg/testsuite/archiver.py
import argparse import dateutil.tz import errno import io import json import logging import os import pstats import random import re import shutil import socket import stat import subprocess import sys import tempfile import time import unittest from binascii import unhexlify, b2a_base64 from configparser import ConfigParser from datetime import datetime from datetime import timezone from datetime import timedelta from hashlib import sha256 from io import BytesIO, StringIO from unittest.mock import patch import pytest import borg from .. import xattr, helpers, platform from ..archive import Archive, ChunkBuffer from ..archiver import Archiver, parse_storage_quota, PURE_PYTHON_MSGPACK_WARNING from ..cache import Cache, LocalCache from ..chunker import has_seek_hole from ..constants import * # NOQA from ..crypto.low_level import bytes_to_long, num_cipher_blocks from ..crypto.key import KeyfileKeyBase, RepoKey, KeyfileKey, Passphrase, TAMRequiredError from ..crypto.keymanager import RepoIdMismatch, NotABorgKeyFile from ..crypto.file_integrity import FileIntegrityError from ..helpers import Location, get_security_dir from ..helpers import Manifest, MandatoryFeatureUnsupported from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR from ..helpers import bin_to_hex from ..helpers import MAX_S from ..helpers import msgpack from ..helpers import flags_noatime, flags_normal from ..nanorst import RstToTextLazy, rst_to_terminal from ..patterns import IECommand, PatternMatcher, parse_pattern from ..item import Item, ItemDiff from ..locking import LockFailed from ..logger import setup_logging from ..remote import RemoteRepository, PathNotAllowed from ..repository import Repository from . import has_lchflags, llfuse from . import BaseTestCase, changedir, environment_variable, no_selinux from . import are_symlinks_supported, are_hardlinks_supported, are_fifos_supported, is_utime_fully_supported, is_birthtime_fully_supported from .platform import fakeroot_detected from .upgrader import make_attic_repo from . import key src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) def exec_cmd(*args, archiver=None, fork=False, exe=None, input=b'', binary_output=False, **kw): if fork: try: if exe is None: borg = (sys.executable, '-m', 'borg.archiver') elif isinstance(exe, str): borg = (exe, ) elif not isinstance(exe, tuple): raise ValueError('exe must be None, a tuple or a str') output = subprocess.check_output(borg + args, stderr=subprocess.STDOUT, input=input) ret = 0 except subprocess.CalledProcessError as e: output = e.output ret = e.returncode except SystemExit as e: # possibly raised by argparse output = '' ret = e.code if binary_output: return ret, output else: return ret, os.fsdecode(output) else: stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr try: sys.stdin = StringIO(input.decode()) sys.stdin.buffer = BytesIO(input) output = BytesIO() # Always use utf-8 here, to simply .decode() below output_text = sys.stdout = sys.stderr = io.TextIOWrapper(output, encoding='utf-8') if archiver is None: archiver = Archiver() archiver.prerun_checks = lambda *args: None archiver.exit_code = EXIT_SUCCESS helpers.exit_code = EXIT_SUCCESS try: args = archiver.parse_args(list(args)) # argparse parsing may raise SystemExit when the command line is bad or # actions that abort early (eg. --help) where given. Catch this and return # the error code as-if we invoked a Borg binary. 
except SystemExit as e: output_text.flush() return e.code, output.getvalue() if binary_output else output.getvalue().decode() ret = archiver.run(args) output_text.flush() return ret, output.getvalue() if binary_output else output.getvalue().decode() finally: sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr def have_gnutar(): if not shutil.which('tar'): return False popen = subprocess.Popen(['tar', '--version'], stdout=subprocess.PIPE) stdout, stderr = popen.communicate() return b'GNU tar' in stdout # check if the binary "borg.exe" is available (for local testing a symlink to virtualenv/bin/borg should do) try: exec_cmd('help', exe='borg.exe', fork=True) BORG_EXES = ['python', 'binary', ] except FileNotFoundError: BORG_EXES = ['python', ] @pytest.fixture(params=BORG_EXES) def cmd(request): if request.param == 'python': exe = None elif request.param == 'binary': exe = 'borg.exe' else: raise ValueError("param must be 'python' or 'binary'") def exec_fn(*args, **kw): return exec_cmd(*args, exe=exe, fork=True, **kw) return exec_fn def test_return_codes(cmd, tmpdir): repo = tmpdir.mkdir('repo') input = tmpdir.mkdir('input') output = tmpdir.mkdir('output') input.join('test_file').write('content') rc, out = cmd('init', '--encryption=none', '%s' % str(repo)) assert rc == EXIT_SUCCESS rc, out = cmd('create', '%s::archive' % repo, str(input)) assert rc == EXIT_SUCCESS with changedir(str(output)): rc, out = cmd('extract', '%s::archive' % repo) assert rc == EXIT_SUCCESS rc, out = cmd('extract', '%s::archive' % repo, 'does/not/match') assert rc == EXIT_WARNING # pattern did not match rc, out = cmd('create', '%s::archive' % repo, str(input)) assert rc == EXIT_ERROR # duplicate archive name """ test_disk_full is very slow and not recommended to be included in daily testing. for this test, an empty, writable 16MB filesystem mounted on DF_MOUNT is required. for speed and other reasons, it is recommended that the underlying block device is in RAM, not a magnetic or flash disk. assuming /tmp is a tmpfs (in memory filesystem), one can use this: dd if=/dev/zero of=/tmp/borg-disk bs=16M count=1 mkfs.ext4 /tmp/borg-disk mkdir /tmp/borg-mount sudo mount /tmp/borg-disk /tmp/borg-mount if the directory does not exist, the test will be skipped. 
""" DF_MOUNT = '/tmp/borg-mount' @pytest.mark.skipif(not os.path.exists(DF_MOUNT), reason="needs a 16MB fs mounted on %s" % DF_MOUNT) def test_disk_full(cmd): def make_files(dir, count, size, rnd=True): shutil.rmtree(dir, ignore_errors=True) os.mkdir(dir) if rnd: count = random.randint(1, count) if size > 1: size = random.randint(1, size) for i in range(count): fn = os.path.join(dir, "file%03d" % i) with open(fn, 'wb') as f: data = os.urandom(size) f.write(data) with environment_variable(BORG_CHECK_I_KNOW_WHAT_I_AM_DOING='YES'): mount = DF_MOUNT assert os.path.exists(mount) repo = os.path.join(mount, 'repo') input = os.path.join(mount, 'input') reserve = os.path.join(mount, 'reserve') for j in range(100): shutil.rmtree(repo, ignore_errors=True) shutil.rmtree(input, ignore_errors=True) # keep some space and some inodes in reserve that we can free up later: make_files(reserve, 80, 100000, rnd=False) rc, out = cmd('init', repo) if rc != EXIT_SUCCESS: print('init', rc, out) assert rc == EXIT_SUCCESS try: success, i = True, 0 while success: i += 1 try: make_files(input, 20, 200000) except OSError as err: if err.errno == errno.ENOSPC: # already out of space break raise try: rc, out = cmd('create', '%s::test%03d' % (repo, i), input) success = rc == EXIT_SUCCESS if not success: print('create', rc, out) finally: # make sure repo is not locked shutil.rmtree(os.path.join(repo, 'lock.exclusive'), ignore_errors=True) os.remove(os.path.join(repo, 'lock.roster')) finally: # now some error happened, likely we are out of disk space. # free some space so we can expect borg to be able to work normally: shutil.rmtree(reserve, ignore_errors=True) rc, out = cmd('list', repo) if rc != EXIT_SUCCESS: print('list', rc, out) rc, out = cmd('check', '--repair', repo) if rc != EXIT_SUCCESS: print('check', rc, out) assert rc == EXIT_SUCCESS class ArchiverTestCaseBase(BaseTestCase): EXE = None # python source based FORK_DEFAULT = False prefix = '' def setUp(self): os.environ['BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'] = 'YES' os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES' os.environ['BORG_PASSPHRASE'] = 'waytooeasyonlyfortests' self.archiver = not self.FORK_DEFAULT and Archiver() or None self.tmpdir = tempfile.mkdtemp() self.repository_path = os.path.join(self.tmpdir, 'repository') self.repository_location = self.prefix + self.repository_path self.input_path = os.path.join(self.tmpdir, 'input') self.output_path = os.path.join(self.tmpdir, 'output') self.keys_path = os.path.join(self.tmpdir, 'keys') self.cache_path = os.path.join(self.tmpdir, 'cache') self.exclude_file_path = os.path.join(self.tmpdir, 'excludes') self.patterns_file_path = os.path.join(self.tmpdir, 'patterns') os.environ['BORG_KEYS_DIR'] = self.keys_path os.environ['BORG_CACHE_DIR'] = self.cache_path os.mkdir(self.input_path) os.chmod(self.input_path, 0o777) # avoid troubles with fakeroot / FUSE os.mkdir(self.output_path) os.mkdir(self.keys_path) os.mkdir(self.cache_path) with open(self.exclude_file_path, 'wb') as fd: fd.write(b'input/file2\n# A comment line, then a blank line\n\n') with open(self.patterns_file_path, 'wb') as fd: fd.write(b'+input/file_important\n- input/file*\n# A comment line, then a blank line\n\n') self._old_wd = os.getcwd() os.chdir(self.tmpdir) def tearDown(self): os.chdir(self._old_wd) # note: ignore_errors=True as workaround for issue #862 shutil.rmtree(self.tmpdir, ignore_errors=True) setup_logging() def cmd(self, *args, **kw): exit_code = kw.pop('exit_code', 0) fork = kw.pop('fork', None) binary_output = 
kw.get('binary_output', False) if fork is None: fork = self.FORK_DEFAULT ret, output = exec_cmd(*args, fork=fork, exe=self.EXE, archiver=self.archiver, **kw) if ret != exit_code: print(output) self.assert_equal(ret, exit_code) # if tests are run with the pure-python msgpack, there will be warnings about # this in the output, which would make a lot of tests fail. pp_msg = PURE_PYTHON_MSGPACK_WARNING.encode() if binary_output else PURE_PYTHON_MSGPACK_WARNING empty = b'' if binary_output else '' output = empty.join(line for line in output.splitlines(keepends=True) if pp_msg not in line) return output def create_src_archive(self, name): self.cmd('create', '--compression=lz4', self.repository_location + '::' + name, src_dir) def open_archive(self, name): repository = Repository(self.repository_path, exclusive=True) with repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) archive = Archive(repository, key, manifest, name) return archive, repository def open_repository(self): return Repository(self.repository_path, exclusive=True) def create_regular_file(self, name, size=0, contents=None): assert not (size != 0 and contents and len(contents) != size), 'size and contents do not match' filename = os.path.join(self.input_path, name) if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) with open(filename, 'wb') as fd: if contents is None: contents = b'X' * size fd.write(contents) def create_test_files(self): """Create a minimal test case including all supported file types """ # File self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('flagfile', size=1024) # Directory self.create_regular_file('dir2/file2', size=1024 * 80) # File mode os.chmod('input/file1', 0o4755) # Hard link if are_hardlinks_supported(): os.link(os.path.join(self.input_path, 'file1'), os.path.join(self.input_path, 'hardlink')) # Symlink if are_symlinks_supported(): os.symlink('somewhere', os.path.join(self.input_path, 'link1')) self.create_regular_file('fusexattr', size=1) if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path): fn = os.fsencode(os.path.join(self.input_path, 'fusexattr')) # ironically, due to the way how fakeroot works, comparing FUSE file xattrs to orig file xattrs # will FAIL if fakeroot supports xattrs, thus we only set the xattr if XATTR_FAKEROOT is False. # This is because fakeroot with xattr-support does not propagate xattrs of the underlying file # into "fakeroot space". Because the xattrs exposed by borgfs are these of an underlying file # (from fakeroots point of view) they are invisible to the test process inside the fakeroot. xattr.setxattr(fn, b'user.foo', b'bar') xattr.setxattr(fn, b'user.empty', b'') # XXX this always fails for me # ubuntu 14.04, on a TMP dir filesystem with user_xattr, using fakeroot # same for newer ubuntu and centos. # if this is supported just on specific platform, platform should be checked first, # so that the test setup for all tests using it does not fail here always for others. 
# xattr.setxattr(os.path.join(self.input_path, 'link1'), b'user.foo_symlink', b'bar_symlink', follow_symlinks=False) # FIFO node if are_fifos_supported(): os.mkfifo(os.path.join(self.input_path, 'fifo1')) if has_lchflags: platform.set_flags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP) try: # Block device os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20)) # Char device os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40)) # File mode os.chmod('input/dir2', 0o555) # if we take away write perms, we need root to remove contents # File owner os.chown('input/file1', 100, 200) # raises OSError invalid argument on cygwin have_root = True # we have (fake)root except PermissionError: have_root = False except OSError as e: # Note: ENOSYS "Function not implemented" happens as non-root on Win 10 Linux Subsystem. if e.errno not in (errno.EINVAL, errno.ENOSYS): raise have_root = False time.sleep(1) # "empty" must have newer timestamp than other files self.create_regular_file('empty', size=0) return have_root class ArchiverTestCase(ArchiverTestCaseBase): requires_hardlinks = pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported') def test_basic_functionality(self): have_root = self.create_test_files() # fork required to test show-rc output output = self.cmd('init', '--encryption=repokey', '--show-version', '--show-rc', self.repository_location, fork=True) self.assert_in('borgbackup version', output) self.assert_in('terminating with success status, rc 0', output) self.cmd('create', '--exclude-nodump', self.repository_location + '::test', 'input') output = self.cmd('create', '--exclude-nodump', '--stats', self.repository_location + '::test.2', 'input') self.assert_in('Archive name: test.2', output) self.assert_in('This archive: ', output) with changedir('output'): self.cmd('extract', self.repository_location + '::test') list_output = self.cmd('list', '--short', self.repository_location) self.assert_in('test', list_output) self.assert_in('test.2', list_output) expected = [ 'input', 'input/bdev', 'input/cdev', 'input/dir2', 'input/dir2/file2', 'input/empty', 'input/file1', 'input/flagfile', ] if are_fifos_supported(): expected.append('input/fifo1') if are_symlinks_supported(): expected.append('input/link1') if are_hardlinks_supported(): expected.append('input/hardlink') if not have_root: # we could not create these device files without (fake)root expected.remove('input/bdev') expected.remove('input/cdev') if has_lchflags: # remove the file we did not backup, so input and output become equal expected.remove('input/flagfile') # this file is UF_NODUMP os.remove(os.path.join('input', 'flagfile')) list_output = self.cmd('list', '--short', self.repository_location + '::test') for name in expected: self.assert_in(name, list_output) self.assert_dirs_equal('input', 'output/input') info_output = self.cmd('info', self.repository_location + '::test') item_count = 4 if has_lchflags else 5 # one file is UF_NODUMP self.assert_in('Number of files: %d' % item_count, info_output) shutil.rmtree(self.cache_path) info_output2 = self.cmd('info', self.repository_location + '::test') def filter(output): # filter for interesting "info" output, ignore cache rebuilding related stuff prefixes = ['Name:', 'Fingerprint:', 'Number of files:', 'This archive:', 'All archives:', 'Chunk index:', ] result = [] for line in output.splitlines(): for prefix in prefixes: if line.startswith(prefix): result.append(line) return '\n'.join(result) # the interesting parts of info_output2 
and info_output should be same self.assert_equal(filter(info_output), filter(info_output2)) @requires_hardlinks def test_create_duplicate_root(self): # setup for #5603 path_a = os.path.join(self.input_path, 'a') path_b = os.path.join(self.input_path, 'b') os.mkdir(path_a) os.mkdir(path_b) hl_a = os.path.join(path_a, 'hardlink') hl_b = os.path.join(path_b, 'hardlink') self.create_regular_file(hl_a, contents=b'123456') os.link(hl_a, hl_b) self.cmd('init', '--encryption=none', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input', 'input') # give input twice! # test if created archive has 'input' contents twice: archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test') paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line] # we have all fs items exactly once! assert sorted(paths) == ['input', 'input/a', 'input/a/hardlink', 'input/b', 'input/b/hardlink'] def test_init_parent_dirs(self): parent_path = os.path.join(self.tmpdir, 'parent1', 'parent2') repository_path = os.path.join(parent_path, 'repository') repository_location = self.prefix + repository_path with pytest.raises(Repository.ParentPathDoesNotExist): # normal borg init does NOT create missing parent dirs self.cmd('init', '--encryption=none', repository_location) # but if told so, it does: self.cmd('init', '--encryption=none', '--make-parent-dirs', repository_location) assert os.path.exists(parent_path) def test_unix_socket(self): self.cmd('init', '--encryption=repokey', self.repository_location) try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.bind(os.path.join(self.input_path, 'unix-socket')) except PermissionError as err: if err.errno == errno.EPERM: pytest.skip('unix sockets disabled or not supported') elif err.errno == errno.EACCES: pytest.skip('permission denied to create unix sockets') self.cmd('create', self.repository_location + '::test', 'input') sock.close() with changedir('output'): self.cmd('extract', self.repository_location + '::test') assert not os.path.exists('input/unix-socket') @pytest.mark.skipif(not are_symlinks_supported(), reason='symlinks not supported') def test_symlink_extract(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') assert os.readlink('input/link1') == 'somewhere' @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime') def test_atime(self): def has_noatime(some_file): atime_before = os.stat(some_file).st_atime_ns try: with open(os.open(some_file, flags_noatime)) as file: file.read() except PermissionError: return False else: atime_after = os.stat(some_file).st_atime_ns noatime_used = flags_noatime != flags_normal return noatime_used and atime_before == atime_after self.create_test_files() atime, mtime = 123456780, 234567890 have_noatime = has_noatime('input/file1') os.utime('input/file1', (atime, mtime)) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', '--atime', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') sti = os.stat('input/file1') sto = os.stat('output/input/file1') assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9 if have_noatime: assert sti.st_atime_ns == sto.st_atime_ns == atime * 1e9 else: # it touched the 
input file's atime while backing it up assert sto.st_atime_ns == atime * 1e9 @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime') @pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime') def test_birthtime(self): self.create_test_files() birthtime, mtime, atime = 946598400, 946684800, 946771200 os.utime('input/file1', (atime, birthtime)) os.utime('input/file1', (atime, mtime)) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') sti = os.stat('input/file1') sto = os.stat('output/input/file1') assert int(sti.st_birthtime * 1e9) == int(sto.st_birthtime * 1e9) == birthtime * 1e9 assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9 @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime') @pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime') def test_nobirthtime(self): self.create_test_files() birthtime, mtime, atime = 946598400, 946684800, 946771200 os.utime('input/file1', (atime, birthtime)) os.utime('input/file1', (atime, mtime)) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', '--nobirthtime', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') sti = os.stat('input/file1') sto = os.stat('output/input/file1') assert int(sti.st_birthtime * 1e9) == birthtime * 1e9 assert int(sto.st_birthtime * 1e9) == mtime * 1e9 assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9 def _extract_repository_id(self, path): with Repository(self.repository_path) as repository: return repository.id def _set_repository_id(self, path, id): config = ConfigParser(interpolation=None) config.read(os.path.join(path, 'config')) config.set('repository', 'id', bin_to_hex(id)) with open(os.path.join(path, 'config'), 'w') as fd: config.write(fd) with Repository(self.repository_path) as repository: return repository.id def test_sparse_file(self): def is_sparse(fn, total_size, hole_size): st = os.stat(fn) assert st.st_size == total_size sparse = True if sparse and hasattr(st, 'st_blocks') and st.st_blocks * 512 >= st.st_size: sparse = False if sparse and has_seek_hole: with open(fn, 'rb') as fd: # only check if the first hole is as expected, because the 2nd hole check # is problematic on xfs due to its "dynamic speculative EOF preallocation try: if fd.seek(0, os.SEEK_HOLE) != 0: sparse = False if fd.seek(0, os.SEEK_DATA) != hole_size: sparse = False except OSError: # OS/FS does not really support SEEK_HOLE/SEEK_DATA sparse = False return sparse filename = os.path.join(self.input_path, 'sparse') content = b'foobar' hole_size = 5 * (1 << CHUNK_MAX_EXP) # 5 full chunker buffers total_size = hole_size + len(content) + hole_size with open(filename, 'wb') as fd: # create a file that has a hole at the beginning and end (if the # OS and filesystem supports sparse files) fd.seek(hole_size, 1) fd.write(content) fd.seek(hole_size, 1) pos = fd.tell() fd.truncate(pos) # we first check if we could create a sparse input file: sparse_support = is_sparse(filename, total_size, hole_size) if sparse_support: # we could create a sparse input file, so creating a backup of it and # extracting it again (as sparse) should 
also work: self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir(self.output_path): self.cmd('extract', '--sparse', self.repository_location + '::test') self.assert_dirs_equal('input', 'output/input') filename = os.path.join(self.output_path, 'input', 'sparse') with open(filename, 'rb') as fd: # check if file contents are as expected self.assert_equal(fd.read(hole_size), b'\0' * hole_size) self.assert_equal(fd.read(len(content)), content) self.assert_equal(fd.read(hole_size), b'\0' * hole_size) self.assert_true(is_sparse(filename, total_size, hole_size)) def test_unusual_filenames(self): filenames = ['normal', 'with some blanks', '(with_parens)', ] for filename in filenames: filename = os.path.join(self.input_path, filename) with open(filename, 'wb'): pass self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') for filename in filenames: with changedir('output'): self.cmd('extract', self.repository_location + '::test', os.path.join('input', filename)) assert os.path.exists(os.path.join('output', 'input', filename)) def test_repository_swap_detection(self): self.create_test_files() os.environ['BORG_PASSPHRASE'] = 'passphrase' self.cmd('init', '--encryption=repokey', self.repository_location) repository_id = self._extract_repository_id(self.repository_path) self.cmd('create', self.repository_location + '::test', 'input') shutil.rmtree(self.repository_path) self.cmd('init', '--encryption=none', self.repository_location) self._set_repository_id(self.repository_path, repository_id) self.assert_equal(repository_id, self._extract_repository_id(self.repository_path)) if self.FORK_DEFAULT: self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR) else: with pytest.raises(Cache.EncryptionMethodMismatch): self.cmd('create', self.repository_location + '::test.2', 'input') def test_repository_swap_detection2(self): self.create_test_files() self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted') os.environ['BORG_PASSPHRASE'] = 'passphrase' self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted') self.cmd('create', self.repository_location + '_encrypted::test', 'input') shutil.rmtree(self.repository_path + '_encrypted') os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted') if self.FORK_DEFAULT: self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR) else: with pytest.raises(Cache.RepositoryAccessAborted): self.cmd('create', self.repository_location + '_encrypted::test.2', 'input') def test_repository_swap_detection_no_cache(self): self.create_test_files() os.environ['BORG_PASSPHRASE'] = 'passphrase' self.cmd('init', '--encryption=repokey', self.repository_location) repository_id = self._extract_repository_id(self.repository_path) self.cmd('create', self.repository_location + '::test', 'input') shutil.rmtree(self.repository_path) self.cmd('init', '--encryption=none', self.repository_location) self._set_repository_id(self.repository_path, repository_id) self.assert_equal(repository_id, self._extract_repository_id(self.repository_path)) self.cmd('delete', '--cache-only', self.repository_location) if self.FORK_DEFAULT: self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR) else: with pytest.raises(Cache.EncryptionMethodMismatch): self.cmd('create', 
self.repository_location + '::test.2', 'input') def test_repository_swap_detection2_no_cache(self): self.create_test_files() self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted') os.environ['BORG_PASSPHRASE'] = 'passphrase' self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted') self.cmd('create', self.repository_location + '_encrypted::test', 'input') self.cmd('delete', '--cache-only', self.repository_location + '_unencrypted') self.cmd('delete', '--cache-only', self.repository_location + '_encrypted') shutil.rmtree(self.repository_path + '_encrypted') os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted') if self.FORK_DEFAULT: self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR) else: with pytest.raises(Cache.RepositoryAccessAborted): self.cmd('create', self.repository_location + '_encrypted::test.2', 'input') def test_repository_swap_detection_repokey_blank_passphrase(self): # Check that a repokey repo with a blank passphrase is considered like a plaintext repo. self.create_test_files() # User initializes her repository with her passphrase self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') # Attacker replaces it with her own repository, which is encrypted but has no passphrase set shutil.rmtree(self.repository_path) with environment_variable(BORG_PASSPHRASE=''): self.cmd('init', '--encryption=repokey', self.repository_location) # Delete cache & security database, AKA switch to user perspective self.cmd('delete', '--cache-only', self.repository_location) repository_id = bin_to_hex(self._extract_repository_id(self.repository_path)) shutil.rmtree(get_security_dir(repository_id)) with environment_variable(BORG_PASSPHRASE=None): # This is the part where the user would be tricked, e.g. she assumes that BORG_PASSPHRASE # is set, while it isn't. Previously this raised no warning, # since the repository is, technically, encrypted. 
if self.FORK_DEFAULT: self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR) else: with pytest.raises(Cache.CacheInitAbortedError): self.cmd('create', self.repository_location + '::test.2', 'input') def test_repository_move(self): self.cmd('init', '--encryption=repokey', self.repository_location) repository_id = bin_to_hex(self._extract_repository_id(self.repository_path)) os.rename(self.repository_path, self.repository_path + '_new') with environment_variable(BORG_RELOCATED_REPO_ACCESS_IS_OK='yes'): self.cmd('info', self.repository_location + '_new') security_dir = get_security_dir(repository_id) with open(os.path.join(security_dir, 'location')) as fd: location = fd.read() assert location == Location(self.repository_location + '_new').canonical_path() # Needs no confirmation anymore self.cmd('info', self.repository_location + '_new') shutil.rmtree(self.cache_path) self.cmd('info', self.repository_location + '_new') shutil.rmtree(security_dir) self.cmd('info', self.repository_location + '_new') for file in ('location', 'key-type', 'manifest-timestamp'): assert os.path.exists(os.path.join(security_dir, file)) def test_security_dir_compat(self): self.cmd('init', '--encryption=repokey', self.repository_location) repository_id = bin_to_hex(self._extract_repository_id(self.repository_path)) security_dir = get_security_dir(repository_id) with open(os.path.join(security_dir, 'location'), 'w') as fd: fd.write('something outdated') # This is fine, because the cache still has the correct information. security_dir and cache can disagree # if older versions are used to confirm a renamed repository. self.cmd('info', self.repository_location) def test_unknown_unencrypted(self): self.cmd('init', '--encryption=none', self.repository_location) repository_id = bin_to_hex(self._extract_repository_id(self.repository_path)) security_dir = get_security_dir(repository_id) # Ok: repository is known self.cmd('info', self.repository_location) # Ok: repository is still known (through security_dir) shutil.rmtree(self.cache_path) self.cmd('info', self.repository_location) # Needs confirmation: cache and security dir both gone (eg. 
another host or rm -rf ~) shutil.rmtree(self.cache_path) shutil.rmtree(security_dir) if self.FORK_DEFAULT: self.cmd('info', self.repository_location, exit_code=EXIT_ERROR) else: with pytest.raises(Cache.CacheInitAbortedError): self.cmd('info', self.repository_location) with environment_variable(BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK='yes'): self.cmd('info', self.repository_location) def test_strip_components(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('dir/file') self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test', '--strip-components', '3') self.assert_true(not os.path.exists('file')) with self.assert_creates_file('file'): self.cmd('extract', self.repository_location + '::test', '--strip-components', '2') with self.assert_creates_file('dir/file'): self.cmd('extract', self.repository_location + '::test', '--strip-components', '1') with self.assert_creates_file('input/dir/file'): self.cmd('extract', self.repository_location + '::test', '--strip-components', '0') def _extract_hardlinks_setup(self): os.mkdir(os.path.join(self.input_path, 'dir1')) os.mkdir(os.path.join(self.input_path, 'dir1/subdir')) self.create_regular_file('source', contents=b'123456') os.link(os.path.join(self.input_path, 'source'), os.path.join(self.input_path, 'abba')) os.link(os.path.join(self.input_path, 'source'), os.path.join(self.input_path, 'dir1/hardlink')) os.link(os.path.join(self.input_path, 'source'), os.path.join(self.input_path, 'dir1/subdir/hardlink')) self.create_regular_file('dir1/source2') os.link(os.path.join(self.input_path, 'dir1/source2'), os.path.join(self.input_path, 'dir1/aaaa')) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') @requires_hardlinks @unittest.skipUnless(llfuse, 'llfuse not installed') def test_fuse_mount_hardlinks(self): self._extract_hardlinks_setup() mountpoint = os.path.join(self.tmpdir, 'mountpoint') # we need to get rid of permissions checking because fakeroot causes issues with it. # On all platforms, borg defaults to "default_permissions" and we need to get rid of it via "ignore_permissions". # On macOS (darwin), we additionally need "defer_permissions" to switch off the checks in osxfuse. 
if sys.platform == 'darwin': ignore_perms = ['-o', 'ignore_permissions,defer_permissions'] else: ignore_perms = ['-o', 'ignore_permissions'] with self.fuse_mount(self.repository_location + '::test', mountpoint, '--strip-components=2', *ignore_perms), \ changedir(mountpoint): assert os.stat('hardlink').st_nlink == 2 assert os.stat('subdir/hardlink').st_nlink == 2 assert open('subdir/hardlink', 'rb').read() == b'123456' assert os.stat('aaaa').st_nlink == 2 assert os.stat('source2').st_nlink == 2 with self.fuse_mount(self.repository_location + '::test', mountpoint, 'input/dir1', *ignore_perms), \ changedir(mountpoint): assert os.stat('input/dir1/hardlink').st_nlink == 2 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2 assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456' assert os.stat('input/dir1/aaaa').st_nlink == 2 assert os.stat('input/dir1/source2').st_nlink == 2 with self.fuse_mount(self.repository_location + '::test', mountpoint, *ignore_perms), \ changedir(mountpoint): assert os.stat('input/source').st_nlink == 4 assert os.stat('input/abba').st_nlink == 4 assert os.stat('input/dir1/hardlink').st_nlink == 4 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4 assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456' @requires_hardlinks def test_extract_hardlinks1(self): self._extract_hardlinks_setup() with changedir('output'): self.cmd('extract', self.repository_location + '::test') assert os.stat('input/source').st_nlink == 4 assert os.stat('input/abba').st_nlink == 4 assert os.stat('input/dir1/hardlink').st_nlink == 4 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4 assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456' @requires_hardlinks def test_extract_hardlinks2(self): self._extract_hardlinks_setup() with changedir('output'): self.cmd('extract', self.repository_location + '::test', '--strip-components', '2') assert os.stat('hardlink').st_nlink == 2 assert os.stat('subdir/hardlink').st_nlink == 2 assert open('subdir/hardlink', 'rb').read() == b'123456' assert os.stat('aaaa').st_nlink == 2 assert os.stat('source2').st_nlink == 2 with changedir('output'): self.cmd('extract', self.repository_location + '::test', 'input/dir1') assert os.stat('input/dir1/hardlink').st_nlink == 2 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2 assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456' assert os.stat('input/dir1/aaaa').st_nlink == 2 assert os.stat('input/dir1/source2').st_nlink == 2 @requires_hardlinks def test_extract_hardlinks_twice(self): # setup for #5603 path_a = os.path.join(self.input_path, 'a') path_b = os.path.join(self.input_path, 'b') os.mkdir(path_a) os.mkdir(path_b) hl_a = os.path.join(path_a, 'hardlink') hl_b = os.path.join(path_b, 'hardlink') self.create_regular_file(hl_a, contents=b'123456') os.link(hl_a, hl_b) self.cmd('init', '--encryption=none', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input', 'input') # give input twice! 
# now test extraction with changedir('output'): self.cmd('extract', self.repository_location + '::test') # if issue #5603 happens, extraction gives rc == 1 (triggering AssertionError) and warnings like: # input/a/hardlink: link: [Errno 2] No such file or directory: 'input/a/hardlink' -> 'input/a/hardlink' # input/b/hardlink: link: [Errno 2] No such file or directory: 'input/a/hardlink' -> 'input/b/hardlink' # otherwise, when fixed, the hardlinks should be there and have a link count of 2 assert os.stat('input/a/hardlink').st_nlink == 2 assert os.stat('input/b/hardlink').st_nlink == 2 def test_extract_include_exclude(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) self.create_regular_file('file3', size=1024 * 80) self.create_regular_file('file4', size=1024 * 80) self.cmd('create', '--exclude=input/file4', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test', 'input/file1', ) self.assert_equal(sorted(os.listdir('output/input')), ['file1']) with changedir('output'): self.cmd('extract', '--exclude=input/file2', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3']) with changedir('output'): self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3']) def test_extract_include_exclude_regex(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) self.create_regular_file('file3', size=1024 * 80) self.create_regular_file('file4', size=1024 * 80) self.create_regular_file('file333', size=1024 * 80) # Create with regular expression exclusion for file4 self.cmd('create', '--exclude=re:input/file4$', self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333']) shutil.rmtree('output/input') # Extract with regular expression exclusion with changedir('output'): self.cmd('extract', '--exclude=re:file3+', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2']) shutil.rmtree('output/input') # Combine --exclude with fnmatch and regular expression with changedir('output'): self.cmd('extract', '--exclude=input/file2', '--exclude=re:file[01]', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file3', 'file333']) shutil.rmtree('output/input') # Combine --exclude-from and regular expression exclusion with changedir('output'): self.cmd('extract', '--exclude-from=' + self.exclude_file_path, '--exclude=re:file1', '--exclude=re:file(\\d)\\1\\1$', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file3']) def test_extract_include_exclude_regex_from_file(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) self.create_regular_file('file3', size=1024 * 80) self.create_regular_file('file4', size=1024 * 80) self.create_regular_file('file333', size=1024 * 80) self.create_regular_file('aa:something', size=1024 * 80) # Create 
while excluding using mixed pattern styles with open(self.exclude_file_path, 'wb') as fd: fd.write(b're:input/file4$\n') fd.write(b'fm:*aa:*thing\n') self.cmd('create', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333']) shutil.rmtree('output/input') # Exclude using regular expression with open(self.exclude_file_path, 'wb') as fd: fd.write(b're:file3+\n') with changedir('output'): self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2']) shutil.rmtree('output/input') # Mixed exclude pattern styles with open(self.exclude_file_path, 'wb') as fd: fd.write(b're:file(\\d)\\1\\1$\n') fd.write(b'fm:nothingwillmatchthis\n') fd.write(b'*/file1\n') fd.write(b're:file2$\n') with changedir('output'): self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file3']) def test_extract_with_pattern(self): self.cmd("init", '--encryption=repokey', self.repository_location) self.create_regular_file("file1", size=1024 * 80) self.create_regular_file("file2", size=1024 * 80) self.create_regular_file("file3", size=1024 * 80) self.create_regular_file("file4", size=1024 * 80) self.create_regular_file("file333", size=1024 * 80) self.cmd("create", self.repository_location + "::test", "input") # Extract everything with regular expression with changedir("output"): self.cmd("extract", self.repository_location + "::test", "re:.*") self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file3", "file333", "file4"]) shutil.rmtree("output/input") # Extract with pattern while also excluding files with changedir("output"): self.cmd("extract", "--exclude=re:file[34]$", self.repository_location + "::test", r"re:file\d$") self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2"]) shutil.rmtree("output/input") # Combine --exclude with pattern for extraction with changedir("output"): self.cmd("extract", "--exclude=input/file1", self.repository_location + "::test", "re:file[12]$") self.assert_equal(sorted(os.listdir("output/input")), ["file2"]) shutil.rmtree("output/input") # Multiple pattern with changedir("output"): self.cmd("extract", self.repository_location + "::test", "fm:input/file1", "fm:*file33*", "input/file2") self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file333"]) def test_extract_list_output(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file', size=1024 * 80) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): output = self.cmd('extract', self.repository_location + '::test') self.assert_not_in("input/file", output) shutil.rmtree('output/input') with changedir('output'): output = self.cmd('extract', '--info', self.repository_location + '::test') self.assert_not_in("input/file", output) shutil.rmtree('output/input') with changedir('output'): output = self.cmd('extract', '--list', self.repository_location + '::test') self.assert_in("input/file", output) shutil.rmtree('output/input') with changedir('output'): output = self.cmd('extract', '--list', '--info', self.repository_location + '::test') self.assert_in("input/file", output) def 
test_extract_progress(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file', size=1024 * 80) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): output = self.cmd('extract', self.repository_location + '::test', '--progress') assert 'Extracting:' in output def _create_test_caches(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('cache1/%s' % CACHE_TAG_NAME, contents=CACHE_TAG_CONTENTS + b' extra stuff') self.create_regular_file('cache2/%s' % CACHE_TAG_NAME, contents=b'invalid signature') os.mkdir('input/cache3') if are_hardlinks_supported(): os.link('input/cache1/%s' % CACHE_TAG_NAME, 'input/cache3/%s' % CACHE_TAG_NAME) else: self.create_regular_file('cache3/%s' % CACHE_TAG_NAME, contents=CACHE_TAG_CONTENTS + b' extra stuff') def test_create_stdin(self): self.cmd('init', '--encryption=repokey', self.repository_location) input_data = b'\x00foo\n\nbar\n \n' self.cmd('create', self.repository_location + '::test', '-', input=input_data) item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test')) assert item['uid'] == 0 assert item['gid'] == 0 assert item['size'] == len(input_data) assert item['path'] == 'stdin' extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test', binary_output=True) assert extracted_data == input_data def test_create_content_from_command(self): self.cmd('init', '--encryption=repokey', self.repository_location) input_data = 'some test content' name = 'a/b/c' self.cmd('create', '--stdin-name', name, '--content-from-command', self.repository_location + '::test', '--', 'echo', input_data) item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test')) assert item['uid'] == 0 assert item['gid'] == 0 assert item['size'] == len(input_data) + 1 # `echo` adds newline assert item['path'] == name extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test') assert extracted_data == input_data + '\n' def test_create_content_from_command_with_failed_command(self): self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--content-from-command', self.repository_location + '::test', '--', 'sh', '-c', 'exit 73;', exit_code=2) assert output.endswith("Command 'sh' exited with status 73\n") archive_list = json.loads(self.cmd('list', '--json', self.repository_location)) assert archive_list['archives'] == [] def test_create_content_from_command_missing_command(self): self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--content-from-command', self.repository_location + '::test', exit_code=2) assert output.endswith('No command given.\n') def test_create_paths_from_stdin(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file("file1", size=1024 * 80) self.create_regular_file("dir1/file2", size=1024 * 80) self.create_regular_file("dir1/file3", size=1024 * 80) self.create_regular_file("file4", size=1024 * 80) input_data = b'input/file1\0input/dir1\0input/file4' self.cmd('create', '--paths-from-stdin', '--paths-delimiter', '\\0', self.repository_location + '::test', input=input_data) archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test') paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line] assert paths == ['input/file1', 
'input/dir1', 'input/file4'] def test_create_paths_from_command(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file("file1", size=1024 * 80) self.create_regular_file("file2", size=1024 * 80) self.create_regular_file("file3", size=1024 * 80) self.create_regular_file("file4", size=1024 * 80) input_data = 'input/file1\ninput/file2\ninput/file3' self.cmd('create', '--paths-from-command', self.repository_location + '::test', '--', 'echo', input_data) archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test') paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line] assert paths == ['input/file1', 'input/file2', 'input/file3'] def test_create_paths_from_command_with_failed_command(self): self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--paths-from-command', self.repository_location + '::test', '--', 'sh', '-c', 'exit 73;', exit_code=2) assert output.endswith("Command 'sh' exited with status 73\n") archive_list = json.loads(self.cmd('list', '--json', self.repository_location)) assert archive_list['archives'] == [] def test_create_paths_from_command_missing_command(self): self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--paths-from-command', self.repository_location + '::test', exit_code=2) assert output.endswith('No command given.\n') def test_create_without_root(self): """test create without a root""" self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', exit_code=2) def test_create_pattern_root(self): """test create with only a root pattern""" self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) output = self.cmd('create', '-v', '--list', '--pattern=R input', self.repository_location + '::test') self.assert_in("A input/file1", output) self.assert_in("A input/file2", output) def test_create_pattern(self): """test file patterns during create""" self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) self.create_regular_file('file_important', size=1024 * 80) output = self.cmd('create', '-v', '--list', '--pattern=+input/file_important', '--pattern=-input/file*', self.repository_location + '::test', 'input') self.assert_in("A input/file_important", output) self.assert_in('x input/file1', output) self.assert_in('x input/file2', output) def test_create_pattern_file(self): """test file patterns during create""" self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) self.create_regular_file('otherfile', size=1024 * 80) self.create_regular_file('file_important', size=1024 * 80) output = self.cmd('create', '-v', '--list', '--pattern=-input/otherfile', '--patterns-from=' + self.patterns_file_path, self.repository_location + '::test', 'input') self.assert_in("A input/file_important", output) self.assert_in('x input/file1', output) self.assert_in('x input/file2', output) self.assert_in('x input/otherfile', output) def test_create_pattern_exclude_folder_but_recurse(self): """test when patterns exclude a parent folder, but include a child""" self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2') with 
open(self.patterns_file_path2, 'wb') as fd: fd.write(b'+ input/x/b\n- input/x*\n') self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('x/a/foo_a', size=1024 * 80) self.create_regular_file('x/b/foo_b', size=1024 * 80) self.create_regular_file('y/foo_y', size=1024 * 80) output = self.cmd('create', '-v', '--list', '--patterns-from=' + self.patterns_file_path2, self.repository_location + '::test', 'input') self.assert_in('x input/x/a/foo_a', output) self.assert_in("A input/x/b/foo_b", output) self.assert_in('A input/y/foo_y', output) def test_create_pattern_exclude_folder_no_recurse(self): """test when patterns exclude a parent folder, but include a child""" self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2') with open(self.patterns_file_path2, 'wb') as fd: fd.write(b'+ input/x/b\n! input/x*\n') self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('x/a/foo_a', size=1024 * 80) self.create_regular_file('x/b/foo_b', size=1024 * 80) self.create_regular_file('y/foo_y', size=1024 * 80) output = self.cmd('create', '-v', '--list', '--patterns-from=' + self.patterns_file_path2, self.repository_location + '::test', 'input') self.assert_not_in('input/x/a/foo_a', output) self.assert_not_in('input/x/a', output) self.assert_in('A input/y/foo_y', output) def test_create_pattern_intermediate_folders_first(self): """test that intermediate folders appear first when patterns exclude a parent folder but include a child""" self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2') with open(self.patterns_file_path2, 'wb') as fd: fd.write(b'+ input/x/a\n+ input/x/b\n- input/x*\n') self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('x/a/foo_a', size=1024 * 80) self.create_regular_file('x/b/foo_b', size=1024 * 80) with changedir('input'): self.cmd('create', '--patterns-from=' + self.patterns_file_path2, self.repository_location + '::test', '.') # list the archive and verify that the "intermediate" folders appear before # their contents out = self.cmd('list', '--format', '{type} {path}{NL}', self.repository_location + '::test') out_list = out.splitlines() self.assert_in('d x/a', out_list) self.assert_in('d x/b', out_list) assert out_list.index('d x/a') < out_list.index('- x/a/foo_a') assert out_list.index('d x/b') < out_list.index('- x/b/foo_b') def test_create_no_cache_sync(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('delete', '--cache-only', self.repository_location) create_json = json.loads(self.cmd('create', '--no-cache-sync', self.repository_location + '::test', 'input', '--json', '--error')) # ignore experimental warning info_json = json.loads(self.cmd('info', self.repository_location + '::test', '--json')) create_stats = create_json['cache']['stats'] info_stats = info_json['cache']['stats'] assert create_stats == info_stats self.cmd('delete', '--cache-only', self.repository_location) self.cmd('create', '--no-cache-sync', self.repository_location + '::test2', 'input') self.cmd('info', self.repository_location) self.cmd('check', self.repository_location) def test_extract_pattern_opt(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) self.create_regular_file('file_important', size=1024 * 80) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): 
self.cmd('extract', '--pattern=+input/file_important', '--pattern=-input/file*', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file_important']) def _assert_test_caches(self): with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1']) self.assert_equal(sorted(os.listdir('output/input/cache2')), [CACHE_TAG_NAME]) def test_exclude_caches(self): self._create_test_caches() self.cmd('create', '--exclude-caches', self.repository_location + '::test', 'input') self._assert_test_caches() def test_recreate_exclude_caches(self): self._create_test_caches() self.cmd('create', self.repository_location + '::test', 'input') self.cmd('recreate', '--exclude-caches', self.repository_location + '::test') self._assert_test_caches() def _create_test_tagged(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('tagged1/.NOBACKUP') self.create_regular_file('tagged2/00-NOBACKUP') self.create_regular_file('tagged3/.NOBACKUP/file2', size=1024) def _assert_test_tagged(self): with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file1']) def test_exclude_tagged(self): self._create_test_tagged() self.cmd('create', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP', self.repository_location + '::test', 'input') self._assert_test_tagged() def test_recreate_exclude_tagged(self): self._create_test_tagged() self.cmd('create', self.repository_location + '::test', 'input') self.cmd('recreate', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP', self.repository_location + '::test') self._assert_test_tagged() def _create_test_keep_tagged(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file0', size=1024) self.create_regular_file('tagged1/.NOBACKUP1') self.create_regular_file('tagged1/file1', size=1024) self.create_regular_file('tagged2/.NOBACKUP2/subfile1', size=1024) self.create_regular_file('tagged2/file2', size=1024) self.create_regular_file('tagged3/%s' % CACHE_TAG_NAME, contents=CACHE_TAG_CONTENTS + b' extra stuff') self.create_regular_file('tagged3/file3', size=1024) self.create_regular_file('taggedall/.NOBACKUP1') self.create_regular_file('taggedall/.NOBACKUP2/subfile1', size=1024) self.create_regular_file('taggedall/%s' % CACHE_TAG_NAME, contents=CACHE_TAG_CONTENTS + b' extra stuff') self.create_regular_file('taggedall/file4', size=1024) def _assert_test_keep_tagged(self): with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_equal(sorted(os.listdir('output/input')), ['file0', 'tagged1', 'tagged2', 'tagged3', 'taggedall']) self.assert_equal(os.listdir('output/input/tagged1'), ['.NOBACKUP1']) self.assert_equal(os.listdir('output/input/tagged2'), ['.NOBACKUP2']) self.assert_equal(os.listdir('output/input/tagged3'), [CACHE_TAG_NAME]) self.assert_equal(sorted(os.listdir('output/input/taggedall')), ['.NOBACKUP1', '.NOBACKUP2', CACHE_TAG_NAME, ]) def test_exclude_keep_tagged(self): self._create_test_keep_tagged() self.cmd('create', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2', '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test', 'input') self._assert_test_keep_tagged() def test_recreate_exclude_keep_tagged(self): 
self._create_test_keep_tagged() self.cmd('create', self.repository_location + '::test', 'input') self.cmd('recreate', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2', '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test') self._assert_test_keep_tagged() @pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported') def test_recreate_hardlinked_tags(self): # test for issue #4911 self.cmd('init', '--encryption=none', self.repository_location) self.create_regular_file('file1', contents=CACHE_TAG_CONTENTS) # "wrong" filename, but correct tag contents os.mkdir(os.path.join(self.input_path, 'subdir')) # to make sure the tag is encountered *after* file1 os.link(os.path.join(self.input_path, 'file1'), os.path.join(self.input_path, 'subdir', CACHE_TAG_NAME)) # correct tag name, hardlink to file1 self.cmd('create', self.repository_location + '::test', 'input') # in the "test" archive, we now have, in this order: # - a regular file item for "file1" # - a hardlink item for "CACHEDIR.TAG" referring back to file1 for its contents self.cmd('recreate', '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test') # if issue #4911 is present, the recreate will crash with a KeyError for "input/file1" @pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='Linux capabilities test, requires fakeroot >= 1.20.2') def test_extract_capabilities(self): fchown = os.fchown # We need to manually patch chown to get the behaviour Linux has, since fakeroot does not # accurately model the interaction of chown(2) and Linux capabilities, i.e. it does not remove them. def patched_fchown(fd, uid, gid): xattr.setxattr(fd, b'security.capability', b'', follow_symlinks=False) fchown(fd, uid, gid) # The capability descriptor used here is valid and taken from a /usr/bin/ping capabilities = b'\x01\x00\x00\x02\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' self.create_regular_file('file') xattr.setxattr(b'input/file', b'security.capability', capabilities) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): with patch.object(os, 'fchown', patched_fchown): self.cmd('extract', self.repository_location + '::test') assert xattr.getxattr(b'input/file', b'security.capability') == capabilities @pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='xattr not supported on this system or on this version of' 'fakeroot') def test_extract_xattrs_errors(self): def patched_setxattr_E2BIG(*args, **kwargs): raise OSError(errno.E2BIG, 'E2BIG') def patched_setxattr_ENOTSUP(*args, **kwargs): raise OSError(errno.ENOTSUP, 'ENOTSUP') def patched_setxattr_EACCES(*args, **kwargs): raise OSError(errno.EACCES, 'EACCES') self.create_regular_file('file') xattr.setxattr(b'input/file', b'user.attribute', b'value') self.cmd('init', self.repository_location, '-e' 'none') self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): input_abspath = os.path.abspath('input/file') with patch.object(xattr, 'setxattr', patched_setxattr_E2BIG): out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING) assert ': when setting extended attribute user.attribute: too big for this filesystem\n' in out os.remove(input_abspath) with patch.object(xattr, 'setxattr', patched_setxattr_ENOTSUP): out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING) assert ': when setting extended attribute 
user.attribute: xattrs not supported on this filesystem\n' in out os.remove(input_abspath) with patch.object(xattr, 'setxattr', patched_setxattr_EACCES): out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING) assert ': when setting extended attribute user.attribute: Permission denied\n' in out assert os.path.isfile(input_abspath) def test_path_normalization(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('dir1/dir2/file', size=1024 * 80) with changedir('input/dir1/dir2'): self.cmd('create', self.repository_location + '::test', '../../../input/dir1/../dir1/dir2/..') output = self.cmd('list', self.repository_location + '::test') self.assert_not_in('..', output) self.assert_in(' input/dir1/dir2/file', output) def test_exclude_normalization(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('file2', size=1024 * 80) with changedir('input'): self.cmd('create', '--exclude=file1', self.repository_location + '::test1', '.') with changedir('output'): self.cmd('extract', self.repository_location + '::test1') self.assert_equal(sorted(os.listdir('output')), ['file2']) with changedir('input'): self.cmd('create', '--exclude=./file1', self.repository_location + '::test2', '.') with changedir('output'): self.cmd('extract', self.repository_location + '::test2') self.assert_equal(sorted(os.listdir('output')), ['file2']) self.cmd('create', '--exclude=input/./file1', self.repository_location + '::test3', 'input') with changedir('output'): self.cmd('extract', self.repository_location + '::test3') self.assert_equal(sorted(os.listdir('output/input')), ['file2']) def test_repeated_files(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input', 'input') def test_overwrite(self): self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('dir2/file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') # Overwriting regular files and directories should be supported os.mkdir('output/input') os.mkdir('output/input/file1') os.mkdir('output/input/dir2') with changedir('output'): self.cmd('extract', self.repository_location + '::test') self.assert_dirs_equal('input', 'output/input') # But non-empty dirs should fail os.unlink('output/input/file1') os.mkdir('output/input/file1') os.mkdir('output/input/file1/dir') with changedir('output'): self.cmd('extract', self.repository_location + '::test', exit_code=1) def test_rename(self): self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('dir2/file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') self.cmd('create', self.repository_location + '::test.2', 'input') self.cmd('extract', '--dry-run', self.repository_location + '::test') self.cmd('extract', '--dry-run', self.repository_location + '::test.2') self.cmd('rename', self.repository_location + '::test', 'test.3') self.cmd('extract', '--dry-run', self.repository_location + '::test.2') self.cmd('rename', self.repository_location + '::test.2', 'test.4') self.cmd('extract', '--dry-run', self.repository_location + '::test.3') self.cmd('extract', '--dry-run', self.repository_location + 
'::test.4') # Make sure both archives have been renamed with Repository(self.repository_path) as repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) self.assert_equal(len(manifest.archives), 2) self.assert_in('test.3', manifest.archives) self.assert_in('test.4', manifest.archives) def test_info(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') info_repo = self.cmd('info', self.repository_location) assert 'All archives:' in info_repo info_archive = self.cmd('info', self.repository_location + '::test') assert 'Archive name: test\n' in info_archive info_archive = self.cmd('info', '--first', '1', self.repository_location) assert 'Archive name: test\n' in info_archive def test_info_json(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') info_repo = json.loads(self.cmd('info', '--json', self.repository_location)) repository = info_repo['repository'] assert len(repository['id']) == 64 assert 'last_modified' in repository assert datetime.strptime(repository['last_modified'], ISO_FORMAT) # must not raise assert info_repo['encryption']['mode'] == 'repokey' assert 'keyfile' not in info_repo['encryption'] cache = info_repo['cache'] stats = cache['stats'] assert all(isinstance(o, int) for o in stats.values()) assert all(key in stats for key in ('total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size')) info_archive = json.loads(self.cmd('info', '--json', self.repository_location + '::test')) assert info_repo['repository'] == info_archive['repository'] assert info_repo['cache'] == info_archive['cache'] archives = info_archive['archives'] assert len(archives) == 1 archive = archives[0] assert archive['name'] == 'test' assert isinstance(archive['command_line'], list) assert isinstance(archive['duration'], float) assert len(archive['id']) == 64 assert 'stats' in archive assert datetime.strptime(archive['start'], ISO_FORMAT) assert datetime.strptime(archive['end'], ISO_FORMAT) def test_comment(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test1', 'input') self.cmd('create', '--comment', 'this is the comment', self.repository_location + '::test2', 'input') self.cmd('create', '--comment', '"deleted" comment', self.repository_location + '::test3', 'input') self.cmd('create', '--comment', 'preserved comment', self.repository_location + '::test4', 'input') assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test1') assert 'Comment: this is the comment' in self.cmd('info', self.repository_location + '::test2') self.cmd('recreate', self.repository_location + '::test1', '--comment', 'added comment') self.cmd('recreate', self.repository_location + '::test2', '--comment', 'modified comment') self.cmd('recreate', self.repository_location + '::test3', '--comment', '') self.cmd('recreate', self.repository_location + '::test4', '12345') assert 'Comment: added comment' in self.cmd('info', self.repository_location + '::test1') assert 'Comment: modified comment' in self.cmd('info', self.repository_location + '::test2') assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test3') assert 'Comment: preserved comment' in 
self.cmd('info', self.repository_location + '::test4') def test_delete(self): self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('dir2/file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') self.cmd('create', self.repository_location + '::test.2', 'input') self.cmd('create', self.repository_location + '::test.3', 'input') self.cmd('create', self.repository_location + '::another_test.1', 'input') self.cmd('create', self.repository_location + '::another_test.2', 'input') self.cmd('extract', '--dry-run', self.repository_location + '::test') self.cmd('extract', '--dry-run', self.repository_location + '::test.2') self.cmd('delete', '--prefix', 'another_', self.repository_location) self.cmd('delete', '--last', '1', self.repository_location) self.cmd('delete', self.repository_location + '::test') self.cmd('extract', '--dry-run', self.repository_location + '::test.2') output = self.cmd('delete', '--stats', self.repository_location + '::test.2') self.assert_in('Deleted data:', output) # Make sure all data except the manifest has been deleted with Repository(self.repository_path) as repository: self.assert_equal(len(repository), 1) def test_delete_multiple(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test1', 'input') self.cmd('create', self.repository_location + '::test2', 'input') self.cmd('create', self.repository_location + '::test3', 'input') self.cmd('delete', self.repository_location + '::test1', 'test2') self.cmd('extract', '--dry-run', self.repository_location + '::test3') self.cmd('delete', self.repository_location, 'test3') assert not self.cmd('list', self.repository_location) def test_delete_repo(self): self.create_regular_file('file1', size=1024 * 80) self.create_regular_file('dir2/file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') self.cmd('create', self.repository_location + '::test.2', 'input') os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'no' self.cmd('delete', self.repository_location, exit_code=2) assert os.path.exists(self.repository_path) os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES' self.cmd('delete', self.repository_location) # Make sure the repo is gone self.assertFalse(os.path.exists(self.repository_path)) def test_delete_force(self): self.cmd('init', '--encryption=none', self.repository_location) self.create_src_archive('test') with Repository(self.repository_path, exclusive=True) as repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) archive = Archive(repository, key, manifest, 'test') for item in archive.iter_items(): if item.path.endswith('testsuite/archiver.py'): repository.delete(item.chunks[-1].id) break else: assert False # missed the file repository.commit(compact=False) output = self.cmd('delete', '--force', self.repository_location + '::test') self.assert_in('deleted archive was corrupted', output) self.cmd('check', '--repair', self.repository_location) output = self.cmd('list', self.repository_location) self.assert_not_in('test', output) def test_delete_double_force(self): self.cmd('init', '--encryption=none', self.repository_location) self.create_src_archive('test') with Repository(self.repository_path, exclusive=True) as repository: manifest, key = 
            archive = Archive(repository, key, manifest, 'test')
            id = archive.metadata.items[0]
            repository.put(id, b'corrupted items metadata stream chunk')
            repository.commit(compact=False)
        self.cmd('delete', '--force', '--force', self.repository_location + '::test')
        self.cmd('check', '--repair', self.repository_location)
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test', output)

    def test_corrupted_repository(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
        output = self.cmd('check', '--show-version', self.repository_location)
        self.assert_in('borgbackup version', output)  # implied output even without --info given
        self.assert_not_in('Starting repository check', output)  # --info not given for root logger
        name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[1]
        with open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+b') as fd:
            fd.seek(100)
            fd.write(b'XXXX')
        output = self.cmd('check', '--info', self.repository_location, exit_code=1)
        self.assert_in('Starting repository check', output)  # --info given for root logger

    def test_readonly_check(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('check', '--verify-data', self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('check', '--verify-data', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('check', '--verify-data', self.repository_location, '--bypass-lock')

    def test_readonly_diff(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('a')
        self.create_src_archive('b')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('diff', '%s::a' % self.repository_location, 'b', exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('diff', '%s::a' % self.repository_location, 'b')
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('diff', '%s::a' % self.repository_location, 'b', '--bypass-lock')

    def test_readonly_export_tar(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar')
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', '--bypass-lock')

    def test_readonly_extract(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('extract', '%s::test' % self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('extract', '%s::test' % self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('extract', '%s::test' % self.repository_location, '--bypass-lock')

    def test_readonly_info(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('info', self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('info', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('info', self.repository_location, '--bypass-lock')

    def test_readonly_list(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('list', self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('list', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('list', self.repository_location, '--bypass-lock')

    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_readonly_mount(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                with self.fuse_mount(self.repository_location, exit_code=EXIT_ERROR):
                    pass
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    # self.fuse_mount always assumes fork=True, so for this test we have to manually set fork=False
                    with self.fuse_mount(self.repository_location, fork=False):
                        pass
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            with self.fuse_mount(self.repository_location, None, '--bypass-lock'):
                pass

    @pytest.mark.skipif('BORG_TESTS_IGNORE_MODES' in os.environ, reason='modes unreliable')
    def test_umask(self):
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        mode = os.stat(self.repository_path).st_mode
        self.assertEqual(stat.S_IMODE(mode), 0o700)

    def test_create_dry_run(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--dry-run', self.repository_location + '::test', 'input')
        # Make sure no archive has been created
        with Repository(self.repository_path) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        self.assert_equal(len(manifest.archives), 0)

    def add_unknown_feature(self, operation):
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            manifest.config[b'feature_flags'] = {operation.value.encode(): {b'mandatory': [b'unknown-feature']}}
            manifest.write()
            repository.commit(compact=False)

    def cmd_raises_unknown_feature(self, args):
        if self.FORK_DEFAULT:
            self.cmd(*args, exit_code=EXIT_ERROR)
        else:
            with pytest.raises(MandatoryFeatureUnsupported) as excinfo:
                self.cmd(*args)
            assert excinfo.value.args == (['unknown-feature'],)

    def test_unknown_feature_on_create(self):
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.add_unknown_feature(Manifest.Operation.WRITE)
        self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])

    def test_unknown_feature_on_cache_sync(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('delete', '--cache-only', self.repository_location)
        self.add_unknown_feature(Manifest.Operation.READ)
        self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])

    def test_unknown_feature_on_change_passphrase(self):
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.add_unknown_feature(Manifest.Operation.CHECK)
        self.cmd_raises_unknown_feature(['key', 'change-passphrase', self.repository_location])

    def test_unknown_feature_on_read(self):
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.READ)
        with changedir('output'):
            self.cmd_raises_unknown_feature(['extract', self.repository_location + '::test'])
        self.cmd_raises_unknown_feature(['list', self.repository_location])
        self.cmd_raises_unknown_feature(['info', self.repository_location + '::test'])

    def test_unknown_feature_on_rename(self):
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.CHECK)
        self.cmd_raises_unknown_feature(['rename', self.repository_location + '::test', 'other'])

    def test_unknown_feature_on_delete(self):
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.DELETE)
        # delete of an archive raises
        self.cmd_raises_unknown_feature(['delete', self.repository_location + '::test'])
        self.cmd_raises_unknown_feature(['prune', '--keep-daily=3', self.repository_location])
        # delete of the whole repository ignores features
        self.cmd('delete', self.repository_location)

    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_unknown_feature_on_mount(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.READ)
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        # XXX this might hang if it doesn't raise an error
        self.cmd_raises_unknown_feature(['mount', self.repository_location + '::test', mountpoint])

    @pytest.mark.allow_cache_wipe
    def test_unknown_mandatory_feature_in_cache(self):
        if
self.prefix: path_prefix = 'ssh://__testsuite__' else: path_prefix = '' print(self.cmd('init', '--encryption=repokey', self.repository_location)) with Repository(self.repository_path, exclusive=True) as repository: if path_prefix: repository._location = Location(self.repository_location) manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) with Cache(repository, key, manifest) as cache: cache.begin_txn() cache.cache_config.mandatory_features = set(['unknown-feature']) cache.commit() if self.FORK_DEFAULT: self.cmd('create', self.repository_location + '::test', 'input') else: called = False wipe_cache_safe = LocalCache.wipe_cache def wipe_wrapper(*args): nonlocal called called = True wipe_cache_safe(*args) with patch.object(LocalCache, 'wipe_cache', wipe_wrapper): self.cmd('create', self.repository_location + '::test', 'input') assert called with Repository(self.repository_path, exclusive=True) as repository: if path_prefix: repository._location = Location(self.repository_location) manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) with Cache(repository, key, manifest) as cache: assert cache.cache_config.mandatory_features == set([]) def test_progress_on(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--progress', self.repository_location + '::test4', 'input') self.assert_in("\r", output) def test_progress_off(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', self.repository_location + '::test5', 'input') self.assert_not_in("\r", output) def test_file_status(self): """test that various file status show expected results clearly incomplete: only tests for the weird "unchanged" status for now""" self.create_regular_file('file1', size=1024 * 80) time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--list', self.repository_location + '::test', 'input') self.assert_in("A input/file1", output) self.assert_in("A input/file2", output) # should find first file as unmodified output = self.cmd('create', '--list', self.repository_location + '::test1', 'input') self.assert_in("U input/file1", output) # this is expected, although surprising, for why, see: # https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file self.assert_in("A input/file2", output) def test_file_status_cs_cache_mode(self): """test that a changed file with faked "previous" mtime still gets backed up in ctime,size cache_mode""" self.create_regular_file('file1', contents=b'123') time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=10) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test1', 'input') # modify file1, but cheat with the mtime (and atime) and also keep same size: st = os.stat('input/file1') self.create_regular_file('file1', contents=b'321') os.utime('input/file1', ns=(st.st_atime_ns, st.st_mtime_ns)) # this mode uses ctime for change detection, so it should find file1 as modified output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test2', 'input') self.assert_in("M input/file1", output) def 
test_file_status_ms_cache_mode(self): """test that a chmod'ed file with no content changes does not get chunked again in mtime,size cache_mode""" self.create_regular_file('file1', size=10) time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=10) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test1', 'input') # change mode of file1, no content change: st = os.stat('input/file1') os.chmod('input/file1', st.st_mode ^ stat.S_IRWXO) # this triggers a ctime change, but mtime is unchanged # this mode uses mtime for change detection, so it should find file1 as unmodified output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test2', 'input') self.assert_in("U input/file1", output) def test_file_status_rc_cache_mode(self): """test that files get rechunked unconditionally in rechunk,ctime cache mode""" self.create_regular_file('file1', size=10) time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=10) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test1', 'input') # no changes here, but this mode rechunks unconditionally output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test2', 'input') self.assert_in("A input/file1", output) def test_file_status_excluded(self): """test that excluded paths are listed""" self.create_regular_file('file1', size=1024 * 80) time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=1024 * 80) if has_lchflags: self.create_regular_file('file3', size=1024 * 80) platform.set_flags(os.path.join(self.input_path, 'file3'), stat.UF_NODUMP) self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test', 'input') self.assert_in("A input/file1", output) self.assert_in("A input/file2", output) if has_lchflags: self.assert_in("x input/file3", output) # should find second file as excluded output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test1', 'input', '--exclude', '*/file2') self.assert_in("U input/file1", output) self.assert_in("x input/file2", output) if has_lchflags: self.assert_in("x input/file3", output) def test_create_json(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) create_info = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input')) # The usual keys assert 'encryption' in create_info assert 'repository' in create_info assert 'cache' in create_info assert 'last_modified' in create_info['repository'] archive = create_info['archive'] assert archive['name'] == 'test' assert isinstance(archive['command_line'], list) assert isinstance(archive['duration'], float) assert len(archive['id']) == 64 assert 'stats' in archive def test_create_topical(self): self.create_regular_file('file1', size=1024 * 80) time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file('file2', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) # no listing by default output = self.cmd('create', self.repository_location + '::test', 'input') 
        self.assert_not_in('file1', output)
        # shouldn't be listed even if unchanged
        output = self.cmd('create', self.repository_location + '::test0', 'input')
        self.assert_not_in('file1', output)
        # should list the file as unchanged
        output = self.cmd('create', '--list', '--filter=U', self.repository_location + '::test1', 'input')
        self.assert_in('file1', output)
        # should *not* list the file as changed
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test2', 'input')
        self.assert_not_in('file1', output)
        # change the file
        self.create_regular_file('file1', size=1024 * 100)
        # should list the file as changed
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test3', 'input')
        self.assert_in('file1', output)

    @pytest.mark.skipif(not are_fifos_supported(), reason='FIFOs not supported')
    def test_create_read_special_symlink(self):
        from threading import Thread

        def fifo_feeder(fifo_fn, data):
            fd = os.open(fifo_fn, os.O_WRONLY)
            try:
                os.write(fd, data)
            finally:
                os.close(fd)

        self.cmd('init', '--encryption=repokey', self.repository_location)
        archive = self.repository_location + '::test'
        data = b'foobar' * 1000

        fifo_fn = os.path.join(self.input_path, 'fifo')
        link_fn = os.path.join(self.input_path, 'link_fifo')
        os.mkfifo(fifo_fn)
        os.symlink(fifo_fn, link_fn)

        t = Thread(target=fifo_feeder, args=(fifo_fn, data))
        t.start()
        try:
            self.cmd('create', '--read-special', archive, 'input/link_fifo')
        finally:
            t.join()
        with changedir('output'):
            self.cmd('extract', archive)
            fifo_fn = 'input/link_fifo'
            with open(fifo_fn, 'rb') as f:
                extracted_data = f.read()
        assert extracted_data == data

    def test_create_read_special_broken_symlink(self):
        os.symlink('somewhere does not exist', os.path.join(self.input_path, 'link'))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        archive = self.repository_location + '::test'
        self.cmd('create', '--read-special', archive, 'input')
        output = self.cmd('list', archive)
        assert 'input/link -> somewhere does not exist' in output

    # def test_cmdline_compatibility(self):
    #     self.create_regular_file('file1', size=1024 * 80)
    #     self.cmd('init', '--encryption=repokey', self.repository_location)
    #     self.cmd('create', self.repository_location + '::test', 'input')
    #     output = self.cmd('foo', self.repository_location, '--old')
    #     self.assert_in('"--old" has been deprecated. Use "--new" instead', output)

    def test_prune_repository(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', src_dir)
        self.cmd('create', self.repository_location + '::test2', src_dir)
        # these are not really checkpoints, but they look like some:
        self.cmd('create', self.repository_location + '::test3.checkpoint', src_dir)
        self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
        self.cmd('create', self.repository_location + '::test4.checkpoint', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1')
        assert re.search(r'Would prune:\s+test1', output)
        # must keep the latest non-checkpoint archive:
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
        # must keep the latest checkpoint archive:
        assert re.search(r'Keeping checkpoint archive:\s+test4.checkpoint', output)
        output = self.cmd('list', '--consider-checkpoints', self.repository_location)
        self.assert_in('test1', output)
        self.assert_in('test2', output)
        self.assert_in('test3.checkpoint', output)
        self.assert_in('test3.checkpoint.1', output)
        self.assert_in('test4.checkpoint', output)
        self.cmd('prune', self.repository_location, '--keep-daily=1')
        output = self.cmd('list', '--consider-checkpoints', self.repository_location)
        self.assert_not_in('test1', output)
        # the latest non-checkpoint archive must still be there:
        self.assert_in('test2', output)
        # only the latest checkpoint archive must still be there:
        self.assert_not_in('test3.checkpoint', output)
        self.assert_not_in('test3.checkpoint.1', output)
        self.assert_in('test4.checkpoint', output)
        # now we supersede the latest checkpoint by a successful backup:
        self.cmd('create', self.repository_location + '::test5', src_dir)
        self.cmd('prune', self.repository_location, '--keep-daily=2')
        output = self.cmd('list', '--consider-checkpoints', self.repository_location)
        # all checkpoints should be gone now:
        self.assert_not_in('checkpoint', output)
        # the latest archive must still be there:
        self.assert_in('test5', output)

    # Given a date and time in local tz, create a UTC timestamp string suitable
    # for the create --timestamp command line option
    def _to_utc_timestamp(self, year, month, day, hour, minute, second):
        dtime = datetime(year, month, day, hour, minute, second, 0, dateutil.tz.gettz())
        return dtime.astimezone(dateutil.tz.UTC).strftime("%Y-%m-%dT%H:%M:%S")

    def _create_archive_ts(self, name, y, m, d, H=0, M=0, S=0):
        loc = self.repository_location + '::' + name
        self.cmd('create', '--timestamp', self._to_utc_timestamp(y, m, d, H, M, S), loc, src_dir)

    # This test must match docs/misc/prune-example.txt
    def test_prune_repository_example(self):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # Archives that will be kept, per the example
        # Oldest archive
        self._create_archive_ts('test01', 2015, 1, 1)
        # 6 monthly archives
        self._create_archive_ts('test02', 2015, 6, 30)
        self._create_archive_ts('test03', 2015, 7, 31)
        self._create_archive_ts('test04', 2015, 8, 31)
        self._create_archive_ts('test05', 2015, 9, 30)
        self._create_archive_ts('test06', 2015, 10, 31)
        self._create_archive_ts('test07', 2015, 11, 30)
        # 14 daily archives
        self._create_archive_ts('test08', 2015, 12, 17)
        self._create_archive_ts('test09', 2015, 12, 18)
        self._create_archive_ts('test10', 2015, 12, 20)
        self._create_archive_ts('test11', 2015, 12, 21)
        self._create_archive_ts('test12', 2015, 12, 22)
        self._create_archive_ts('test13', 2015, 12, 23)
        self._create_archive_ts('test14',
2015, 12, 24) self._create_archive_ts('test15', 2015, 12, 25) self._create_archive_ts('test16', 2015, 12, 26) self._create_archive_ts('test17', 2015, 12, 27) self._create_archive_ts('test18', 2015, 12, 28) self._create_archive_ts('test19', 2015, 12, 29) self._create_archive_ts('test20', 2015, 12, 30) self._create_archive_ts('test21', 2015, 12, 31) # Additional archives that would be pruned # The second backup of the year self._create_archive_ts('test22', 2015, 1, 2) # The next older monthly backup self._create_archive_ts('test23', 2015, 5, 31) # The next older daily backup self._create_archive_ts('test24', 2015, 12, 16) output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=14', '--keep-monthly=6', '--keep-yearly=1') # Prune second backup of the year assert re.search(r'Would prune:\s+test22', output) # Prune next older monthly and daily backups assert re.search(r'Would prune:\s+test23', output) assert re.search(r'Would prune:\s+test24', output) # Must keep the other 21 backups # Yearly is kept as oldest archive assert re.search(r'Keeping archive \(rule: yearly\[oldest\] #1\):\s+test01', output) for i in range(1, 7): assert re.search(r'Keeping archive \(rule: monthly #' + str(i) + r'\):\s+test' + ("%02d" % (8-i)), output) for i in range(1, 15): assert re.search(r'Keeping archive \(rule: daily #' + str(i) + r'\):\s+test' + ("%02d" % (22-i)), output) output = self.cmd('list', self.repository_location) # Nothing pruned after dry run for i in range(1, 25): self.assert_in('test%02d' % i, output) self.cmd('prune', self.repository_location, '--keep-daily=14', '--keep-monthly=6', '--keep-yearly=1') output = self.cmd('list', self.repository_location) # All matching backups plus oldest kept for i in range(1, 22): self.assert_in('test%02d' % i, output) # Other backups have been pruned for i in range(22, 25): self.assert_not_in('test%02d' % i, output) # With an initial and daily backup, prune daily until oldest is replaced by a monthly backup def test_prune_retain_and_expire_oldest(self): self.cmd('init', '--encryption=repokey', self.repository_location) # Initial backup self._create_archive_ts('original_archive', 2020, 9, 1, 11, 15) # Archive and prune daily for 30 days for i in range(1, 31): self._create_archive_ts('september%02d' % i, 2020, 9, i, 12) self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1') # Archive and prune 6 days into the next month for i in range(1, 7): self._create_archive_ts('october%02d' % i, 2020, 10, i, 12) self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1') # Oldest backup is still retained output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=7', '--keep-monthly=1') assert re.search(r'Keeping archive \(rule: monthly\[oldest\] #1' + r'\):\s+original_archive', output) # Archive one more day and prune. self._create_archive_ts('october07', 2020, 10, 7, 12) self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1') # Last day of previous month is retained as monthly, and oldest is expired. 
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=7', '--keep-monthly=1') assert re.search(r'Keeping archive \(rule: monthly #1\):\s+september30', output) self.assert_not_in('original_archive', output) def test_prune_repository_save_space(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test1', src_dir) self.cmd('create', self.repository_location + '::test2', src_dir) output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1') assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output) assert re.search(r'Would prune:\s+test1', output) output = self.cmd('list', self.repository_location) self.assert_in('test1', output) self.assert_in('test2', output) self.cmd('prune', '--save-space', self.repository_location, '--keep-daily=1') output = self.cmd('list', self.repository_location) self.assert_not_in('test1', output) self.assert_in('test2', output) def test_prune_repository_prefix(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::foo-2015-08-12-10:00', src_dir) self.cmd('create', self.repository_location + '::foo-2015-08-12-20:00', src_dir) self.cmd('create', self.repository_location + '::bar-2015-08-12-10:00', src_dir) self.cmd('create', self.repository_location + '::bar-2015-08-12-20:00', src_dir) output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1', '--prefix=foo-') assert re.search(r'Keeping archive \(rule: daily #1\):\s+foo-2015-08-12-20:00', output) assert re.search(r'Would prune:\s+foo-2015-08-12-10:00', output) output = self.cmd('list', self.repository_location) self.assert_in('foo-2015-08-12-10:00', output) self.assert_in('foo-2015-08-12-20:00', output) self.assert_in('bar-2015-08-12-10:00', output) self.assert_in('bar-2015-08-12-20:00', output) self.cmd('prune', self.repository_location, '--keep-daily=1', '--prefix=foo-') output = self.cmd('list', self.repository_location) self.assert_not_in('foo-2015-08-12-10:00', output) self.assert_in('foo-2015-08-12-20:00', output) self.assert_in('bar-2015-08-12-10:00', output) self.assert_in('bar-2015-08-12-20:00', output) def test_prune_repository_glob(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::2015-08-12-10:00-foo', src_dir) self.cmd('create', self.repository_location + '::2015-08-12-20:00-foo', src_dir) self.cmd('create', self.repository_location + '::2015-08-12-10:00-bar', src_dir) self.cmd('create', self.repository_location + '::2015-08-12-20:00-bar', src_dir) output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1', '--glob-archives=2015-*-foo') assert re.search(r'Keeping archive \(rule: daily #1\):\s+2015-08-12-20:00-foo', output) assert re.search(r'Would prune:\s+2015-08-12-10:00-foo', output) output = self.cmd('list', self.repository_location) self.assert_in('2015-08-12-10:00-foo', output) self.assert_in('2015-08-12-20:00-foo', output) self.assert_in('2015-08-12-10:00-bar', output) self.assert_in('2015-08-12-20:00-bar', output) self.cmd('prune', self.repository_location, '--keep-daily=1', '--glob-archives=2015-*-foo') output = self.cmd('list', self.repository_location) self.assert_not_in('2015-08-12-10:00-foo', output) self.assert_in('2015-08-12-20:00-foo', output) self.assert_in('2015-08-12-10:00-bar', output) self.assert_in('2015-08-12-20:00-bar', 
output) def test_list_prefix(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test-1', src_dir) self.cmd('create', self.repository_location + '::something-else-than-test-1', src_dir) self.cmd('create', self.repository_location + '::test-2', src_dir) output = self.cmd('list', '--prefix=test-', self.repository_location) self.assert_in('test-1', output) self.assert_in('test-2', output) self.assert_not_in('something-else', output) def test_list_format(self): self.cmd('init', '--encryption=repokey', self.repository_location) test_archive = self.repository_location + '::test' self.cmd('create', test_archive, src_dir) output_1 = self.cmd('list', test_archive) output_2 = self.cmd('list', '--format', '{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NEWLINE}', test_archive) output_3 = self.cmd('list', '--format', '{mtime:%s} {path}{NL}', test_archive) self.assertEqual(output_1, output_2) self.assertNotEqual(output_1, output_3) def test_list_repository_format(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', '--comment', 'comment 1', self.repository_location + '::test-1', src_dir) self.cmd('create', '--comment', 'comment 2', self.repository_location + '::test-2', src_dir) output_1 = self.cmd('list', self.repository_location) output_2 = self.cmd('list', '--format', '{archive:<36} {time} [{id}]{NL}', self.repository_location) self.assertEqual(output_1, output_2) output_1 = self.cmd('list', '--short', self.repository_location) self.assertEqual(output_1, 'test-1\ntest-2\n') output_1 = self.cmd('list', '--format', '{barchive}/', self.repository_location) self.assertEqual(output_1, 'test-1/test-2/') output_3 = self.cmd('list', '--format', '{name} {comment}{NL}', self.repository_location) self.assert_in('test-1 comment 1\n', output_3) self.assert_in('test-2 comment 2\n', output_3) def test_list_hash(self): self.create_regular_file('empty_file', size=0) self.create_regular_file('amb', contents=b'a' * 1000000) self.cmd('init', '--encryption=repokey', self.repository_location) test_archive = self.repository_location + '::test' self.cmd('create', test_archive, 'input') output = self.cmd('list', '--format', '{sha256} {path}{NL}', test_archive) assert "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 input/amb" in output assert "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 input/empty_file" in output def test_list_consider_checkpoints(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test1', src_dir) # these are not really a checkpoints, but they look like some: self.cmd('create', self.repository_location + '::test2.checkpoint', src_dir) self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir) output = self.cmd('list', self.repository_location) assert "test1" in output assert "test2.checkpoint" not in output assert "test3.checkpoint.1" not in output output = self.cmd('list', '--consider-checkpoints', self.repository_location) assert "test1" in output assert "test2.checkpoint" in output assert "test3.checkpoint.1" in output def test_list_chunk_counts(self): self.create_regular_file('empty_file', size=0) self.create_regular_file('two_chunks') with open(os.path.join(self.input_path, 'two_chunks'), 'wb') as fd: fd.write(b'abba' * 2000000) fd.write(b'baab' * 2000000) self.cmd('init', '--encryption=repokey', self.repository_location) test_archive = 
self.repository_location + '::test' self.cmd('create', test_archive, 'input') output = self.cmd('list', '--format', '{num_chunks} {unique_chunks} {path}{NL}', test_archive) assert "0 0 input/empty_file" in output assert "2 2 input/two_chunks" in output def test_list_size(self): self.create_regular_file('compressible_file', size=10000) self.cmd('init', '--encryption=repokey', self.repository_location) test_archive = self.repository_location + '::test' self.cmd('create', '-C', 'lz4', test_archive, 'input') output = self.cmd('list', '--format', '{size} {csize} {dsize} {dcsize} {path}{NL}', test_archive) size, csize, dsize, dcsize, path = output.split("\n")[1].split(" ") assert int(csize) < int(size) assert int(dcsize) < int(dsize) assert int(dsize) <= int(size) assert int(dcsize) <= int(csize) def test_list_json(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') list_repo = json.loads(self.cmd('list', '--json', self.repository_location)) repository = list_repo['repository'] assert len(repository['id']) == 64 assert datetime.strptime(repository['last_modified'], ISO_FORMAT) # must not raise assert list_repo['encryption']['mode'] == 'repokey' assert 'keyfile' not in list_repo['encryption'] archive0 = list_repo['archives'][0] assert datetime.strptime(archive0['time'], ISO_FORMAT) # must not raise list_archive = self.cmd('list', '--json-lines', self.repository_location + '::test') items = [json.loads(s) for s in list_archive.splitlines()] assert len(items) == 2 file1 = items[1] assert file1['path'] == 'input/file1' assert file1['size'] == 81920 assert datetime.strptime(file1['mtime'], ISO_FORMAT) # must not raise list_archive = self.cmd('list', '--json-lines', '--format={sha256}', self.repository_location + '::test') items = [json.loads(s) for s in list_archive.splitlines()] assert len(items) == 2 file1 = items[1] assert file1['path'] == 'input/file1' assert file1['sha256'] == 'b2915eb69f260d8d3c25249195f2c8f4f716ea82ec760ae929732c0262442b2b' def test_list_json_args(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('list', '--json-lines', self.repository_location, exit_code=2) self.cmd('list', '--json', self.repository_location + '::archive', exit_code=2) def test_log_json(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) log = self.cmd('create', '--log-json', self.repository_location + '::test', 'input', '--list', '--debug') messages = {} # type -> message, one of each kind for line in log.splitlines(): msg = json.loads(line) messages[msg['type']] = msg file_status = messages['file_status'] assert 'status' in file_status assert file_status['path'].startswith('input') log_message = messages['log_message'] assert isinstance(log_message['time'], float) assert log_message['levelname'] == 'DEBUG' # there should only be DEBUG messages assert isinstance(log_message['message'], str) def test_debug_profile(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input', '--debug-profile=create.prof') self.cmd('debug', 'convert-profile', 'create.prof', 'create.pyprof') stats = pstats.Stats('create.pyprof') stats.strip_dirs() stats.sort_stats('cumtime') self.cmd('create', self.repository_location + '::test2', 'input', '--debug-profile=create.pyprof') stats = pstats.Stats('create.pyprof') # 
Only do this on trusted data! stats.strip_dirs() stats.sort_stats('cumtime') def test_common_options(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) log = self.cmd('--debug', 'create', self.repository_location + '::test', 'input') assert 'security: read previous location' in log def _get_sizes(self, compression, compressible, size=10000): if compressible: contents = b'X' * size else: contents = os.urandom(size) self.create_regular_file('file', contents=contents) self.cmd('init', '--encryption=none', self.repository_location) archive = self.repository_location + '::test' self.cmd('create', '-C', compression, archive, 'input') output = self.cmd('list', '--format', '{size} {csize} {path}{NL}', archive) size, csize, path = output.split("\n")[1].split(" ") return int(size), int(csize) def test_compression_none_compressible(self): size, csize = self._get_sizes('none', compressible=True) assert csize == size + 3 def test_compression_none_uncompressible(self): size, csize = self._get_sizes('none', compressible=False) assert csize == size + 3 def test_compression_zlib_compressible(self): size, csize = self._get_sizes('zlib', compressible=True) assert csize < size * 0.1 assert csize == 35 def test_compression_zlib_uncompressible(self): size, csize = self._get_sizes('zlib', compressible=False) assert csize >= size def test_compression_auto_compressible(self): size, csize = self._get_sizes('auto,zlib', compressible=True) assert csize < size * 0.1 assert csize == 35 # same as compression 'zlib' def test_compression_auto_uncompressible(self): size, csize = self._get_sizes('auto,zlib', compressible=False) assert csize == size + 3 # same as compression 'none' def test_compression_lz4_compressible(self): size, csize = self._get_sizes('lz4', compressible=True) assert csize < size * 0.1 def test_compression_lz4_uncompressible(self): size, csize = self._get_sizes('lz4', compressible=False) assert csize == size + 3 # same as compression 'none' def test_compression_lzma_compressible(self): size, csize = self._get_sizes('lzma', compressible=True) assert csize < size * 0.1 def test_compression_lzma_uncompressible(self): size, csize = self._get_sizes('lzma', compressible=False) assert csize == size + 3 # same as compression 'none' def test_compression_zstd_compressible(self): size, csize = self._get_sizes('zstd', compressible=True) assert csize < size * 0.1 def test_compression_zstd_uncompressible(self): size, csize = self._get_sizes('zstd', compressible=False) assert csize == size + 3 # same as compression 'none' def test_change_passphrase(self): self.cmd('init', '--encryption=repokey', self.repository_location) os.environ['BORG_NEW_PASSPHRASE'] = 'newpassphrase' # here we have both BORG_PASSPHRASE and BORG_NEW_PASSPHRASE set: self.cmd('key', 'change-passphrase', self.repository_location) os.environ['BORG_PASSPHRASE'] = 'newpassphrase' self.cmd('list', self.repository_location) def test_break_lock(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('break-lock', self.repository_location) def test_usage(self): self.cmd() self.cmd('-h') def test_help(self): assert 'Borg' in self.cmd('help') assert 'patterns' in self.cmd('help', 'patterns') assert 'Initialize' in self.cmd('help', 'init') assert 'positional arguments' not in self.cmd('help', 'init', '--epilog-only') assert 'This command initializes' not in self.cmd('help', 'init', '--usage-only') @unittest.skipUnless(llfuse, 'llfuse not installed') def test_fuse(self): def 
has_noatime(some_file): atime_before = os.stat(some_file).st_atime_ns try: os.close(os.open(some_file, flags_noatime)) except PermissionError: return False else: atime_after = os.stat(some_file).st_atime_ns noatime_used = flags_noatime != flags_normal return noatime_used and atime_before == atime_after self.cmd('init', '--encryption=repokey', self.repository_location) self.create_test_files() have_noatime = has_noatime('input/file1') self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive', 'input') self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive2', 'input') if has_lchflags: # remove the file we did not backup, so input and output become equal os.remove(os.path.join('input', 'flagfile')) mountpoint = os.path.join(self.tmpdir, 'mountpoint') # mount the whole repository, archive contents shall show up in archivename subdirs of mountpoint: with self.fuse_mount(self.repository_location, mountpoint): # flags are not supported by the FUSE mount # we also ignore xattrs here, they are tested separately self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'), ignore_flags=True, ignore_xattrs=True) self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'), ignore_flags=True, ignore_xattrs=True) # mount only 1 archive, its contents shall show up directly in mountpoint: with self.fuse_mount(self.repository_location + '::archive', mountpoint): self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'), ignore_flags=True, ignore_xattrs=True) # regular file in_fn = 'input/file1' out_fn = os.path.join(mountpoint, 'input', 'file1') # stat sti1 = os.stat(in_fn) sto1 = os.stat(out_fn) assert sti1.st_mode == sto1.st_mode assert sti1.st_uid == sto1.st_uid assert sti1.st_gid == sto1.st_gid assert sti1.st_size == sto1.st_size if have_noatime: assert sti1.st_atime == sto1.st_atime assert sti1.st_ctime == sto1.st_ctime assert sti1.st_mtime == sto1.st_mtime if are_hardlinks_supported(): # note: there is another hardlink to this, see below assert sti1.st_nlink == sto1.st_nlink == 2 # read with open(in_fn, 'rb') as in_f, open(out_fn, 'rb') as out_f: assert in_f.read() == out_f.read() # hardlink (to 'input/file1') if are_hardlinks_supported(): in_fn = 'input/hardlink' out_fn = os.path.join(mountpoint, 'input', 'hardlink') sti2 = os.stat(in_fn) sto2 = os.stat(out_fn) assert sti2.st_nlink == sto2.st_nlink == 2 assert sto1.st_ino == sto2.st_ino # symlink if are_symlinks_supported(): in_fn = 'input/link1' out_fn = os.path.join(mountpoint, 'input', 'link1') sti = os.stat(in_fn, follow_symlinks=False) sto = os.stat(out_fn, follow_symlinks=False) assert sti.st_size == len('somewhere') assert sto.st_size == len('somewhere') assert stat.S_ISLNK(sti.st_mode) assert stat.S_ISLNK(sto.st_mode) assert os.readlink(in_fn) == os.readlink(out_fn) # FIFO if are_fifos_supported(): out_fn = os.path.join(mountpoint, 'input', 'fifo1') sto = os.stat(out_fn) assert stat.S_ISFIFO(sto.st_mode) # list/read xattrs try: in_fn = 'input/fusexattr' out_fn = os.fsencode(os.path.join(mountpoint, 'input', 'fusexattr')) if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path): assert sorted(no_selinux(xattr.listxattr(out_fn))) == [b'user.empty', b'user.foo', ] assert xattr.getxattr(out_fn, b'user.foo') == b'bar' assert xattr.getxattr(out_fn, b'user.empty') == b'' else: assert no_selinux(xattr.listxattr(out_fn)) == [] try: xattr.getxattr(out_fn, b'user.foo') except OSError as e: assert e.errno == 
llfuse.ENOATTR else: assert False, "expected OSError(ENOATTR), but no error was raised" except OSError as err: if sys.platform.startswith(('nothing_here_now', )) and err.errno == errno.ENOTSUP: # some systems have no xattr support on FUSE pass else: raise @unittest.skipUnless(llfuse, 'llfuse not installed') def test_fuse_versions_view(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('test', contents=b'first') if are_hardlinks_supported(): self.create_regular_file('hardlink1', contents=b'123456') os.link('input/hardlink1', 'input/hardlink2') os.link('input/hardlink1', 'input/hardlink3') self.cmd('create', self.repository_location + '::archive1', 'input') self.create_regular_file('test', contents=b'second') self.cmd('create', self.repository_location + '::archive2', 'input') mountpoint = os.path.join(self.tmpdir, 'mountpoint') # mount the whole repository, archive contents shall show up in versioned view: with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions'): path = os.path.join(mountpoint, 'input', 'test') # filename shows up as directory ... files = os.listdir(path) assert all(f.startswith('test.') for f in files) # ... with files test.xxxxx in there assert {b'first', b'second'} == {open(os.path.join(path, f), 'rb').read() for f in files} if are_hardlinks_supported(): hl1 = os.path.join(mountpoint, 'input', 'hardlink1', 'hardlink1.00001') hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001') hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001') assert os.stat(hl1).st_ino == os.stat(hl2).st_ino == os.stat(hl3).st_ino assert open(hl3, 'rb').read() == b'123456' # similar again, but exclude the hardlink master: with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions', '-e', 'input/hardlink1'): if are_hardlinks_supported(): hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001') hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001') assert os.stat(hl2).st_ino == os.stat(hl3).st_ino assert open(hl3, 'rb').read() == b'123456' @unittest.skipUnless(llfuse, 'llfuse not installed') def test_fuse_allow_damaged_files(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('archive') # Get rid of a chunk and repair it archive, repository = self.open_archive('archive') with repository: for item in archive.iter_items(): if item.path.endswith('testsuite/archiver.py'): repository.delete(item.chunks[-1].id) path = item.path # store full path for later break else: assert False # missed the file repository.commit(compact=False) self.cmd('check', '--repair', self.repository_location, exit_code=0) mountpoint = os.path.join(self.tmpdir, 'mountpoint') with self.fuse_mount(self.repository_location + '::archive', mountpoint): with pytest.raises(OSError) as excinfo: open(os.path.join(mountpoint, path)) assert excinfo.value.errno == errno.EIO with self.fuse_mount(self.repository_location + '::archive', mountpoint, '-o', 'allow_damaged_files'): open(os.path.join(mountpoint, path)).close() @unittest.skipUnless(llfuse, 'llfuse not installed') def test_fuse_mount_options(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('arch11') self.create_src_archive('arch12') self.create_src_archive('arch21') self.create_src_archive('arch22') mountpoint = os.path.join(self.tmpdir, 'mountpoint') with self.fuse_mount(self.repository_location, mountpoint, '--first=2', '--sort=name'): assert 
sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12'] with self.fuse_mount(self.repository_location, mountpoint, '--last=2', '--sort=name'): assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22'] with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch1'): assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12'] with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch2'): assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22'] with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch'): assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12', 'arch21', 'arch22'] with self.fuse_mount(self.repository_location, mountpoint, '--prefix=nope'): assert sorted(os.listdir(os.path.join(mountpoint))) == [] @unittest.skipUnless(llfuse, 'llfuse not installed') def test_migrate_lock_alive(self): """Both old_id and new_id must not be stale during lock migration / daemonization.""" from functools import wraps import pickle import traceback # Check results are communicated from the borg mount background process # to the pytest process by means of a serialized dict object stored in this file. assert_data_file = os.path.join(self.tmpdir, 'migrate_lock_assert_data.pickle') # Decorates Lock.migrate_lock() with process_alive() checks before and after. # (We don't want to mix testing code into runtime.) def write_assert_data(migrate_lock): @wraps(migrate_lock) def wrapper(self, old_id, new_id): wrapper.num_calls += 1 assert_data = { 'num_calls': wrapper.num_calls, 'old_id': old_id, 'new_id': new_id, 'before': { 'old_id_alive': platform.process_alive(*old_id), 'new_id_alive': platform.process_alive(*new_id)}, 'exception': None, 'exception.extr_tb': None, 'after': { 'old_id_alive': None, 'new_id_alive': None}} try: with open(assert_data_file, 'wb') as _out: pickle.dump(assert_data, _out) except: pass try: return migrate_lock(self, old_id, new_id) except BaseException as e: assert_data['exception'] = e assert_data['exception.extr_tb'] = traceback.extract_tb(e.__traceback__) finally: assert_data['after'].update({ 'old_id_alive': platform.process_alive(*old_id), 'new_id_alive': platform.process_alive(*new_id)}) try: with open(assert_data_file, 'wb') as _out: pickle.dump(assert_data, _out) except: pass wrapper.num_calls = 0 return wrapper # Decorate borg.locking.Lock.migrate_lock = write_assert_data(borg.locking.Lock.migrate_lock) try: self.cmd('init', '--encryption=none', self.repository_location) self.create_src_archive('arch') mountpoint = os.path.join(self.tmpdir, 'mountpoint') # In order that the decoration is kept for the borg mount process, we must not spawn, but actually fork; # not to be confused with the forking in borg.helpers.daemonize() which is done as well. with self.fuse_mount(self.repository_location, mountpoint, os_fork=True): pass with open(assert_data_file, 'rb') as _in: assert_data = pickle.load(_in) print('\nLock.migrate_lock(): assert_data = %r.' % (assert_data, ), file=sys.stderr, flush=True) exception = assert_data['exception'] if exception is not None: extracted_tb = assert_data['exception.extr_tb'] print( 'Lock.migrate_lock() raised an exception:\n', 'Traceback (most recent call last):\n', *traceback.format_list(extracted_tb), *traceback.format_exception(exception.__class__, exception, None), sep='', end='', file=sys.stderr, flush=True) assert assert_data['num_calls'] == 1, "Lock.migrate_lock() must be called exactly once." 
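        # Note (added comment): the assertions below run in the parent test process and check the
        # assert_data dict that the forked borg mount process pickled to assert_data_file from
        # inside the decorated Lock.migrate_lock() wrapper defined above.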
assert exception is None, "Lock.migrate_lock() may not raise an exception." assert_data_before = assert_data['before'] assert assert_data_before['old_id_alive'], "old_id must be alive (=must not be stale) when calling Lock.migrate_lock()." assert assert_data_before['new_id_alive'], "new_id must be alive (=must not be stale) when calling Lock.migrate_lock()." assert_data_after = assert_data['after'] assert assert_data_after['old_id_alive'], "old_id must be alive (=must not be stale) when Lock.migrate_lock() has returned." assert assert_data_after['new_id_alive'], "new_id must be alive (=must not be stale) when Lock.migrate_lock() has returned." finally: # Undecorate borg.locking.Lock.migrate_lock = borg.locking.Lock.migrate_lock.__wrapped__ def verify_aes_counter_uniqueness(self, method): seen = set() # Chunks already seen used = set() # counter values already used def verify_uniqueness(): with Repository(self.repository_path) as repository: for id, _ in repository.open_index(repository.get_transaction_id()).iteritems(): data = repository.get(id) hash = sha256(data).digest() if hash not in seen: seen.add(hash) num_blocks = num_cipher_blocks(len(data) - 41) nonce = bytes_to_long(data[33:41]) for counter in range(nonce, nonce + num_blocks): self.assert_not_in(counter, used) used.add(counter) self.create_test_files() os.environ['BORG_PASSPHRASE'] = 'passphrase' self.cmd('init', '--encryption=' + method, self.repository_location) verify_uniqueness() self.cmd('create', self.repository_location + '::test', 'input') verify_uniqueness() self.cmd('create', self.repository_location + '::test.2', 'input') verify_uniqueness() self.cmd('delete', self.repository_location + '::test.2') verify_uniqueness() def test_aes_counter_uniqueness_keyfile(self): self.verify_aes_counter_uniqueness('keyfile') def test_aes_counter_uniqueness_passphrase(self): self.verify_aes_counter_uniqueness('repokey') def test_debug_dump_archive_items(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): output = self.cmd('debug', 'dump-archive-items', self.repository_location + '::test') output_dir = sorted(os.listdir('output')) assert len(output_dir) > 0 and output_dir[0].startswith('000000_') assert 'Done.' in output def test_debug_dump_repo_objs(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with changedir('output'): output = self.cmd('debug', 'dump-repo-objs', self.repository_location) output_dir = sorted(os.listdir('output')) assert len(output_dir) > 0 and output_dir[0].startswith('00000000_') assert 'Done.' 
in output def test_debug_put_get_delete_obj(self): self.cmd('init', '--encryption=repokey', self.repository_location) data = b'some data' hexkey = sha256(data).hexdigest() self.create_regular_file('file', contents=data) output = self.cmd('debug', 'put-obj', self.repository_location, 'input/file') assert hexkey in output output = self.cmd('debug', 'get-obj', self.repository_location, hexkey, 'output/file') assert hexkey in output with open('output/file', 'rb') as f: data_read = f.read() assert data == data_read output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey) assert "deleted" in output output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey) assert "not found" in output output = self.cmd('debug', 'delete-obj', self.repository_location, 'invalid') assert "is invalid" in output def test_init_interrupt(self): def raise_eof(*args): raise EOFError with patch.object(KeyfileKeyBase, 'create', raise_eof): self.cmd('init', '--encryption=repokey', self.repository_location, exit_code=1) assert not os.path.exists(self.repository_location) def test_init_requires_encryption_option(self): self.cmd('init', self.repository_location, exit_code=2) def test_init_nested_repositories(self): self.cmd('init', '--encryption=repokey', self.repository_location) if self.FORK_DEFAULT: self.cmd('init', '--encryption=repokey', self.repository_location + '/nested', exit_code=2) else: with pytest.raises(Repository.AlreadyExists): self.cmd('init', '--encryption=repokey', self.repository_location + '/nested') def check_cache(self): # First run a regular borg check self.cmd('check', self.repository_location) # Then check that the cache on disk matches exactly what's in the repo. with self.open_repository() as repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) with Cache(repository, key, manifest, sync=False) as cache: original_chunks = cache.chunks Cache.destroy(repository) with Cache(repository, key, manifest) as cache: correct_chunks = cache.chunks assert original_chunks is not correct_chunks seen = set() for id, (refcount, size, csize) in correct_chunks.iteritems(): o_refcount, o_size, o_csize = original_chunks[id] assert refcount == o_refcount assert size == o_size assert csize == o_csize seen.add(id) for id, (refcount, size, csize) in original_chunks.iteritems(): assert id in seen def test_check_cache(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') with self.open_repository() as repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) with Cache(repository, key, manifest, sync=False) as cache: cache.begin_txn() cache.chunks.incref(list(cache.chunks.iteritems())[0][0]) cache.commit() with pytest.raises(AssertionError): self.check_cache() def test_recreate_target_rc(self): self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('recreate', self.repository_location, '--target=asdf', exit_code=2) assert 'Need to specify single archive' in output def test_recreate_target(self): self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) self.check_cache() archive = self.repository_location + '::test0' self.cmd('create', archive, 'input') self.check_cache() original_archive = self.cmd('list', self.repository_location) self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3', '--target=new-archive') self.check_cache() archives = self.cmd('list', 
self.repository_location) assert original_archive in archives assert 'new-archive' in archives archive = self.repository_location + '::new-archive' listing = self.cmd('list', '--short', archive) assert 'file1' not in listing assert 'dir2/file2' in listing assert 'dir2/file3' not in listing def test_recreate_basic(self): self.create_test_files() self.create_regular_file('dir2/file3', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) archive = self.repository_location + '::test0' self.cmd('create', archive, 'input') self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3') self.check_cache() listing = self.cmd('list', '--short', archive) assert 'file1' not in listing assert 'dir2/file2' in listing assert 'dir2/file3' not in listing @pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported') def test_recreate_subtree_hardlinks(self): # This is essentially the same problem set as in test_extract_hardlinks self._extract_hardlinks_setup() self.cmd('create', self.repository_location + '::test2', 'input') self.cmd('recreate', self.repository_location + '::test', 'input/dir1') self.check_cache() with changedir('output'): self.cmd('extract', self.repository_location + '::test') assert os.stat('input/dir1/hardlink').st_nlink == 2 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2 assert os.stat('input/dir1/aaaa').st_nlink == 2 assert os.stat('input/dir1/source2').st_nlink == 2 with changedir('output'): self.cmd('extract', self.repository_location + '::test2') assert os.stat('input/dir1/hardlink').st_nlink == 4 def test_recreate_rechunkify(self): with open(os.path.join(self.input_path, 'large_file'), 'wb') as fd: fd.write(b'a' * 280) fd.write(b'b' * 280) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', '--chunker-params', '7,9,8,128', self.repository_location + '::test1', 'input') self.cmd('create', self.repository_location + '::test2', 'input', '--files-cache=disabled') list = self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format', '{num_chunks} {unique_chunks}') num_chunks, unique_chunks = map(int, list.split(' ')) # test1 and test2 do not deduplicate assert num_chunks == unique_chunks self.cmd('recreate', self.repository_location, '--chunker-params', 'default') self.check_cache() # test1 and test2 do deduplicate after recreate assert int(self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format={size}')) assert not int(self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format', '{unique_chunks}')) def test_recreate_recompress(self): self.create_regular_file('compressible', size=10000) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input', '-C', 'none') file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible', '--format', '{size} {csize} {sha256}') size, csize, sha256_before = file_list.split(' ') assert int(csize) >= int(size) # >= due to metadata overhead self.cmd('recreate', self.repository_location, '-C', 'lz4', '--recompress') self.check_cache() file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible', '--format', '{size} {csize} {sha256}') size, csize, sha256_after = file_list.split(' ') assert int(csize) < int(size) assert sha256_before == sha256_after def test_recreate_timestamp(self): local_timezone = datetime.now(timezone(timedelta(0))).astimezone().tzinfo 
self.create_test_files() self.cmd('init', '--encryption=repokey', self.repository_location) archive = self.repository_location + '::test0' self.cmd('create', archive, 'input') self.cmd('recreate', '--timestamp', "1970-01-02T00:00:00", '--comment', 'test', archive) info = self.cmd('info', archive).splitlines() dtime = datetime(1970, 1, 2) + local_timezone.utcoffset(None) s_time = dtime.strftime("%Y-%m-%d") assert any([re.search(r'Time \(start\).+ %s' % s_time, item) for item in info]) assert any([re.search(r'Time \(end\).+ %s' % s_time, item) for item in info]) def test_recreate_dry_run(self): self.create_regular_file('compressible', size=10000) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') archives_before = self.cmd('list', self.repository_location + '::test') self.cmd('recreate', self.repository_location, '-n', '-e', 'input/compressible') self.check_cache() archives_after = self.cmd('list', self.repository_location + '::test') assert archives_after == archives_before def test_recreate_skips_nothing_to_do(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') info_before = self.cmd('info', self.repository_location + '::test') self.cmd('recreate', self.repository_location, '--chunker-params', 'default') self.check_cache() info_after = self.cmd('info', self.repository_location + '::test') assert info_before == info_after # includes archive ID def test_with_lock(self): self.cmd('init', '--encryption=repokey', self.repository_location) lock_path = os.path.join(self.repository_path, 'lock.exclusive') cmd = 'python3', '-c', 'import os, sys; sys.exit(42 if os.path.exists("%s") else 23)' % lock_path self.cmd('with-lock', self.repository_location, *cmd, fork=True, exit_code=42) def test_recreate_list_output(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('file1', size=0) self.create_regular_file('file2', size=0) self.create_regular_file('file3', size=0) self.create_regular_file('file4', size=0) self.create_regular_file('file5', size=0) self.cmd('create', self.repository_location + '::test', 'input') output = self.cmd('recreate', '--list', '--info', self.repository_location + '::test', '-e', 'input/file2') self.check_cache() self.assert_in("input/file1", output) self.assert_in("x input/file2", output) output = self.cmd('recreate', '--list', self.repository_location + '::test', '-e', 'input/file3') self.check_cache() self.assert_in("input/file1", output) self.assert_in("x input/file3", output) output = self.cmd('recreate', self.repository_location + '::test', '-e', 'input/file4') self.check_cache() self.assert_not_in("input/file1", output) self.assert_not_in("x input/file4", output) output = self.cmd('recreate', '--info', self.repository_location + '::test', '-e', 'input/file5') self.check_cache() self.assert_not_in("input/file1", output) self.assert_not_in("x input/file5", output) def test_bad_filters(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') self.cmd('delete', '--first', '1', '--last', '1', self.repository_location, fork=True, exit_code=2) def test_key_export_keyfile(self): export_file = self.output_path + '/exported' self.cmd('init', self.repository_location, '--encryption', 'keyfile') repo_id = 
self._extract_repository_id(self.repository_path) self.cmd('key', 'export', self.repository_location, export_file) with open(export_file, 'r') as fd: export_contents = fd.read() assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n') key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0] with open(key_file, 'r') as fd: key_contents = fd.read() assert key_contents == export_contents os.unlink(key_file) self.cmd('key', 'import', self.repository_location, export_file) with open(key_file, 'r') as fd: key_contents2 = fd.read() assert key_contents2 == key_contents def test_key_import_keyfile_with_borg_key_file(self): self.cmd('init', self.repository_location, '--encryption', 'keyfile') exported_key_file = os.path.join(self.output_path, 'exported') self.cmd('key', 'export', self.repository_location, exported_key_file) key_file = os.path.join(self.keys_path, os.listdir(self.keys_path)[0]) with open(key_file, 'r') as fd: key_contents = fd.read() os.unlink(key_file) imported_key_file = os.path.join(self.output_path, 'imported') with environment_variable(BORG_KEY_FILE=imported_key_file): self.cmd('key', 'import', self.repository_location, exported_key_file) assert not os.path.isfile(key_file), '"borg key import" should respect BORG_KEY_FILE' with open(imported_key_file, 'r') as fd: imported_key_contents = fd.read() assert imported_key_contents == key_contents def test_key_export_repokey(self): export_file = self.output_path + '/exported' self.cmd('init', self.repository_location, '--encryption', 'repokey') repo_id = self._extract_repository_id(self.repository_path) self.cmd('key', 'export', self.repository_location, export_file) with open(export_file, 'r') as fd: export_contents = fd.read() assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n') with Repository(self.repository_path) as repository: repo_key = RepoKey(repository) repo_key.load(None, Passphrase.env_passphrase()) backup_key = KeyfileKey(key.TestKey.MockRepository()) backup_key.load(export_file, Passphrase.env_passphrase()) assert repo_key.enc_key == backup_key.enc_key with Repository(self.repository_path) as repository: repository.save_key(b'') self.cmd('key', 'import', self.repository_location, export_file) with Repository(self.repository_path) as repository: repo_key2 = RepoKey(repository) repo_key2.load(None, Passphrase.env_passphrase()) assert repo_key2.enc_key == repo_key2.enc_key def test_key_export_qr(self): export_file = self.output_path + '/exported.html' self.cmd('init', self.repository_location, '--encryption', 'repokey') repo_id = self._extract_repository_id(self.repository_path) self.cmd('key', 'export', '--qr-html', self.repository_location, export_file) with open(export_file, 'r', encoding='utf-8') as fd: export_contents = fd.read() assert bin_to_hex(repo_id) in export_contents assert export_contents.startswith('<!doctype html>') assert export_contents.endswith('</html>\n') def test_key_export_directory(self): export_directory = self.output_path + '/exported' os.mkdir(export_directory) self.cmd('init', self.repository_location, '--encryption', 'repokey') self.cmd('key', 'export', self.repository_location, export_directory, exit_code=EXIT_ERROR) def test_key_import_errors(self): export_file = self.output_path + '/exported' self.cmd('init', self.repository_location, '--encryption', 'keyfile') self.cmd('key', 'import', self.repository_location, export_file, exit_code=EXIT_ERROR) with open(export_file, 'w') as fd: fd.write('something not a key\n') if self.FORK_DEFAULT: 
self.cmd('key', 'import', self.repository_location, export_file, exit_code=2) else: with pytest.raises(NotABorgKeyFile): self.cmd('key', 'import', self.repository_location, export_file) with open(export_file, 'w') as fd: fd.write('BORG_KEY a0a0a0\n') if self.FORK_DEFAULT: self.cmd('key', 'import', self.repository_location, export_file, exit_code=2) else: with pytest.raises(RepoIdMismatch): self.cmd('key', 'import', self.repository_location, export_file) def test_key_export_paperkey(self): repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239' export_file = self.output_path + '/exported' self.cmd('init', self.repository_location, '--encryption', 'keyfile') self._set_repository_id(self.repository_path, unhexlify(repo_id)) key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0] with open(key_file, 'w') as fd: fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n') fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode()) self.cmd('key', 'export', '--paper', self.repository_location, export_file) with open(export_file, 'r') as fd: export_contents = fd.read() assert export_contents == """To restore key use borg key import --paper /path/to/repo BORG PAPER KEY v1 id: 2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02 1: 616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d 2: 737475 - 88 """ def test_key_import_paperkey(self): repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239' self.cmd('init', self.repository_location, '--encryption', 'keyfile') self._set_repository_id(self.repository_path, unhexlify(repo_id)) key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0] with open(key_file, 'w') as fd: fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n') fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode()) typed_input = ( b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 02\n' # Forgot to type "-" b'2 / e29442 3506da 4e1ea7 25f62a 5a3d41 - 02\n' # Forgot to type second "/" b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d42 - 02\n' # Typo (..42 not ..41) b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n' # Correct! 
Congratulations b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n' b'\n\n' # Abort [yN] => N b'737475 88\n' # missing "-" b'73747i - 88\n' # typo b'73747 - 88\n' # missing nibble b'73 74 75 - 89\n' # line checksum mismatch b'00a1 - 88\n' # line hash collision - overall hash mismatch, have to start over b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n' b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n' b'73 74 75 - 88\n' ) # In case that this has to change, here is a quick way to find a colliding line hash: # # from hashlib import sha256 # hash_fn = lambda x: sha256(b'\x00\x02' + x).hexdigest()[:2] # for i in range(1000): # if hash_fn(i.to_bytes(2, byteorder='big')) == '88': # 88 = line hash # print(i.to_bytes(2, 'big')) # break self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input) # Test abort paths typed_input = b'\ny\n' self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input) typed_input = b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n\ny\n' self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input) def test_debug_dump_manifest(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') dump_file = self.output_path + '/dump' output = self.cmd('debug', 'dump-manifest', self.repository_location, dump_file) assert output == "" with open(dump_file, "r") as f: result = json.load(f) assert 'archives' in result assert 'config' in result assert 'item_keys' in result assert 'timestamp' in result assert 'version' in result def test_debug_dump_archive(self): self.create_regular_file('file1', size=1024 * 80) self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') dump_file = self.output_path + '/dump' output = self.cmd('debug', 'dump-archive', self.repository_location + "::test", dump_file) assert output == "" with open(dump_file, "r") as f: result = json.load(f) assert '_name' in result assert '_manifest_entry' in result assert '_meta' in result assert '_items' in result def test_debug_refcount_obj(self): self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('debug', 'refcount-obj', self.repository_location, '0' * 64).strip() assert output == 'object 0000000000000000000000000000000000000000000000000000000000000000 not found [info from chunks cache].' create_json = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input')) archive_id = create_json['archive']['id'] output = self.cmd('debug', 'refcount-obj', self.repository_location, archive_id).strip() assert output == 'object ' + archive_id + ' has 1 referrers [info from chunks cache].' # Invalid IDs do not abort or return an error output = self.cmd('debug', 'refcount-obj', self.repository_location, '124', 'xyza').strip() assert output == 'object id 124 is invalid.\nobject id xyza is invalid.' 
def test_debug_info(self): output = self.cmd('debug', 'info') assert 'CRC implementation' in output assert 'Python' in output def test_benchmark_crud(self): self.cmd('init', '--encryption=repokey', self.repository_location) with environment_variable(_BORG_BENCHMARK_CRUD_TEST='YES'): self.cmd('benchmark', 'crud', self.repository_location, self.input_path) def test_config(self): self.create_test_files() os.unlink('input/flagfile') self.cmd('init', '--encryption=repokey', self.repository_location) output = self.cmd('config', '--list', self.repository_location) self.assert_in('[repository]', output) self.assert_in('version', output) self.assert_in('segments_per_dir', output) self.assert_in('storage_quota', output) self.assert_in('append_only', output) self.assert_in('additional_free_space', output) self.assert_in('id', output) self.assert_not_in('last_segment_checked', output) output = self.cmd('config', self.repository_location, 'last_segment_checked', exit_code=1) self.assert_in('No option ', output) self.cmd('config', self.repository_location, 'last_segment_checked', '123') output = self.cmd('config', self.repository_location, 'last_segment_checked') assert output == '123' + '\n' output = self.cmd('config', '--list', self.repository_location) self.assert_in('last_segment_checked', output) self.cmd('config', '--delete', self.repository_location, 'last_segment_checked') for cfg_key, cfg_value in [ ('additional_free_space', '2G'), ('repository.append_only', '1'), ]: output = self.cmd('config', self.repository_location, cfg_key) assert output == '0' + '\n' self.cmd('config', self.repository_location, cfg_key, cfg_value) output = self.cmd('config', self.repository_location, cfg_key) assert output == cfg_value + '\n' self.cmd('config', '--delete', self.repository_location, cfg_key) self.cmd('config', self.repository_location, cfg_key, exit_code=1) self.cmd('config', '--list', '--delete', self.repository_location, exit_code=2) self.cmd('config', self.repository_location, exit_code=2) self.cmd('config', self.repository_location, 'invalid-option', exit_code=1) requires_gnutar = pytest.mark.skipif(not have_gnutar(), reason='GNU tar must be installed for this test.') requires_gzip = pytest.mark.skipif(not shutil.which('gzip'), reason='gzip must be installed for this test.') @requires_gnutar def test_export_tar(self): self.create_test_files() os.unlink('input/flagfile') self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--progress') with changedir('output'): # This probably assumes GNU tar. Note -p switch to extract permissions regardless of umask. 
subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp']) self.assert_dirs_equal('input', 'output/input', ignore_flags=True, ignore_xattrs=True, ignore_ns=True) @requires_gnutar @requires_gzip def test_export_tar_gz(self): if not shutil.which('gzip'): pytest.skip('gzip is not installed') self.create_test_files() os.unlink('input/flagfile') self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar.gz', '--list') assert 'input/file1\n' in list assert 'input/dir2\n' in list with changedir('output'): subprocess.check_call(['tar', 'xpf', '../simple.tar.gz', '--warning=no-timestamp']) self.assert_dirs_equal('input', 'output/input', ignore_flags=True, ignore_xattrs=True, ignore_ns=True) @requires_gnutar def test_export_tar_strip_components(self): if not shutil.which('gzip'): pytest.skip('gzip is not installed') self.create_test_files() os.unlink('input/flagfile') self.cmd('init', '--encryption=repokey', self.repository_location) self.cmd('create', self.repository_location + '::test', 'input') list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--strip-components=1', '--list') # --list's path are those before processing with --strip-components assert 'input/file1\n' in list assert 'input/dir2\n' in list with changedir('output'): subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp']) self.assert_dirs_equal('input', 'output/', ignore_flags=True, ignore_xattrs=True, ignore_ns=True) @requires_hardlinks @requires_gnutar def test_export_tar_strip_components_links(self): self._extract_hardlinks_setup() self.cmd('export-tar', self.repository_location + '::test', 'output.tar', '--strip-components=2') with changedir('output'): subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp']) assert os.stat('hardlink').st_nlink == 2 assert os.stat('subdir/hardlink').st_nlink == 2 assert os.stat('aaaa').st_nlink == 2 assert os.stat('source2').st_nlink == 2 @requires_hardlinks @requires_gnutar def test_extract_hardlinks_tar(self): self._extract_hardlinks_setup() self.cmd('export-tar', self.repository_location + '::test', 'output.tar', 'input/dir1') with changedir('output'): subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp']) assert os.stat('input/dir1/hardlink').st_nlink == 2 assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2 assert os.stat('input/dir1/aaaa').st_nlink == 2 assert os.stat('input/dir1/source2').st_nlink == 2 def test_detect_attic_repo(self): path = make_attic_repo(self.repository_path) cmds = [ ['create', path + '::test', self.tmpdir], ['extract', path + '::test'], ['check', path], ['rename', path + '::test', 'newname'], ['list', path], ['delete', path], ['prune', path], ['info', path + '::test'], ['key', 'export', path, 'exported'], ['key', 'import', path, 'import'], ['key', 'change-passphrase', path], ['break-lock', path], ] for args in cmds: output = self.cmd(*args, fork=True, exit_code=2) assert 'Attic repository detected.' 
in output @unittest.skipUnless('binary' in BORG_EXES, 'no borg.exe available') class ArchiverTestCaseBinary(ArchiverTestCase): EXE = 'borg.exe' FORK_DEFAULT = True @unittest.skip('does not raise Exception, but sets rc==2') def test_init_parent_dirs(self): pass @unittest.skip('patches objects') def test_init_interrupt(self): pass @unittest.skip('patches objects') def test_extract_capabilities(self): pass @unittest.skip('patches objects') def test_extract_xattrs_errors(self): pass @unittest.skip('test_basic_functionality seems incompatible with fakeroot and/or the binary.') def test_basic_functionality(self): pass @unittest.skip('test_overwrite seems incompatible with fakeroot and/or the binary.') def test_overwrite(self): pass def test_fuse(self): if fakeroot_detected(): unittest.skip('test_fuse with the binary is not compatible with fakeroot') else: super().test_fuse() class ArchiverCheckTestCase(ArchiverTestCaseBase): def setUp(self): super().setUp() with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('archive1') self.create_src_archive('archive2') def test_check_usage(self): output = self.cmd('check', '-v', '--progress', self.repository_location, exit_code=0) self.assert_in('Starting repository check', output) self.assert_in('Starting archive consistency check', output) self.assert_in('Checking segments', output) # reset logging to new process default to avoid need for fork=True on next check logging.getLogger('borg.output.progress').setLevel(logging.NOTSET) output = self.cmd('check', '-v', '--repository-only', self.repository_location, exit_code=0) self.assert_in('Starting repository check', output) self.assert_not_in('Starting archive consistency check', output) self.assert_not_in('Checking segments', output) output = self.cmd('check', '-v', '--archives-only', self.repository_location, exit_code=0) self.assert_not_in('Starting repository check', output) self.assert_in('Starting archive consistency check', output) output = self.cmd('check', '-v', '--archives-only', '--prefix=archive2', self.repository_location, exit_code=0) self.assert_not_in('archive1', output) output = self.cmd('check', '-v', '--archives-only', '--first=1', self.repository_location, exit_code=0) self.assert_in('archive1', output) self.assert_not_in('archive2', output) output = self.cmd('check', '-v', '--archives-only', '--last=1', self.repository_location, exit_code=0) self.assert_not_in('archive1', output) self.assert_in('archive2', output) def test_missing_file_chunk(self): archive, repository = self.open_archive('archive1') with repository: for item in archive.iter_items(): if item.path.endswith('testsuite/archiver.py'): valid_chunks = item.chunks killed_chunk = valid_chunks[-1] repository.delete(killed_chunk.id) break else: self.fail('should not happen') repository.commit(compact=False) self.cmd('check', self.repository_location, exit_code=1) output = self.cmd('check', '--repair', self.repository_location, exit_code=0) self.assert_in('New missing file chunk detected', output) self.cmd('check', self.repository_location, exit_code=0) output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0) self.assert_in('broken#', output) # check that the file in the old archives has now a different chunk list without the killed chunk for archive_name in ('archive1', 'archive2'): archive, repository = self.open_archive(archive_name) with repository: for item in archive.iter_items(): if 
item.path.endswith('testsuite/archiver.py'): self.assert_not_equal(valid_chunks, item.chunks) self.assert_not_in(killed_chunk, item.chunks) break else: self.fail('should not happen') # do a fresh backup (that will include the killed chunk) with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10): self.create_src_archive('archive3') # check should be able to heal the file now: output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0) self.assert_in('Healed previously missing file chunk', output) self.assert_in('testsuite/archiver.py: Completely healed previously damaged file!', output) # check that the file in the old archives has the correct chunks again for archive_name in ('archive1', 'archive2'): archive, repository = self.open_archive(archive_name) with repository: for item in archive.iter_items(): if item.path.endswith('testsuite/archiver.py'): self.assert_equal(valid_chunks, item.chunks) break else: self.fail('should not happen') # list is also all-healthy again output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0) self.assert_not_in('broken#', output) def test_missing_archive_item_chunk(self): archive, repository = self.open_archive('archive1') with repository: repository.delete(archive.metadata.items[0]) repository.commit(compact=False) self.cmd('check', self.repository_location, exit_code=1) self.cmd('check', '--repair', self.repository_location, exit_code=0) self.cmd('check', self.repository_location, exit_code=0) def test_missing_archive_metadata(self): archive, repository = self.open_archive('archive1') with repository: repository.delete(archive.id) repository.commit(compact=False) self.cmd('check', self.repository_location, exit_code=1) self.cmd('check', '--repair', self.repository_location, exit_code=0) self.cmd('check', self.repository_location, exit_code=0) def test_missing_manifest(self): archive, repository = self.open_archive('archive1') with repository: repository.delete(Manifest.MANIFEST_ID) repository.commit(compact=False) self.cmd('check', self.repository_location, exit_code=1) output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0) self.assert_in('archive1', output) self.assert_in('archive2', output) self.cmd('check', self.repository_location, exit_code=0) def test_corrupted_manifest(self): archive, repository = self.open_archive('archive1') with repository: manifest = repository.get(Manifest.MANIFEST_ID) corrupted_manifest = manifest + b'corrupted!' repository.put(Manifest.MANIFEST_ID, corrupted_manifest) repository.commit(compact=False) self.cmd('check', self.repository_location, exit_code=1) output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0) self.assert_in('archive1', output) self.assert_in('archive2', output) self.cmd('check', self.repository_location, exit_code=0) def test_manifest_rebuild_corrupted_chunk(self): archive, repository = self.open_archive('archive1') with repository: manifest = repository.get(Manifest.MANIFEST_ID) corrupted_manifest = manifest + b'corrupted!' repository.put(Manifest.MANIFEST_ID, corrupted_manifest) chunk = repository.get(archive.id) corrupted_chunk = chunk + b'corrupted!' 
repository.put(archive.id, corrupted_chunk) repository.commit(compact=False) self.cmd('check', self.repository_location, exit_code=1) output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0) self.assert_in('archive2', output) self.cmd('check', self.repository_location, exit_code=0) def test_manifest_rebuild_duplicate_archive(self): archive, repository = self.open_archive('archive1') key = archive.key with repository: manifest = repository.get(Manifest.MANIFEST_ID) corrupted_manifest = manifest + b'corrupted!' repository.put(Manifest.MANIFEST_ID, corrupted_manifest) archive = msgpack.packb({ 'cmdline': [], 'items': [], 'hostname': 'foo', 'username': 'bar', 'name': 'archive1', 'time': '2016-12-15T18:49:51.849711', 'version': 1, }) archive_id = key.id_hash(archive) repository.put(archive_id, key.encrypt(archive)) repository.commit(compact=False) self.cmd('check', self.repository_location, exit_code=1) self.cmd('check', '--repair', self.repository_location, exit_code=0) output = self.cmd('list', self.repository_location) self.assert_in('archive1', output) self.assert_in('archive1.1', output) self.assert_in('archive2', output) def test_extra_chunks(self): self.cmd('check', self.repository_location, exit_code=0) with Repository(self.repository_location, exclusive=True) as repository: repository.put(b'01234567890123456789012345678901', b'xxxx') repository.commit(compact=False) self.cmd('check', self.repository_location, exit_code=1) self.cmd('check', self.repository_location, exit_code=1) self.cmd('check', '--repair', self.repository_location, exit_code=0) self.cmd('check', self.repository_location, exit_code=0) self.cmd('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0) def _test_verify_data(self, *init_args): shutil.rmtree(self.repository_path) self.cmd('init', self.repository_location, *init_args) self.create_src_archive('archive1') archive, repository = self.open_archive('archive1') with repository: for item in archive.iter_items(): if item.path.endswith('testsuite/archiver.py'): chunk = item.chunks[-1] data = repository.get(chunk.id) + b'1234' repository.put(chunk.id, data) break repository.commit(compact=False) self.cmd('check', self.repository_location, exit_code=0) output = self.cmd('check', '--verify-data', self.repository_location, exit_code=1) assert bin_to_hex(chunk.id) + ', integrity error' in output # repair (heal is tested in another test) output = self.cmd('check', '--repair', '--verify-data', self.repository_location, exit_code=0) assert bin_to_hex(chunk.id) + ', integrity error' in output assert 'testsuite/archiver.py: New missing file chunk detected' in output def test_verify_data(self): self._test_verify_data('--encryption', 'repokey') def test_verify_data_unencrypted(self): self._test_verify_data('--encryption', 'none') def test_empty_repository(self): with Repository(self.repository_location, exclusive=True) as repository: for id_ in repository.list(): repository.delete(id_) repository.commit(compact=False) self.cmd('check', self.repository_location, exit_code=1) def test_attic013_acl_bug(self): # Attic up to release 0.13 contained a bug where every item unintentionally received # a b'acl'=None key-value pair. # This bug can still live on in Borg repositories (through borg upgrade). class Attic013Item: def as_dict(self): return { # These are required b'path': '1234', b'mtime': 0, b'mode': 0, b'user': b'0', b'group': b'0', b'uid': 0, b'gid': 0, # acl is the offending key. 
b'acl': None, } archive, repository = self.open_archive('archive1') with repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) with Cache(repository, key, manifest) as cache: archive = Archive(repository, key, manifest, '0.13', cache=cache, create=True) archive.items_buffer.add(Attic013Item()) archive.save() self.cmd('check', self.repository_location, exit_code=0) self.cmd('list', self.repository_location + '::0.13', exit_code=0) class ManifestAuthenticationTest(ArchiverTestCaseBase): def spoof_manifest(self, repository): with repository: _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({ 'version': 1, 'archives': {}, 'config': {}, 'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT), }))) repository.commit(compact=False) def test_fresh_init_tam_required(self): self.cmd('init', '--encryption=repokey', self.repository_location) repository = Repository(self.repository_path, exclusive=True) with repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({ 'version': 1, 'archives': {}, 'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT), }))) repository.commit(compact=False) with pytest.raises(TAMRequiredError): self.cmd('list', self.repository_location) def test_not_required(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('archive1234') repository = Repository(self.repository_path, exclusive=True) with repository: shutil.rmtree(get_security_dir(bin_to_hex(repository.id))) _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) key.tam_required = False key.change_passphrase(key._passphrase) manifest = msgpack.unpackb(key.decrypt(None, repository.get(Manifest.MANIFEST_ID))) del manifest[b'tam'] repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb(manifest))) repository.commit(compact=False) output = self.cmd('list', '--debug', self.repository_location) assert 'archive1234' in output assert 'TAM not found and not required' in output # Run upgrade self.cmd('upgrade', '--tam', self.repository_location) # Manifest must be authenticated now output = self.cmd('list', '--debug', self.repository_location) assert 'archive1234' in output assert 'TAM-verified manifest' in output # Try to spoof / modify pre-1.0.9 self.spoof_manifest(repository) # Fails with pytest.raises(TAMRequiredError): self.cmd('list', self.repository_location) # Force upgrade self.cmd('upgrade', '--tam', '--force', self.repository_location) self.cmd('list', self.repository_location) def test_disable(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('archive1234') self.cmd('upgrade', '--disable-tam', self.repository_location) repository = Repository(self.repository_path, exclusive=True) self.spoof_manifest(repository) assert not self.cmd('list', self.repository_location) def test_disable2(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_src_archive('archive1234') repository = Repository(self.repository_path, exclusive=True) self.spoof_manifest(repository) self.cmd('upgrade', '--disable-tam', self.repository_location) assert not self.cmd('list', self.repository_location) class RemoteArchiverTestCase(ArchiverTestCase): prefix = '__testsuite__:' def open_repository(self): return RemoteRepository(Location(self.repository_location)) def 
test_remote_repo_restrict_to_path(self): # restricted to repo directory itself: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]): self.cmd('init', '--encryption=repokey', self.repository_location) # restricted to repo directory itself, fail for other directories with same prefix: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]): with pytest.raises(PathNotAllowed): self.cmd('init', '--encryption=repokey', self.repository_location + '_0') # restricted to a completely different path: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']): with pytest.raises(PathNotAllowed): self.cmd('init', '--encryption=repokey', self.repository_location + '_1') path_prefix = os.path.dirname(self.repository_path) # restrict to repo directory's parent directory: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', path_prefix]): self.cmd('init', '--encryption=repokey', self.repository_location + '_2') # restrict to repo directory's parent directory and another directory: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', path_prefix]): self.cmd('init', '--encryption=repokey', self.repository_location + '_3') def test_remote_repo_restrict_to_repository(self): # restricted to repo directory itself: with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', self.repository_path]): self.cmd('init', '--encryption=repokey', self.repository_location) parent_path = os.path.join(self.repository_path, '..') with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', parent_path]): with pytest.raises(PathNotAllowed): self.cmd('init', '--encryption=repokey', self.repository_location) @unittest.skip('only works locally') def test_debug_put_get_delete_obj(self): pass @unittest.skip('only works locally') def test_config(self): pass @unittest.skip('only works locally') def test_migrate_lock_alive(self): pass def test_strip_components_doesnt_leak(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('dir/file', contents=b"test file contents 1") self.create_regular_file('dir/file2', contents=b"test file contents 2") self.create_regular_file('skipped-file1', contents=b"test file contents 3") self.create_regular_file('skipped-file2', contents=b"test file contents 4") self.create_regular_file('skipped-file3', contents=b"test file contents 5") self.cmd('create', self.repository_location + '::test', 'input') marker = 'cached responses left in RemoteRepository' with changedir('output'): res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '3') self.assert_true(marker not in res) with self.assert_creates_file('file'): res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '2') self.assert_true(marker not in res) with self.assert_creates_file('dir/file'): res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '1') self.assert_true(marker not in res) with self.assert_creates_file('input/dir/file'): res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '0') self.assert_true(marker not in res) class ArchiverCorruptionTestCase(ArchiverTestCaseBase): def setUp(self): super().setUp() self.create_test_files() self.cmd('init', '--encryption=repokey', 
self.repository_location) self.cache_path = json.loads(self.cmd('info', self.repository_location, '--json'))['cache']['path'] def corrupt(self, file, amount=1): with open(file, 'r+b') as fd: fd.seek(-amount, io.SEEK_END) corrupted = bytes(255-c for c in fd.read(amount)) fd.seek(-amount, io.SEEK_END) fd.write(corrupted) def test_cache_chunks(self): self.corrupt(os.path.join(self.cache_path, 'chunks')) if self.FORK_DEFAULT: out = self.cmd('info', self.repository_location, exit_code=2) assert 'failed integrity check' in out else: with pytest.raises(FileIntegrityError): self.cmd('info', self.repository_location) def test_cache_files(self): self.cmd('create', self.repository_location + '::test', 'input') self.corrupt(os.path.join(self.cache_path, 'files')) out = self.cmd('create', self.repository_location + '::test1', 'input') # borg warns about the corrupt files cache, but then continues without files cache. assert 'files cache is corrupted' in out def test_chunks_archive(self): self.cmd('create', self.repository_location + '::test1', 'input') # Find ID of test1 so we can corrupt it later :) target_id = self.cmd('list', self.repository_location, '--format={id}{LF}').strip() self.cmd('create', self.repository_location + '::test2', 'input') # Force cache sync, creating archive chunks of test1 and test2 in chunks.archive.d self.cmd('delete', '--cache-only', self.repository_location) self.cmd('info', self.repository_location, '--json') chunks_archive = os.path.join(self.cache_path, 'chunks.archive.d') assert len(os.listdir(chunks_archive)) == 4 # two archives, one chunks cache and one .integrity file each self.corrupt(os.path.join(chunks_archive, target_id + '.compact')) # Trigger cache sync by changing the manifest ID in the cache config config_path = os.path.join(self.cache_path, 'config') config = ConfigParser(interpolation=None) config.read(config_path) config.set('cache', 'manifest', bin_to_hex(bytes(32))) with open(config_path, 'w') as fd: config.write(fd) # Cache sync notices corrupted archive chunks, but automatically recovers. out = self.cmd('create', '-v', self.repository_location + '::test3', 'input', exit_code=1) assert 'Reading cached archive chunk index for test1' in out assert 'Cached archive chunk index of test1 is corrupted' in out assert 'Fetching and building archive index for test1' in out def test_old_version_interfered(self): # Modify the main manifest ID without touching the manifest ID in the integrity section. # This happens if a version without integrity checking modifies the cache. config_path = os.path.join(self.cache_path, 'config') config = ConfigParser(interpolation=None) config.read(config_path) config.set('cache', 'manifest', bin_to_hex(bytes(32))) with open(config_path, 'w') as fd: config.write(fd) out = self.cmd('info', self.repository_location) assert 'Cache integrity data not available: old Borg version modified the cache.' 
in out class DiffArchiverTestCase(ArchiverTestCaseBase): def test_basic_functionality(self): # Setup files for the first snapshot self.create_regular_file('empty', size=0) self.create_regular_file('file_unchanged', size=128) self.create_regular_file('file_removed', size=256) self.create_regular_file('file_removed2', size=512) self.create_regular_file('file_replaced', size=1024) os.mkdir('input/dir_replaced_with_file') os.chmod('input/dir_replaced_with_file', stat.S_IFDIR | 0o755) os.mkdir('input/dir_removed') if are_symlinks_supported(): os.mkdir('input/dir_replaced_with_link') os.symlink('input/dir_replaced_with_file', 'input/link_changed') os.symlink('input/file_unchanged', 'input/link_removed') os.symlink('input/file_removed2', 'input/link_target_removed') os.symlink('input/empty', 'input/link_target_contents_changed') os.symlink('input/empty', 'input/link_replaced_by_file') if are_hardlinks_supported(): os.link('input/file_replaced', 'input/hardlink_target_replaced') os.link('input/empty', 'input/hardlink_contents_changed') os.link('input/file_removed', 'input/hardlink_removed') os.link('input/file_removed2', 'input/hardlink_target_removed') self.cmd('init', '--encryption=repokey', self.repository_location) # Create the first snapshot self.cmd('create', self.repository_location + '::test0', 'input') # Setup files for the second snapshot self.create_regular_file('file_added', size=2048) self.create_regular_file('file_empty_added', size=0) os.unlink('input/file_replaced') self.create_regular_file('file_replaced', contents=b'0' * 4096) os.unlink('input/file_removed') os.unlink('input/file_removed2') os.rmdir('input/dir_replaced_with_file') self.create_regular_file('dir_replaced_with_file', size=8192) os.chmod('input/dir_replaced_with_file', stat.S_IFREG | 0o755) os.mkdir('input/dir_added') os.rmdir('input/dir_removed') if are_symlinks_supported(): os.rmdir('input/dir_replaced_with_link') os.symlink('input/dir_added', 'input/dir_replaced_with_link') os.unlink('input/link_changed') os.symlink('input/dir_added', 'input/link_changed') os.symlink('input/dir_added', 'input/link_added') os.unlink('input/link_replaced_by_file') self.create_regular_file('link_replaced_by_file', size=16384) os.unlink('input/link_removed') if are_hardlinks_supported(): os.unlink('input/hardlink_removed') os.link('input/file_added', 'input/hardlink_added') with open('input/empty', 'ab') as fd: fd.write(b'appended_data') # Create the second snapshot self.cmd('create', self.repository_location + '::test1a', 'input') self.cmd('create', '--chunker-params', '16,18,17,4095', self.repository_location + '::test1b', 'input') def do_asserts(output, can_compare_ids): # File contents changed (deleted and replaced with a new file) change = 'B' if can_compare_ids else '{:<19}'.format('modified') assert 'file_replaced' in output # added to debug #3494 assert '{} input/file_replaced'.format(change) in output # File unchanged assert 'input/file_unchanged' not in output # Directory replaced with a regular file if 'BORG_TESTS_IGNORE_MODES' not in os.environ: assert '[drwxr-xr-x -> -rwxr-xr-x] input/dir_replaced_with_file' in output # Basic directory cases assert 'added directory input/dir_added' in output assert 'removed directory input/dir_removed' in output if are_symlinks_supported(): # Basic symlink cases assert 'changed link input/link_changed' in output assert 'added link input/link_added' in output assert 'removed link input/link_removed' in output # Symlink replacing or being replaced assert '] input/dir_replaced_with_link' in 
output assert '] input/link_replaced_by_file' in output # Symlink target removed. Should not affect the symlink at all. assert 'input/link_target_removed' not in output # The inode has two links and the file contents changed. Borg # should notice the changes in both links. However, the symlink # pointing to the file is not changed. change = '0 B' if can_compare_ids else '{:<19}'.format('modified') assert '{} input/empty'.format(change) in output if are_hardlinks_supported(): assert '{} input/hardlink_contents_changed'.format(change) in output if are_symlinks_supported(): assert 'input/link_target_contents_changed' not in output # Added a new file and a hard link to it. Both links to the same # inode should appear as separate files. assert 'added 2.05 kB input/file_added' in output if are_hardlinks_supported(): assert 'added 2.05 kB input/hardlink_added' in output # check if a diff between non-existent and empty new file is found assert 'added 0 B input/file_empty_added' in output # The inode has two links and both of them are deleted. They should # appear as two deleted files. assert 'removed 256 B input/file_removed' in output if are_hardlinks_supported(): assert 'removed 256 B input/hardlink_removed' in output # Another link (marked previously as the source in borg) to the # same inode was removed. This should not change this link at all. if are_hardlinks_supported(): assert 'input/hardlink_target_removed' not in output # Another link (marked previously as the source in borg) to the # same inode was replaced with a new regular file. This should not # change this link at all. if are_hardlinks_supported(): assert 'input/hardlink_target_replaced' not in output def do_json_asserts(output, can_compare_ids): def get_changes(filename, data): chgsets = [j['changes'] for j in data if j['path'] == filename] assert len(chgsets) < 2 # return a flattened list of changes for given filename return [chg for chgset in chgsets for chg in chgset] # convert output to list of dicts joutput = [json.loads(line) for line in output.split('\n') if line] # File contents changed (deleted and replaced with a new file) expected = {'type': 'modified', 'added': 4096, 'removed': 1024} if can_compare_ids else {'type': 'modified'} assert expected in get_changes('input/file_replaced', joutput) # File unchanged assert not any(get_changes('input/file_unchanged', joutput)) # Directory replaced with a regular file if 'BORG_TESTS_IGNORE_MODES' not in os.environ: assert {'type': 'mode', 'old_mode': 'drwxr-xr-x', 'new_mode': '-rwxr-xr-x'} in \ get_changes('input/dir_replaced_with_file', joutput) # Basic directory cases assert {'type': 'added directory'} in get_changes('input/dir_added', joutput) assert {'type': 'removed directory'} in get_changes('input/dir_removed', joutput) if are_symlinks_supported(): # Basic symlink cases assert {'type': 'changed link'} in get_changes('input/link_changed', joutput) assert {'type': 'added link'} in get_changes('input/link_added', joutput) assert {'type': 'removed link'} in get_changes('input/link_removed', joutput) # Symlink replacing or being replaced assert any(chg['type'] == 'mode' and chg['new_mode'].startswith('l') for chg in get_changes('input/dir_replaced_with_link', joutput)) assert any(chg['type'] == 'mode' and chg['old_mode'].startswith('l') for chg in get_changes('input/link_replaced_by_file', joutput)) # Symlink target removed. Should not affect the symlink at all. 
assert not any(get_changes('input/link_target_removed', joutput)) # The inode has two links and the file contents changed. Borg # should notice the changes in both links. However, the symlink # pointing to the file is not changed. expected = {'type': 'modified', 'added': 13, 'removed': 0} if can_compare_ids else {'type': 'modified'} assert expected in get_changes('input/empty', joutput) if are_hardlinks_supported(): assert expected in get_changes('input/hardlink_contents_changed', joutput) if are_symlinks_supported(): assert not any(get_changes('input/link_target_contents_changed', joutput)) # Added a new file and a hard link to it. Both links to the same # inode should appear as separate files. assert {'type': 'added', 'size': 2048} in get_changes('input/file_added', joutput) if are_hardlinks_supported(): assert {'type': 'added', 'size': 2048} in get_changes('input/hardlink_added', joutput) # check if a diff between non-existent and empty new file is found assert {'type': 'added', 'size': 0} in get_changes('input/file_empty_added', joutput) # The inode has two links and both of them are deleted. They should # appear as two deleted files. assert {'type': 'removed', 'size': 256} in get_changes('input/file_removed', joutput) if are_hardlinks_supported(): assert {'type': 'removed', 'size': 256} in get_changes('input/hardlink_removed', joutput) # Another link (marked previously as the source in borg) to the # same inode was removed. This should not change this link at all. if are_hardlinks_supported(): assert not any(get_changes('input/hardlink_target_removed', joutput)) # Another link (marked previously as the source in borg) to the # same inode was replaced with a new regular file. This should not # change this link at all. if are_hardlinks_supported(): assert not any(get_changes('input/hardlink_target_replaced', joutput)) do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1a'), True) # We expect exit_code=1 due to the chunker params warning do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1b', exit_code=1), False) do_json_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1a', '--json-lines'), True) def test_sort_option(self): self.cmd('init', '--encryption=repokey', self.repository_location) self.create_regular_file('a_file_removed', size=8) self.create_regular_file('f_file_removed', size=16) self.create_regular_file('c_file_changed', size=32) self.create_regular_file('e_file_changed', size=64) self.cmd('create', self.repository_location + '::test0', 'input') os.unlink('input/a_file_removed') os.unlink('input/f_file_removed') os.unlink('input/c_file_changed') os.unlink('input/e_file_changed') self.create_regular_file('c_file_changed', size=512) self.create_regular_file('e_file_changed', size=1024) self.create_regular_file('b_file_added', size=128) self.create_regular_file('d_file_added', size=256) self.cmd('create', self.repository_location + '::test1', 'input') output = self.cmd('diff', '--sort', self.repository_location + '::test0', 'test1') expected = [ 'a_file_removed', 'b_file_added', 'c_file_changed', 'd_file_added', 'e_file_changed', 'f_file_removed', ] assert all(x in line for x, line in zip(expected, output.splitlines())) def test_get_args(): archiver = Archiver() # everything normal: # first param is argv as produced by ssh forced command, # second param is like from SSH_ORIGINAL_COMMAND env variable args = archiver.get_args(['borg', 'serve', '--umask=0027', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ], 'borg 
serve --info') assert args.func == archiver.do_serve assert args.restrict_to_paths == ['/p1', '/p2'] assert args.umask == 0o027 assert args.log_level == 'info' # similar, but with --restrict-to-repository args = archiver.get_args(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ], 'borg serve --info --umask=0027') assert args.restrict_to_repositories == ['/r1', '/r2'] # trying to cheat - break out of path restriction args = archiver.get_args(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ], 'borg serve --restrict-to-path=/') assert args.restrict_to_paths == ['/p1', '/p2'] # trying to cheat - break out of repository restriction args = archiver.get_args(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ], 'borg serve --restrict-to-repository=/') assert args.restrict_to_repositories == ['/r1', '/r2'] # trying to cheat - break below repository restriction args = archiver.get_args(['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ], 'borg serve --restrict-to-repository=/r1/below') assert args.restrict_to_repositories == ['/r1', '/r2'] # trying to cheat - try to execute different subcommand args = archiver.get_args(['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ], 'borg init --encryption=repokey /') assert args.func == archiver.do_serve # Check that environment variables in the forced command don't cause issues. If the command # were not forced, environment variables would be interpreted by the shell, but this does not # happen for forced commands - we get the verbatim command line and need to deal with env vars. args = archiver.get_args(['borg', 'serve', ], 'BORG_FOO=bar borg serve --info') assert args.func == archiver.do_serve def test_chunk_content_equal(): def ccc(a, b): chunks_a = [data for data in a] chunks_b = [data for data in b] compare1 = ItemDiff._chunk_content_equal(iter(chunks_a), iter(chunks_b)) compare2 = ItemDiff._chunk_content_equal(iter(chunks_b), iter(chunks_a)) assert compare1 == compare2 return compare1 assert ccc([ b'1234', b'567A', b'bC' ], [ b'1', b'23', b'4567A', b'b', b'C' ]) # one iterator exhausted before the other assert not ccc([ b'12345', ], [ b'1234', b'56' ]) # content mismatch assert not ccc([ b'1234', b'65' ], [ b'1234', b'56' ]) # first is the prefix of second assert not ccc([ b'1234', b'56' ], [ b'1234', b'565' ]) class TestBuildFilter: @staticmethod def peek_and_store_hardlink_masters(item, matched): pass def test_basic(self): matcher = PatternMatcher() matcher.add([parse_pattern('included')], IECommand.Include) filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0) assert filter(Item(path='included')) assert filter(Item(path='included/file')) assert not filter(Item(path='something else')) def test_empty(self): matcher = PatternMatcher(fallback=True) filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0) assert filter(Item(path='anything')) def test_strip_components(self): matcher = PatternMatcher(fallback=True) filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, strip_components=1) assert not filter(Item(path='shallow')) assert not filter(Item(path='shallow/')) # can this even happen? paths are normalized... 
assert filter(Item(path='deep enough/file')) assert filter(Item(path='something/dir/file')) class TestCommonOptions: @staticmethod def define_common_options(add_common_option): add_common_option('-h', '--help', action='help', help='show this help message and exit') add_common_option('--critical', dest='log_level', help='foo', action='store_const', const='critical', default='warning') add_common_option('--error', dest='log_level', help='foo', action='store_const', const='error', default='warning') add_common_option('--append', dest='append', help='foo', action='append', metavar='TOPIC', default=[]) add_common_option('-p', '--progress', dest='progress', action='store_true', help='foo') add_common_option('--lock-wait', dest='lock_wait', type=int, metavar='N', default=1, help='(default: %(default)d).') @pytest.fixture def basic_parser(self): parser = argparse.ArgumentParser(prog='test', description='test parser', add_help=False) parser.common_options = Archiver.CommonOptions(self.define_common_options, suffix_precedence=('_level0', '_level1')) return parser @pytest.fixture def subparsers(self, basic_parser): if sys.version_info >= (3, 7): # py37 pre-release defaults to unwanted required=True, in 3.7.0+ it was fixed to =False return basic_parser.add_subparsers(title='required arguments', metavar='<command>', required=False) else: # py36 does not support required=... argument (but behaves like required=False). # note: use below call for 3.6 and 3.7 when there are no alphas/betas/RCs of 3.7.0 around any more. return basic_parser.add_subparsers(title='required arguments', metavar='<command>') @pytest.fixture def parser(self, basic_parser): basic_parser.common_options.add_common_group(basic_parser, '_level0', provide_defaults=True) return basic_parser @pytest.fixture def common_parser(self, parser): common_parser = argparse.ArgumentParser(add_help=False, prog='test') parser.common_options.add_common_group(common_parser, '_level1') return common_parser @pytest.fixture def parse_vars_from_line(self, parser, subparsers, common_parser): subparser = subparsers.add_parser('subcommand', parents=[common_parser], add_help=False, description='foo', epilog='bar', help='baz', formatter_class=argparse.RawDescriptionHelpFormatter) subparser.set_defaults(func=1234) subparser.add_argument('--append-only', dest='append_only', action='store_true') def parse_vars_from_line(*line): print(line) args = parser.parse_args(line) parser.common_options.resolve(args) return vars(args) return parse_vars_from_line def test_simple(self, parse_vars_from_line): assert parse_vars_from_line('--error') == { 'append': [], 'lock_wait': 1, 'log_level': 'error', 'progress': False } assert parse_vars_from_line('--error', 'subcommand', '--critical') == { 'append': [], 'lock_wait': 1, 'log_level': 'critical', 'progress': False, 'append_only': False, 'func': 1234, } with pytest.raises(SystemExit): parse_vars_from_line('--append-only', 'subcommand') assert parse_vars_from_line('--append=foo', '--append', 'bar', 'subcommand', '--append', 'baz') == { 'append': ['foo', 'bar', 'baz'], 'lock_wait': 1, 'log_level': 'warning', 'progress': False, 'append_only': False, 'func': 1234, } @pytest.mark.parametrize('position', ('before', 'after', 'both')) @pytest.mark.parametrize('flag,args_key,args_value', ( ('-p', 'progress', True), ('--lock-wait=3', 'lock_wait', 3), )) def test_flag_position_independence(self, parse_vars_from_line, position, flag, args_key, args_value): line = [] if position in ('before', 'both'): line.append(flag) 
line.append('subcommand') if position in ('after', 'both'): line.append(flag) result = { 'append': [], 'lock_wait': 1, 'log_level': 'warning', 'progress': False, 'append_only': False, 'func': 1234, } result[args_key] = args_value assert parse_vars_from_line(*line) == result def test_parse_storage_quota(): assert parse_storage_quota('50M') == 50 * 1000**2 with pytest.raises(argparse.ArgumentTypeError): parse_storage_quota('5M') def get_all_parsers(): """ Return dict mapping command to parser. """ parser = Archiver(prog='borg').build_parser() borgfs_parser = Archiver(prog='borgfs').build_parser() parsers = {} def discover_level(prefix, parser, Archiver, extra_choices=None): choices = {} for action in parser._actions: if action.choices is not None and 'SubParsersAction' in str(action.__class__): for cmd, parser in action.choices.items(): choices[prefix + cmd] = parser if extra_choices is not None: choices.update(extra_choices) if prefix and not choices: return for command, parser in sorted(choices.items()): discover_level(command + " ", parser, Archiver) parsers[command] = parser discover_level("", parser, Archiver, {'borgfs': borgfs_parser}) return parsers @pytest.mark.parametrize('command, parser', list(get_all_parsers().items())) def test_help_formatting(command, parser): if isinstance(parser.epilog, RstToTextLazy): assert parser.epilog.rst @pytest.mark.parametrize('topic, helptext', list(Archiver.helptext.items())) def test_help_formatting_helptexts(topic, helptext): assert str(rst_to_terminal(helptext))
[]
[]
[ "BORG_CACHE_DIR", "BORG_KEYS_DIR", "BORG_NEW_PASSPHRASE", "BORG_PASSPHRASE", "BORG_CHECK_I_KNOW_WHAT_I_AM_DOING", "BORG_DELETE_I_KNOW_WHAT_I_AM_DOING" ]
[]
["BORG_CACHE_DIR", "BORG_KEYS_DIR", "BORG_NEW_PASSPHRASE", "BORG_PASSPHRASE", "BORG_CHECK_I_KNOW_WHAT_I_AM_DOING", "BORG_DELETE_I_KNOW_WHAT_I_AM_DOING"]
python
6
0
credscontroller/vendor/github.com/hashicorp/vault/physical/mysql/mysql_test.go
package mysql import ( "os" "testing" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" _ "github.com/go-sql-driver/mysql" ) func TestMySQLBackend(t *testing.T) { address := os.Getenv("MYSQL_ADDR") if address == "" { t.SkipNow() } database := os.Getenv("MYSQL_DB") if database == "" { database = "test" } table := os.Getenv("MYSQL_TABLE") if table == "" { table = "test" } username := os.Getenv("MYSQL_USERNAME") password := os.Getenv("MYSQL_PASSWORD") // Run vault tests logger := logging.NewVaultLogger(log.Debug) b, err := NewMySQLBackend(map[string]string{ "address": address, "database": database, "table": table, "username": username, "password": password, }, logger) if err != nil { t.Fatalf("Failed to create new backend: %v", err) } defer func() { mysql := b.(*MySQLBackend) _, err := mysql.client.Exec("DROP TABLE IF EXISTS " + mysql.dbTable + " ," + mysql.dbLockTable) if err != nil { t.Fatalf("Failed to drop table: %v", err) } }() physical.ExerciseBackend(t, b) physical.ExerciseBackend_ListPrefix(t, b) } func TestMySQLHABackend(t *testing.T) { address := os.Getenv("MYSQL_ADDR") if address == "" { t.SkipNow() } database := os.Getenv("MYSQL_DB") if database == "" { database = "test" } table := os.Getenv("MYSQL_TABLE") if table == "" { table = "test" } username := os.Getenv("MYSQL_USERNAME") password := os.Getenv("MYSQL_PASSWORD") // Run vault tests logger := logging.NewVaultLogger(log.Debug) config := map[string]string{ "address": address, "database": database, "table": table, "username": username, "password": password, "ha_enabled": "true", } b, err := NewMySQLBackend(config, logger) if err != nil { t.Fatalf("Failed to create new backend: %v", err) } defer func() { mysql := b.(*MySQLBackend) _, err := mysql.client.Exec("DROP TABLE IF EXISTS " + mysql.dbTable + " ," + mysql.dbLockTable) if err != nil { t.Fatalf("Failed to drop table: %v", err) } }() b2, err := NewMySQLBackend(config, logger) if err != nil { t.Fatalf("Failed to create new backend: %v", err) } physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend)) }
[ "\"MYSQL_ADDR\"", "\"MYSQL_DB\"", "\"MYSQL_TABLE\"", "\"MYSQL_USERNAME\"", "\"MYSQL_PASSWORD\"", "\"MYSQL_ADDR\"", "\"MYSQL_DB\"", "\"MYSQL_TABLE\"", "\"MYSQL_USERNAME\"", "\"MYSQL_PASSWORD\"" ]
[]
[ "MYSQL_DB", "MYSQL_PASSWORD", "MYSQL_USERNAME", "MYSQL_ADDR", "MYSQL_TABLE" ]
[]
["MYSQL_DB", "MYSQL_PASSWORD", "MYSQL_USERNAME", "MYSQL_ADDR", "MYSQL_TABLE"]
go
5
0
picture_book/asgi.py
""" ASGI config for picture_book project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'picture_book.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
ebcli/core/fileoperations.py
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import codecs import glob import json import os import shutil import stat import sys import zipfile import yaml import warnings from pathspec import PathSpec from cement.utils.misc import minimal_logger from ebcli.objects.buildconfiguration import BuildConfiguration from six import StringIO from yaml import safe_load, safe_dump from yaml.parser import ParserError from yaml.scanner import ScannerError try: import configparser except ImportError: import ConfigParser as configparser from ebcli.core import io from ebcli.resources.strings import prompts, strings from ebcli.objects.exceptions import ( NotInitializedError, InvalidSyntaxError, NotFoundError ) from ebcli.core.ebglobals import Constants LOG = minimal_logger(__name__) def get_aws_home(): sep = os.path.sep p = '~' + sep + '.aws' + sep return os.path.expanduser(p) def get_ssh_folder(): sep = os.path.sep p = '~' + sep + '.ssh' + sep p = os.path.expanduser(p) if not os.path.exists(p): os.makedirs(p) return p beanstalk_directory = '.elasticbeanstalk' + os.path.sep buildspec_name = "buildspec.yml" buildspec_config_header = 'eb_codebuild_settings' global_config_file = beanstalk_directory + 'config.global.yml' local_config_file = beanstalk_directory + 'config.yml' aws_config_folder = get_aws_home() aws_config_location = aws_config_folder + 'config' aws_credentials_location = aws_config_folder + 'credentials' aws_access_key = 'aws_access_key_id' aws_secret_key = 'aws_secret_access_key' region_key = 'region' default_section = 'default' ebcli_section = 'profile eb-cli' app_version_folder = beanstalk_directory + 'app_versions' logs_folder = beanstalk_directory + 'logs' + os.path.sep env_yaml = 'env.yaml' _marker = object() class ProjectRoot(object): @classmethod def traverse(cls): cwd = os.getcwd() if not os.path.isdir(beanstalk_directory): LOG.debug('beanstalk directory not found in ' + cwd + ' -Going up a level') os.chdir(os.path.pardir) if cwd == os.getcwd(): LOG.debug('Still at the same directory ' + cwd) raise NotInitializedError('EB is not yet initialized') ProjectRoot.traverse() else: LOG.debug('Project root found at: ' + cwd) def _get_option(config, section, key, default): try: return config.get(section, key) except (configparser.NoSectionError, configparser.NoOptionError): return default def is_git_directory_present(): return os.path.isdir('.git') def clean_up(): cwd = os.getcwd() try: ProjectRoot.traverse() if os.path.isdir(beanstalk_directory): shutil.rmtree(beanstalk_directory, ignore_errors=True) finally: os.chdir(cwd) def _set_not_none(config, section, option, value): if value: config.set(section, option, value) def get_war_file_location(): cwd = os.getcwd() try: ProjectRoot.traverse() lst = glob.glob('{}'.format(os.path.join('build', 'libs', '*.war'))) try: return os.path.join(os.getcwd(), lst[0]) except IndexError: raise NotFoundError('Can not find .war artifact in build' + os.path.sep + 'libs' + os.path.sep) finally: os.chdir(cwd) def config_file_present(): return 
os.path.isfile(local_config_file) def project_file_path(filename): return os.path.join(get_project_root(), filename) def project_file_exists(filename): return file_exists(project_file_path(filename)) def save_to_aws_config(access_key, secret_key): config = configparser.ConfigParser() if not os.path.isdir(aws_config_folder): os.makedirs(aws_config_folder) config.read(aws_config_location) if ebcli_section not in config.sections(): config.add_section(ebcli_section) _set_not_none(config, ebcli_section, aws_access_key, access_key) _set_not_none(config, ebcli_section, aws_secret_key, secret_key) with open(aws_config_location, 'w') as f: config.write(f) set_user_only_permissions(aws_config_location) def set_user_only_permissions(location): """ Sets permissions so that only a user can read/write (chmod 400). Can be a folder or a file. :param location: Full location of either a folder or a location """ if os.path.isdir(location): for root, dirs, files in os.walk(location): for d in dirs: pass _set_user_only_permissions_file(os.path.join(root, d), ex=True) for f in files: _set_user_only_permissions_file(os.path.join(root, f)) else: _set_user_only_permissions_file(location) def _set_user_only_permissions_file(location, ex=False): """ :param ex: Boolean: add executable permission """ permission = stat.S_IRUSR | stat.S_IWUSR if ex: permission |= stat.S_IXUSR os.chmod(location, permission) def set_all_unrestricted_permissions(location): """ Set permissions so that user, group, and others all have read, write and execute permissions (chmod 777). :param location: Full location of either a folder or a location """ os.chmod(location, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) def remove_execute_access_from_group_and_other_users(location): os.chmod(location, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH) def get_current_directory_name(): dirname, filename = os.path.split(os.getcwd()) from ebcli.lib.utils import decode_bytes filename = decode_bytes(filename) return filename def get_platform_version(default=_marker): try: return get_global_value('platform_version') except NotInitializedError: return None def get_instance_profile(default=None): try: return get_global_value('instance_profile', default) except NotInitializedError: return default def get_application_name(default=_marker): return get_global_value('application_name') def get_platform_name(default=_marker): return get_global_value('platform_name') def get_workspace_type(default=_marker): try: return get_global_value('workspace_type', default) except NotInitializedError: if default == _marker: raise NotInitializedError return default def get_global_value(key, default=_marker): result = get_config_setting('global', key) if result is not None: return result LOG.debug('Directory found, but no config or app name exists') if default is _marker: raise NotInitializedError return default def touch_config_folder(dir_path=None): if not os.path.isdir(os.path.join(dir_path, beanstalk_directory) if dir_path else beanstalk_directory): os.makedirs(os.path.join(dir_path, beanstalk_directory) if dir_path else beanstalk_directory) def create_config_file( app_name, region, solution_stack, workspace_type=Constants.WorkSpaceTypes.APPLICATION, platform_name=None, platform_version=None, instance_profile=None, dir_path=None, repository=None, branch=None): """ We want to make sure we do not override the file if it already exists, but we do want to fill in all missing pieces :param app_name: name of the application :return: VOID: no return value """ LOG.debug('Creating config 
file at ' + os.getcwd()) if not os.path.isdir(os.path.join(dir_path, beanstalk_directory) if dir_path else beanstalk_directory): os.makedirs(os.path.join(dir_path, beanstalk_directory) if dir_path else beanstalk_directory) write_config_setting('global', 'application_name', app_name, dir_path=dir_path) write_config_setting('global', 'default_region', region, dir_path=dir_path) write_config_setting('global', 'default_platform', solution_stack, dir_path=dir_path) write_config_setting('global', 'workspace_type', workspace_type, dir_path=dir_path) write_config_setting('global', 'platform_name', platform_name, dir_path=dir_path) write_config_setting('global', 'platform_version', platform_version, dir_path=dir_path) write_config_setting('global', 'instance_profile', instance_profile, dir_path=dir_path) from ebcli.operations import gitops gitops.set_repo_default_for_current_environment(repository) gitops.set_branch_default_for_current_environment(branch) def get_project_root(): cwd = os.getcwd() try: ProjectRoot.traverse() return os.getcwd() finally: os.chdir(cwd) def inside_ebcli_project(): try: return not not get_project_root() except NotInitializedError: return False def get_zip_location(file_name): cwd = os.getcwd() try: ProjectRoot.traverse() if not os.path.isdir(app_version_folder): os.makedirs(app_version_folder) return os.path.abspath(app_version_folder) + os.path.sep + file_name finally: os.chdir(cwd) def get_logs_location(folder_name): cwd = os.getcwd() try: ProjectRoot.traverse() if not os.path.isdir(logs_folder): os.makedirs(logs_folder) return os.path.abspath(os.path.join(logs_folder, folder_name)) finally: os.chdir(cwd) def program_is_installed(program): return False if os_which(program) is None else True def os_which(program): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, program) if sys.platform.startswith('win'): p += '.exe' if os.path.exists(p) and os.access(p, os.X_OK): return p def delete_file(location): if os.path.exists(location): os.remove(location) def delete_directory(location): if os.path.isdir(location): shutil.rmtree(location, ignore_errors=True) def delete_app_versions(): cwd = os.getcwd() try: ProjectRoot.traverse() delete_directory(app_version_folder) finally: os.chdir(cwd) def zip_append_archive(target_file, source_file): zip_source = zipfile.ZipFile(source_file, 'r', allowZip64=True) zip_target = zipfile.ZipFile(target_file, 'a', allowZip64=True) with warnings.catch_warnings(): # Ignore UserWarning raised by zip module for zipping modules. 
warnings.simplefilter('ignore', category=UserWarning) for filename in zip_source.namelist(): zf = zip_source.read(filename) zip_target.writestr(filename, zf) zip_target.close() zip_source.close() def zip_up_folder(directory, location, ignore_list=None): cwd = os.getcwd() try: os.chdir(directory) io.log_info('Zipping up folder at location: ' + str(os.getcwd())) zipf = zipfile.ZipFile(location, 'w', zipfile.ZIP_DEFLATED, allowZip64=True) _zipdir('./', zipf, ignore_list=ignore_list) zipf.close() LOG.debug('File size: ' + str(os.path.getsize(location))) finally: os.chdir(cwd) def zip_up_project(location, ignore_list=None): cwd = os.getcwd() try: ProjectRoot.traverse() zip_up_folder('./', location, ignore_list=ignore_list) finally: os.chdir(cwd) def _zipdir(path, zipf, ignore_list=None): if ignore_list is None: ignore_list = ['.gitignore'] ignore_list = ['./' + i for i in ignore_list] zipped_roots = [] for root, dirs, files in os.walk(path): if '.elasticbeanstalk' in root: io.log_info(' -skipping: {}'.format(root)) continue for d in dirs: cur_dir = os.path.join(root, d) if os.path.islink(cur_dir): # It is probably safe to remove this code since os.walk seems to categorize # symlinks-to-directories as files. This doesn't matter as far as creation # of the zip is concerned, but just having the code around is confusing. zipInfo = zipfile.ZipInfo() zipInfo.filename = os.path.join(root, d) # 2716663808L is the "magic code" for symlinks if sys.version_info > (3,): zipInfo.external_attr = 2716663808 else: zipInfo.external_attr = long(2716663808) zipf.writestr(zipInfo, os.readlink(cur_dir)) for f in files: cur_file = os.path.join(root, f) if cur_file.endswith('~') or cur_file in ignore_list: # Ignore editor backup files (like file.txt~) # Ignore anything in the .ebignore file io.log_info(' -skipping: {}'.format(cur_file)) else: if root not in zipped_roots: # Windows requires us to index the folders. 
io.log_info(' +adding: {}/'.format(root)) zipf.write(root) zipped_roots.append(root) io.log_info(' +adding: {}'.format(cur_file)) if os.path.islink(cur_file): zipInfo = zipfile.ZipInfo() zipInfo.filename = os.path.join(root, f) if sys.version_info > (3,): zipInfo.external_attr = 2716663808 else: zipInfo.external_attr = long(2716663808) zipf.writestr(zipInfo, os.readlink(cur_file)) else: zipf.write(cur_file) def unzip_folder(file_location, directory): if not os.path.isdir(directory): os.makedirs(directory) zip = zipfile.ZipFile(file_location, 'r', allowZip64=True) for cur_file in zip.namelist(): if not cur_file.endswith('/'): root, name = os.path.split(cur_file) path = os.path.normpath(os.path.join(directory, root)) if not os.path.isdir(path): os.makedirs(path) open(os.path.join(path, name), 'wb').write(zip.read(cur_file)) def delete_app_file(app_name): cwd = os.getcwd() file_name = beanstalk_directory + app_name try: ProjectRoot.traverse() for file_ext in ['.app.yml']: path = file_name + file_ext delete_file(path) finally: os.chdir(cwd) def delete_env_file(env_name): cwd = os.getcwd() file_name = beanstalk_directory + env_name try: ProjectRoot.traverse() for file_ext in ['.ebe.yml', '.env.yml']: path = file_name + file_ext delete_file(path) finally: os.chdir(cwd) def get_editor(): editor = get_config_setting('global', 'editor') if not editor: editor = os.getenv('EDITOR') if not editor: platform = sys.platform windows = platform.startswith('win') if windows: editor = 'notepad.exe' else: editor = 'nano' return editor def save_app_file(app): cwd = os.getcwd() env_name = app['ApplicationName'] file_name = env_name + '.app.yml' file_name = beanstalk_directory + file_name try: ProjectRoot.traverse() file_name = os.path.abspath(file_name) with codecs.open(file_name, 'w', encoding='utf8') as f: f.write(safe_dump(app, default_flow_style=False, line_break=os.linesep)) finally: os.chdir(cwd) return file_name def save_env_file(env): cwd = os.getcwd() env_name = env['EnvironmentName'] file_name = env_name + '.env.yml' file_name = beanstalk_directory + file_name try: ProjectRoot.traverse() file_name = os.path.abspath(file_name) with codecs.open(file_name, 'w', encoding='utf8') as f: f.write(safe_dump(env, default_flow_style=False, line_break=os.linesep)) finally: os.chdir(cwd) return file_name def get_environment_from_file(env_name): cwd = os.getcwd() file_name = beanstalk_directory + env_name try: ProjectRoot.traverse() file_ext = '.env.yml' path = file_name + file_ext if os.path.exists(path): with codecs.open(path, 'r', encoding='utf8') as f: return safe_load(f) except (ScannerError, ParserError): raise InvalidSyntaxError('The environment file contains ' 'invalid syntax.') finally: os.chdir(cwd) def get_application_from_file(app_name): cwd = os.getcwd() file_name = beanstalk_directory + app_name try: ProjectRoot.traverse() file_ext = '.app.yml' path = file_name + file_ext if os.path.exists(path): with codecs.open(path, 'r', encoding='utf8') as f: return safe_load(f) except (ScannerError, ParserError): raise InvalidSyntaxError('The application file contains ' 'invalid syntax.') finally: os.chdir(cwd) def update_platform_version(version): if version: write_config_setting('global', 'platform_version', version) def update_platform_name(platform_name): if platform_name: write_config_setting('global', 'platform_name', platform_name) def write_keyname(keyname): write_config_setting('global', 'default_ec2_keyname', keyname) def get_keyname(): return get_config_setting('global', 'default_ec2_keyname', None) 
def write_config_setting(section, key_name, value, dir_path=None, file=local_config_file): cwd = os.getcwd() if dir_path: os.chdir(dir_path) try: ProjectRoot.traverse() config = _get_yaml_dict(file) if not config: config = {} # Value will be a dict when we are passing in branch config settings if type(value) is dict: for key in value.keys(): config.setdefault(section, {}).setdefault(key_name, {})[key] = value[key] else: if config.get(section) is None: config[section] = {} config.setdefault(section, {})[key_name] = value with codecs.open(file, 'w', encoding='utf8') as f: f.write(safe_dump(config, default_flow_style=False, line_break=os.linesep)) finally: os.chdir(cwd) def get_config_setting(section, key_name, default=_marker): cwd = os.getcwd() try: ProjectRoot.traverse() config_global = _get_yaml_dict(global_config_file) config_local = _get_yaml_dict(local_config_file) # Grab value, local gets priority try: value = config_global[section][key_name] except KeyError: value = None try: if config_local: value = config_local[section][key_name] except KeyError: pass if value is None and default != _marker: return default except NotInitializedError: if default == _marker: raise else: return default finally: os.chdir(cwd) return value def get_json_dict(fullpath): """ Read json file at fullpath and deserialize as dict. :param fullpath: str: path to the json file :return: dict """ return json.loads(read_from_text_file(fullpath)) def write_json_dict(json_data, fullpath): def date_handler(obj): return obj.isoformat() if hasattr(obj, 'isoformat') else obj data = json.dumps(json_data, sort_keys=True, indent=4, default=date_handler) write_to_text_file(data, fullpath) def _get_yaml_dict(filename): try: with codecs.open(filename, 'r', encoding='utf8') as f: return safe_load(f) except IOError: return {} def file_exists(full_path): return os.path.isfile(full_path) def eb_file_exists(location): cwd = os.getcwd() try: ProjectRoot.traverse() path = beanstalk_directory + location return os.path.isfile(path) finally: os.chdir(cwd) def build_spec_exists(): cwd = os.getcwd() try: ProjectRoot.traverse() return os.path.isfile(buildspec_name) finally: os.chdir(cwd) def get_build_configuration(): service_role_key = 'CodeBuildServiceRole' image_key = 'Image' compute_key = 'ComputeType' timeout_key = 'Timeout' cwd = os.getcwd() try: ProjectRoot.traverse() build_spec = _get_yaml_dict(buildspec_name) if build_spec is None or buildspec_config_header not in build_spec.keys(): LOG.debug("Buildspec Keys: {0}".format(build_spec.keys())) io.log_warning(strings['codebuild.noheader'].replace('{header}', buildspec_config_header)) return None beanstalk_build_configs = build_spec[buildspec_config_header] if beanstalk_build_configs is None: LOG.debug("No values for EB header in buildspec file") return BuildConfiguration() LOG.debug("EB Config Keys: {0}".format(beanstalk_build_configs.keys())) build_configuration = BuildConfiguration( compute_type=beanstalk_build_configs.get(compute_key), image=beanstalk_build_configs.get(image_key), service_role=beanstalk_build_configs.get(service_role_key), timeout=beanstalk_build_configs.get(timeout_key) ) finally: os.chdir(cwd) return build_configuration def write_buildspec_config_header(key_name, value): write_config_setting( buildspec_config_header, key_name, value, file=buildspec_name ) def directory_empty(location): return not os.listdir(location) def get_ebignore_list(): location = get_ebignore_location() if not os.path.isfile(location): return None with codecs.open(location, 'r', 
encoding='utf-8') as f: spec = PathSpec.from_lines('gitwildmatch', f) ignore_list = [f for f in spec.match_tree(get_project_root())] ignore_list.append('.ebignore') return ignore_list def make_eb_dir(location): cwd = os.getcwd() try: ProjectRoot.traverse() path = beanstalk_directory + location if not os.path.isdir(path): os.makedirs(path) finally: os.chdir(cwd) def write_to_eb_data_file(location, data): cwd = os.getcwd() try: ProjectRoot.traverse() path = beanstalk_directory + location write_to_data_file(path, data) finally: os.chdir(cwd) def write_to_data_file(location, data): with codecs.open(location, 'wb', encoding=None) as f: f.write(data) def read_from_data_file(location): with codecs.open(location, 'rb', encoding=None) as f: return f.read() def read_from_text_file(location): with codecs.open(location, 'rt', encoding=None) as f: return f.read() def write_to_text_file(data, location): with codecs.open(location, 'wt', encoding=None) as f: f.write(data) def append_to_text_file(location, data): with codecs.open(location, 'at', encoding=None) as f: f.write(data) def readlines_from_text_file(location): with codecs.open(location, 'rt', encoding=None) as f: return f.readlines() def get_project_file_full_location(location): cwd = os.getcwd() try: ProjectRoot.traverse() full_path = os.path.abspath(location) return full_path finally: os.chdir(cwd) def get_ebignore_location(): return get_project_file_full_location('.ebignore') def get_eb_file_full_location(location): return get_project_file_full_location(beanstalk_directory + location) def get_home(): return os.path.expanduser('~') def get_filename_without_extension(file_location): filename = os.path.basename(file_location) extension = 'fake' while extension != '': filename, extension = os.path.splitext(filename) return filename def env_yaml_exists(): return os.path.isfile(os.path.join(os.getcwd(), env_yaml)) def get_env_name_from_env_yaml(): with open(os.path.join(os.getcwd(), env_yaml), 'r') as f: data = yaml.safe_load(f) try: env_name = data['EnvironmentName'] return env_name except KeyError: return None def get_platform_from_env_yaml(): with open(os.path.join(os.getcwd(), env_yaml), 'r') as f: data = yaml.safe_load(f) try: env_name = data['SolutionStack'] return env_name except KeyError: return None def open_file_for_editing(file_location): file_location = '"{0}"'.format(file_location) editor = get_editor() try: os.system(editor + ' ' + file_location) except OSError: io.log_error( prompts['fileopen.error1'].replace( '{editor}', editor ) )
[]
[]
[ "EDITOR", "PATH" ]
[]
["EDITOR", "PATH"]
python
2
0
test/mrWolf/pulp_dsp_test.py
import os import time import random import shutil from plptest import Test as PulpTest, Testset from plptest import Shell, Check from itertools import product from functools import partial from collections import OrderedDict from copy import deepcopy import numpy as np from textwrap import dedent, indent, wrap import struct import traceback import re GENERATE_STIMULI = "gen_stimuli" # L2_MEM_SIZE_KB = 448 TEST_MEM_SIZE_KB = 256 class Variable(object): """Variable""" def __init__(self, name, visible=True, active=None): """ name: name for the variable """ super(Variable, self).__init__() self.name = name self.visible = visible self.active = active if callable(active) else lambda v: True class SweepVariable(Variable): """sweep variable""" def __init__(self, name, values, visible=True, active=None): """ name: name for the sweep variable values: iterable over all possible values for this variable """ super(SweepVariable, self).__init__(name, visible, active) self.values = values class DynamicVariable(Variable): """Dynamic Variable, value determined based on others""" def __init__(self, name, fun, visible=True): """ name: name of the variable fun: function, returning a value for a dictionary of all other previously defined variables. example: DynamicVairable('resLen', lambda env: env['lenA'] + env['lenB'] + 1) """ super(DynamicVariable, self).__init__(name, visible) self.fun = fun class Argument(object): """docstring for argument""" def __init__(self, name, ctype, value=None, use_l1=None, in_function=True): """ name: name of the argument (in the function declaration) ctype: String, one of the following: - C type of the argument (like 'int32_t') - 'var_type' or 'ret_type', which is determined based on the current version. - Function, returning the ctype. - A function, returning the ctype. The function can take the arguments: env, version, device, var_type. value: One of the following: - Number for constant initialization - The name of a SweepVariable or DynamicVariable, to take their value - None for a random value - tuple(min, max) for a random value in the given range - "gen_stimuli" (GENERATE_STIMULI): the function generate_stimuli in gen_stimuli.py will be called for generating the values. This function must take the argument itself, and the environment (dict(string, number)) as arguments, and return the desired value use_l1: if True, use L1 memory. If None, use default value configured in generate_test in_function: Boolean, if True, add this argument to the function signature. Set this to False, and use CustomArgument to create struts. """ super(Argument, self).__init__() self.name = name self.ctype = ctype self.value = value self.use_l1 = use_l1 self.in_function = in_function if isinstance(self.value, SweepVariable): self.value = self.value.name def apply(self, env, var_type, version, use_l1, idx, device): """ Prepare the variable for the specific test case. 
The following is done: - Apply the environment (current iteration of the sweep variables) - Apply the version (var_type or ret_type) - Apply use_l1 flag - Alter the name to contain the test id """ # set the use_l1 flag if self.use_l1 is None: self.use_l1 = use_l1 # apply the ctype if callable(self.ctype): self.ctype = call_dynamic_function(self.ctype, env, version, device) if self.ctype == 'var_type': self.ctype = var_type[0] elif self.ctype == 'ret_type': self.ctype = var_type[1] # change the name self.name = "t{}__{}".format(idx, self.name) return self def general_name(self): """ returns the name without txx__ prefix """ splits = self.name.split("__") if len(splits) == 0 or not splits[0].startswith("t"): return self.name else: return self.name[len(splits[0] + "__"):] def get_range(self): """ return the range for random values based on the ctype """ assert self.ctype not in ['var_type', 'ret_type'] if self.ctype == "float": return -1, 1 n_bits = 16 if self.ctype == 'int16_t' else 8 if self.ctype == 'int8_t' else 16 return (-(2 ** (n_bits - 1)), (2 ** (n_bits - 1)) - 1) def get_dtype(self): """ translation from self.ctype as string to a np dtype """ if self.ctype == "int8_t": return np.int8 if self.ctype == "int16_t": return np.int16 if self.ctype == "int32_t": return np.int32 if self.ctype == "float": return np.float32 raise RuntimeError("Unknown type: %s" % self.ctype) def arg_str(self): """ Returns the string to show for funciton argument """ if self.ctype == "float": # floats are defined as unions, so we take the float variant return "%s.f" % self.name else: return self.name def do_bench_setup_str(self): """ returns the string for setup in do_bench function """ return None def run_test_setup_str(self): """ returns the string for setup the variable """ return None def run_test_free_str(self): """ string to free up memory for the variable """ return None def check_str(self, target): """ returns the string to check the result """ return None def generate_value(self, env, version, device, gen_stimuli): """ Interpret the type of self.value and generate the stimuli """ if callable(self.value): self.value = call_dynamic_function(self.value, env, version, device) if self.value == GENERATE_STIMULI: self.value = call_dynamic_function(gen_stimuli, env, version, device, argument=self) if isinstance(self.value, str): self.value = env[self.value] if self.value is None or (isinstance(self.value, (tuple, list)) and len(self.value) == 2): if isinstance(self.value, tuple): min_value, max_value = self.value else: min_value, max_value = self.get_range() if self.ctype == "float": self.value = np.random.uniform(low=min_value, high=max_value) else: self.value = np.random.randint(low=min_value, high=max_value + 1) self.value = self.get_dtype()(self.value).item() assert isinstance(self.value, (int, np.int8, np.int16, np.int32, float, np.float32)) def header_str(self): """ return the string for delclaring and initializing the data """ assert isinstance(self.value, (float, int)) return declare_scalar(self.name, self.ctype, self.value) def reference_header_str(self, gen_function): """ return the header string for declaring and initializing the reference """ return None def estimate_memory(self): """ returns an estimate of the number of bytes in L2 for this argument """ # argument only requires a single scalar of ctype. However, the compiler usually aligns # memory. 
Thus, always use 4 bytes for each scalar return 4 class ArrayArgument(Argument): """Array Argument""" def __init__(self, name, ctype, length, value=None, use_l1=None, in_function=True): """ name: name of the argument ctype: String, one of the following: - C type of the argument (like 'int32_t') - 'var_type' or 'ret_type', which is determined based on the current version. - A function, returning the type. The function can take the arguments: env, version, device, var_type. length: One of the following: - Integer for a constant length - The name of a SweepVariable or DynamicVariable, to take their value - tuple(min, max) for random value in the given range - A function, returning the length. The function can take the arguments: env, version, device, var_type. value: One of the following: - Number for constant initialization, all elements will have the same value - None for a random value - tuple(min, max) for a random value in the given range - np.ndarray for a constant initialization - "gen_stimuli" (GENERATE_STIMULI): the function generate_stimuli in gen_stimuli.py will be called for generating the values. This function must take the argument itself, and the environment (dict(string, number)) as arguments, and return the numpy array. use_l1: if True, use L1 memory. If None, use default value configured in generate_test in_function: Boolean, if True, add this argument to the function signature. Set this to False, and use CustomArgument to create struts. """ super(ArrayArgument, self).__init__(name, ctype, value, use_l1, in_function) self.length = length if isinstance(self.length, SweepVariable): self.length = self.length.name def apply(self, env, var_type, version, use_l1, idx, device): """ Prepare the variable for the specific test case. The following is done: - Apply the environment (current iteration of the sweep variables) - Apply the version (var_type or ret_type) - Apply use_l1 flag - Interpret the length of the variable - Alter the name to contain the test id """ # interpret the length if callable(self.length): self.length = call_dynamic_function(self.length, env, version, device) if isinstance(self.length, tuple): assert len(self.length) == 2 self.length = random.randint(*self.length) if isinstance(self.length, str): self.length = env[self.length] if isinstance(self.length, int): self.length = self.length assert isinstance(self.length, int) # do the same thing as a regular Argument return super(ArrayArgument, self).apply(env, var_type, version, use_l1, idx, device) def l2_data_name(self): return self.name + "__l2" def arg_str(self): """ Returns the string to show for funciton argument """ return self.name def run_test_setup_str(self): """ returns the string for setup the variable """ if self.use_l1: return dedent( """\ {l1_name} = rt_alloc(RT_ALLOC_CL_DATA, sizeof({ctype}) * {len}); rt_dma_memcpy((unsigned int){l2_name}, (unsigned int){l1_name}, sizeof({ctype}) * {len}, RT_DMA_DIR_EXT2LOC, 0, &copy); rt_dma_wait(&copy); """ ).format(l1_name=self.name, l2_name=self.l2_data_name(), ctype=self.ctype, len=self.length) else: return None def run_test_free_str(self): """ string to free up memory for the variable """ if self.use_l1: return dedent( """\ rt_free(RT_ALLOC_CL_DATA, {l1_name}, sizeof({ctype}) * {len}); """ ).format(l1_name=self.name, ctype=self.ctype, len=self.length) else: return None def generate_value(self, env, version, device, gen_stimuli): """ Interpret the type of self.value and generate the stimuli """ assert isinstance(self.length, int) dtype = self.get_dtype() if 
callable(self.value): self.value = call_dynamic_function(self.value, env, version, device) if self.value == GENERATE_STIMULI: self.value = call_dynamic_function(gen_stimuli, env, version, device, variable=self) if isinstance(self.value, str): self.value = env[self.value] if self.value is None or (isinstance(self.value, (tuple, list)) and len(self.value) == 2): if isinstance(self.value, tuple): min_value, max_value = self.value else: min_value, max_value = self.get_range() if self.ctype == "float": self.value = np.random.uniform(low=min_value, high=max_value, size=self.length) else: self.value = np.random.randint(low=min_value, high=max_value + 1, size=self.length) self.value = self.value.astype(dtype) if isinstance(self.value, (int, float)): self.value = (np.ones(self.length) * self.value).astype(dtype) if isinstance(self.value, np.ndarray): pass # nothing to do assert isinstance(self.value, (list, np.ndarray)) def header_str(self): """ return the string for delclaring and initializing the data """ assert isinstance(self.value, np.ndarray) if self.use_l1: return dedent( """\ {ctype}* {name}; {l2_array}\ """ ).format(ctype=self.ctype, name=self.name, l2_array=declare_array(self.l2_data_name(), self.ctype, self.length, self.value)) else: return declare_array(self.name, self.ctype, self.length, self.value) def estimate_memory(self): """ returns an estimate of the number of bytes in L2 for this argument """ # ArrayArgument needs only 1 array of defined length, and 4 bytes for the pointer. however, # if the ctype is float, we need one pointer extra. mem = ctype_mem_size(self.ctype) * self.length mem += 8 if "float" in self.ctype else 4 return mem class OutputArgument(ArrayArgument): """Output Array Argument""" def __init__(self, name, ctype, length, use_l1=None, tolerance=0, in_function=True, skip_check=False): """ name: name of the argument ctype: String, one of the following: - C type of the argument (like 'int32_t') - 'var_type' or 'ret_type', which is determined based on the current version. - A function, returning the type. The function can take the arguments: env, version, device, var_type. length: One of the following: - Integer for a constant length - The name of a SweepVariable or DynamicVariable, to take their value - tuple(min, max) for random value in the given range - A function, returning the length. The function can take the arguments: env, version, device, var_type. use_l1: if True, use L1 memory. If None, use default value configured in generate_test tolerance: constant or function, which returns the tolerance. Values larger or equal to 1 will be interpreted as absolute tolerance. The funciton can thake the arguments: env, version, device, var_type. in_function: Boolean, if True, add this argument to the function signature. Set this to False, and use CustomArgument to create struts. skip_check: Boolean, if True, the output is not checked. """ super(OutputArgument, self).__init__(name, ctype, length, 0, use_l1, in_function) self.tolerance = tolerance self.skip_check = skip_check def reference_name(self): return self.name + "__reference" def apply(self, env, var_type, version, use_l1, idx, device): """ Prepare the variable for the specific test case. 
The following is done: - Apply the environment (current iteration of the sweep variables) - Apply the version (var_type or ret_type) - Apply use_l1 flag - Interpret the length of the variable - Apply the tolerance - Alter the name to contain the test id """ if callable(self.tolerance): self.tolerance = call_dynamic_function(self.tolerance, env, version, device) return super(OutputArgument, self).apply(env, var_type, version, use_l1, idx, device) def check_str(self, target): """ returns the string to check the result """ if self.skip_check: return "" display_format = "%.10f" if self.ctype == "float" else "%d" check_str = tolerance_check_str("%s[i]" % self.name, "%s[i]" % self.reference_name(), self.tolerance, self.ctype, " ", target) return dedent( """\ for (int i = 0; i < {len}; i++) {{ {check_str} passed = 0; printf("\\n#@# mismatch {name}[%d]: acq={fmt}, exp={fmt}\\n", i, {acq}[i], {exp}[i]); }} }} """ ).format(len=self.length, check_str=check_str, name=self.general_name(), acq=self.name, exp=self.reference_name(), fmt=display_format) def reference_header_str(self, gen_function): """ Generates and writes reference value to header file """ if self.skip_check: return "" reference = gen_function(self) return declare_array(self.reference_name(), self.ctype, self.length, reference) def estimate_memory(self): """ returns an estimate of the number of bytes in L2 for this argument """ # OutputArgument needs 2 arrays of the same length, one to store the output and one for the # reference, and the memory for storing two pointers. In fact, it is twice as much as for # ArrayArgument, thus, just call estimate_memory of the super array. return super(OutputArgument, self).estimate_memory() * 2 return ctype_mem_size(self.ctype) * self.length * 2 class InplaceArgument(OutputArgument): """ Array, that is both used as input and output It must be handled differently in order to make the various benchmark passes identical (in case the runtime of the function is data dependent). It will produce three arrays in total: One with the original values (which will never be modified), one with the expected result (which will also never be modified), and one on which the computation is performed. Before each new call of the function, the data from the original array needs to be copied to the actual computation array. """ def __init__(self, name, ctype, length, value=None, use_l1=None, tolerance=0, in_function=True, skip_check=False): """ name: name of the argument ctype: type of the argument (or 'var_type', 'ret_type') length: Length of the array, or SweepVariable, or None for random value value: One of the following: - Number for constant initialization, all elements will have the same value - None for a random value - tuple(min, max) for a random value in the given range - np.ndarray for a constant initialization - "gen_stimuli" (GENERATE_STIMULI): the function generate_stimuli in gen_stimuli.py will be called for generating the values. This function must take the argument itself, and the environment (dict(string, number)) as arguments, and return the numpy array. use_l1: if True, use L1 memory. If None, use default value configured in generate_test tolerance: constant or function, which maps the output variable type to a relative or absolute tolerance. If the value is greater or equal to 1, it is interpreted as absolute. in_function: Boolean, if True, add this argument to the function signature. Set this to False, and use CustomArgument to create struts. skip_check: Boolean, if True, the output is not checked. 
""" super(InplaceArgument, self).__init__(name, ctype, length, use_l1, tolerance, in_function, skip_check) # overwrite the value self.value = value def original_name(self): return self.name + "__original" def do_bench_setup_str(self): """ returns the string for setup in do_bench function """ return dedent( """\ for (int i = 0; i < {len}; i++) {{ {data}[i] = {original}[i]; }} """ ).format(len=self.length, data=self.name, original=self.original_name()) def header_str(self): """ return the string for delclaring and initializing the data """ assert isinstance(self.value, np.ndarray) return dedent( """\ {super_init} {original_init}\ """ ).format(super_init=super(InplaceArgument, self).header_str(), original_init=declare_array(self.original_name(), self.ctype, self.length, self.value)) def estimate_memory(self): """ returns an estimate of the number of bytes in L2 for this argument """ # OutputArgument needs 3 arrays of the same length, one to store the output, one for the # original value to fall back, and one for the reference. Hence, just call estimate_memory # of Output Argument, divide by 2 and multiply by 3. return (super(InplaceArgument, self).estimate_memory() // 2) * 3 class ReturnValue(Argument): """ result value """ def __init__(self, ctype, use_l1=None, tolerance=0): """ ctype: type of the argument (or 'var_type', 'ret_type') use_l1: if True, use L1 memory. If None, use default value configured in generate_test tolerance: constant or function, which maps the output variable type to a relative or absolute tolerance. If the value is greater or equal to 1, it is interpreted as absolute. """ super(ReturnValue, self).__init__("return_value", ctype, 0, use_l1, False) self.tolerance = tolerance self.in_function = False def reference_name(self): return self.name + "__reference" def apply(self, env, var_type, version, use_l1, idx, device): """ Prepare the variable for the specific test case. The following is done: - Apply the environment (current iteration of the sweep variables) - Apply the version (var_type or ret_type) - Apply use_l1 flag - Apply the tolerance - Alter the name to contain the test id """ if callable(self.tolerance): self.tolerance = call_dynamic_function(self.tolerance, env, version, device) return super(ReturnValue, self).apply(env, var_type, version, use_l1, idx, device) def check_str(self, target): """ returns the string to check the result """ display_format = "%.10f" if self.ctype == "float" else "%d" val_name = self.name + ".f" if self.ctype == "float" else self.name ref_name = self.reference_name() + ".f" if self.ctype == "float" else self.reference_name() check_str = tolerance_check_str(val_name, ref_name, self.tolerance, self.ctype, "", target) return dedent( """\ {check_str} passed = 0; printf("\\n#@# mismatch {name}: acq={fmt}, exp={fmt}\\n", {acq}, {exp}); }} """ ).format(check_str=check_str, name=self.general_name(), acq=self.name, exp=self.reference_name(), fmt=display_format) def reference_header_str(self, gen_function): """ Generates and writes reference value to header file """ reference = gen_function(self) return declare_scalar(self.reference_name(), self.ctype, reference) def estimate_memory(self): """ returns an estimate of the number of bytes in L2 for this argument """ # OutputArgument needs 2 scalars of ctype. 
In fact, just call estimate_memory on Argument # and multiply the result by 2 return super(ReturnValue, self).estimate_memory() * 2 class FixPointArgument(Argument): """fixpoint argument for setting the decimal point, ctype is always set to uint32_t""" def __init__(self, name, value, use_l1=None, in_function=True): """ name: name of the argument (in the function declaration) value: Number for initialization, or SweepVariable, or None for random value use_l1: if True, use L1 memory. If None, use default value configured in generate_test in_function: Boolean, if True, add this argument to the function signature. Set this to False, and use CustomArgument to create struts. """ super(FixPointArgument, self).__init__(name, "uint32_t", value, use_l1, in_function) class ParallelArgument(Argument): """Argument for choosing the number of cores argument. ctype is always set to uint32_t""" def __init__(self, name, value, use_l1=None, in_function=True): """ name: name of the argument value: Number for initialization, or SweepVariable, or None for random value use_l1: if True, use L1 memory. If None, use default value configured in generate_test in_function: Boolean, if True, add this argument to the function signature. Set this to False, and use CustomArgument to create struts. """ super(ParallelArgument, self).__init__(name, "uint32_t", value, use_l1, in_function) class CustomArgument(Argument): """ Custom Argument With this argument, you can write your own initialization. It can be used either to point to an externally defined variable, struct or array. But it can also be used to create a struct with fields, which may point to other arguments. """ def __init__(self, name, value, as_ptr=False, deref=False, in_function=True): """ name: Name of the argument (in the initialization and function declaration) value: Function, which should return a string for initializing the CustomVariable. By using other arguments (for which you have set in_function=False), you can craft structs. This function can produce a multi-line initialization string. The function has the following arguments (can use only a subset of those): - env: dict(name: str, value: number): Dictionary with the environment - version: str: Version string - var_type: tuple(str, str), which contains (var_type, ret_type) - use_l1: Bool, wether to use L1 memory. - target: name of the target device (ibex or riscy) - arg_name: F: str -> str: Function which transforms the name of an argument to the name which will actually appear in the test program. Each iteration of the accumulated test will use different variable names. Therefore, all references need to be transformed. The function *must* return the entire string for initialization, including the type and the name of the variable. as_ptr: Boolean, if True, the struct is passed as pointer to the function. Else, it is passed without referencing it. deref: Boolean, if True, the struct is dereferenced before passing to the function. Else, it is passed without dereferencing it. in_function: Boolean, if True, add this argument to the function signature. Set this to False, and use CustomArgument to create struts. 
""" super(CustomArgument, self).__init__(name, None, value, None, in_function) self.as_ptr = as_ptr self.deref = deref assert not (self.as_ptr and self.deref) def apply(self, env, var_type, version, use_l1, idx, device): """ Prepares the value (initialization string) of the custom argument, and the name to include the test id """ def arg_name(name): return "t{}__{}".format(idx, name) self.name = arg_name(self.name) self.value = call_dynamic_function(self.value, env, version, device, use_l1=use_l1, arg_name=arg_name) return self def arg_str(self): """ Returns the string to show for funciton argument """ if not self.in_function: return None if self.as_ptr: return "&%s" % (self.name) if self.deref: return "*%s" % (self.name) else: return self.name def generate_value(self, env, version, device, gen_stimuli): """ Interpret the type of self.value and generate the stimuli """ # Nothing to do here! the init string was already created pass def header_str(self): """ return the string for delclaring and initializing the data """ # here, we just need to return self.value, since this is the initialization string. return self.value def estimate_memory(self): """ returns an estimate of the number of bytes in L2 for this argument """ # for Custom Argument, we just add a constant value of 16 bytes. # TODO add a function which the user can define! return 16 class AggregatedTestCase(object): """ Structure for one testcase in the aggregated tests """ def __init__(self, idx, arguments, env, n_ops, version, device_name): """ constructor. Arguments must already be applied! """ self.idx = idx self.arguments = arguments self.env = env self.n_ops = n_ops self.version = version self.device_name = device_name def generate_header_content(self, gen_stimuli, gen_result): """ generate all stimuli values and compute the expected result """ # generate value of all arguments [arg.generate_value(self.env, self.version, self.device_name, gen_stimuli) for arg in self.arguments] content = "\n".join([arg.header_str() for arg in self.arguments]) content += "\n" # prepare inputs dictionary inputs = {arg.general_name(): arg for arg in self.arguments if isinstance(arg, InplaceArgument) or not isinstance(arg, (ReturnValue, OutputArgument))} # get the fix point fix_point = ([arg.value for arg in self.arguments if isinstance(arg, FixPointArgument)] or [None])[0] assert fix_point is None or isinstance(fix_point, int) # prepare the gen_result function gen_result_prep = partial(gen_result, inputs=inputs, env=self.env, fix_point=fix_point) content += "\n".join([x for x in [arg.reference_header_str(gen_result_prep) for arg in self.arguments] if x is not None]) return content def get_do_bench_function(self, function_name): """ returns the do_bench function for the current test """ ret_str = ([a.arg_str() + " = " for a in self.arguments if isinstance(a, ReturnValue)] or [""])[0] return dedent( """\ static int t{idx}__do_bench(rt_perf_t *perf, int events, int do_check) {{ // setup variables (like resetting InplaceArguments) {setup} // start the performance counters rt_perf_conf(perf, events); rt_perf_reset(perf); rt_perf_start(perf); // call the function-under-test {ret_str}{fname}({args}); rt_perf_stop(perf); // check the result int passed = 1; if (do_check) {{ {check} }} return passed; }} """ ).format(idx=self.idx, setup=indent("\n".join([arg.do_bench_setup_str() for arg in self.arguments if arg.do_bench_setup_str() is not None]), " "), ret_str=ret_str, fname=function_name, args=", ".join([a.arg_str() for a in self.arguments if 
a.in_function]), check=indent("\n".join([arg.check_str(self.device_name) for arg in self.arguments if arg.check_str(self.device_name) is not None]), " ")) def get_run_test_function_call(self): return "t{}__run_test();".format(self.idx) def get_run_test_function(self): """ returns the run_test function for the current test """ return dedent( """\ static void t{idx}__run_test(void) {{ printf("\\n#@# testcase {idx} {{\\n"); // setup the test rt_dma_copy_t copy; {setup} // setup performance counter rt_perf_t perf; rt_perf_init(&perf); // run 1: check result and get numebr of cycles / instructions int passed = t{idx}__do_bench(&perf, (1<<RT_PERF_CYCLES) | (1<<RT_PERF_INSTR), 1); printf("\\n#@# passed: %d\\n", passed); printf("#@# cycles: %d\\n", rt_perf_read(RT_PERF_CYCLES)); printf("#@# instructions: %d\\n", rt_perf_read(RT_PERF_INSTR)); // run 2: count load stalls t{idx}__do_bench(&perf, 1<<RT_PERF_LD_STALL, 0); printf("\\n#@# load_stalls: %d\\n", rt_perf_read(RT_PERF_LD_STALL)); // run 3: count instruction cache misses t{idx}__do_bench(&perf, 1<<RT_PERF_IMISS, 0); printf("\\n#@# icache_miss: %d\\n", rt_perf_read(RT_PERF_IMISS)); // run 4: count TCDM contentions printf("\\n#@# output start\\n"); t{idx}__do_bench(&perf, 1<<RT_PERF_TCDM_CONT, 0); printf("\\n#@# output end\\n"); printf("#@# tcdm_cont: %d\\n", rt_perf_read(RT_PERF_TCDM_CONT)); // free up all memory {free} printf("\\n#@# }}\\n"); }} """ ).format(idx=self.idx, setup=indent("\n".join([arg.run_test_setup_str() for arg in self.arguments if arg.run_test_setup_str() is not None]), " "), free=indent("".join([arg.run_test_free_str() for arg in self.arguments if arg.run_test_free_str() is not None]), " ")) def get_header_filename(self): """ returns the name of the header file """ return "data_t{}.h".format(self.idx) def get_header_file_str(self, gen_stimuli, gen_result): """ returns the header file of this test as a string """ return dedent( """\ #ifndef __PULP_DSP_TEST__DATA_T{idx}_H__ #define __PULP_DSP_TEST__DATA_T{idx}_H__ // include the common header #include "common.h" {content} #endif//__PULP_DSP_TEST__DATA_T{idx}_H__ """ ).format(idx=self.idx, content=self.generate_header_content(gen_stimuli, gen_result)) def estimate_memory(self): """ returns an estimate of the number of bytes needed on L2 """ return sum([arg.estimate_memory() for arg in self.arguments]) class AggregatedTest(object): """ Test structure for aggregated tests Aggregated tests work by generating all required arrays for all iterations at once, and storing them into L2 memory. Then, at runtime, each iteration is executed one by one, and all data for L1 memory is copied to L1 storage. For this, the largest size of each array is allocated statically. """ def __init__(self, function_name, version, arg_ret_type, arguments, variables, visible_env, device_name, use_l1, extended_output=True, n_ops=None): """ Build an aggregated test. 
This will also apply all arguments for all versions """ self.function_name = function_name self.version = version self.device_name = device_name self.extended_output = extended_output self.visible_env = visible_env self.sub_folder = "%s_%s_%s" % (self.function_name, self.version, self.device_name) # extend funciton name self.function_name += "_" + self.version # set use_l1 to false for ibex if self.device_name == "ibex": use_l1 = False # set n_ops function if n_ops is None: self.n_ops = lambda env: 0 elif isinstance(n_ops, int): self.n_ops = lambda env: n_ops elif callable(n_ops): self.n_ops = n_ops else: raise RuntimeError("Unknown type for n_ops: {}".format(type(n_ops))) # prepare var_type version_type = version.split('_')[0] if arg_ret_type is not None and version_type in arg_ret_type: var_type = arg_ret_type[version_type] elif version.startswith('i32') or version.startswith('q32'): var_type = ['int32_t', 'int32_t'] elif version.startswith('i16') or version.startswith('q16'): var_type = ['int16_t', 'int32_t'] elif version.startswith('i8') or version.startswith('q8'): var_type = ['int8_t', 'int32_t'] else: var_type = ['float', 'float'] # arguments based on if fix-point and parallel is used if not version.startswith('q') and not version.endswith('parallel'): arguments = [arg for arg in arguments if not isinstance(arg, (FixPointArgument, ParallelArgument))] if not version.startswith('q') and version.endswith('parallel'): arguments = [arg for arg in arguments if not isinstance(arg, FixPointArgument)] if version.startswith('q') and not version.endswith('parallel'): arguments = [arg for arg in arguments if not isinstance(arg, ParallelArgument)] if version.startswith('q') and version.endswith('parallel'): arguments = arguments # check fixpoint stuff if version.startswith('q'): assert len([arg for arg in arguments if isinstance(arg, FixPointArgument)]) == 1 # generate all aggregated tests self.cases = [ AggregatedTestCase( idx=i, arguments=[ deepcopy(arg).apply(env, var_type, self.version, use_l1, i, self.device_name) for arg in arguments ], env=env, n_ops=self.n_ops(env), version=self.version, device_name=self.device_name ) for (i, env) in enumerate(Sweep(variables, version)) ] def to_plptest(self): """ Returns the PulpTest structure """ test_name = self.function_name # set the platform for compatibility with various different Pulp-SDK versions platform_str = "platform=gvsoc" # default platform if "TEST_PLATFORM" in os.environ: platform_str = "platform=%s" % (os.environ["TEST_PLATFORM"]) elif "PULP_CURRENT_CONFIG_ARGS" in os.environ: platform_str = os.environ["PULP_CURRENT_CONFIG_ARGS"] return PulpTest( name=test_name, commands=self.generate_test_commands(platform_str), # timeout=40 # Timeout is now handled with shell script in run.sh ) def generate_test_commands(self, platform_str): """ Generates the commands array for PulpTest. It will generate as many tests as necessary to fit everything into L2 memory. """ c = [] start_case_id = 0 used_mem = 0 allowed_mem = TEST_MEM_SIZE_KB * 1024 num_cases = len(self.cases) for i, case in enumerate(self.cases): case_mem = case.estimate_memory() assert case_mem <= allowed_mem # Memory must be large enough to fit a single testcase. 
used_mem += case_mem end_case_id = i + 1 if i + 1 == num_cases else i if used_mem > allowed_mem or end_case_id == num_cases: used_mem = case_mem gen_test_fn = partial(generate_test_program, start=start_case_id, end=end_case_id) c.append(Check('gen', gen_test_fn, test_obj=self)) c.append(Shell('test', 'bash %s/run.sh %s' % (self.sub_folder, platform_str))) start_case_id = end_case_id assert start_case_id == num_cases c.append(Check('check', check_output, test_obj=self)) return c def get_common_header_str(self): return dedent( """\ #ifndef __PULP_DSP_TEST__COMMON_H__ #define __PULP_DSP_TEST__COMMON_H__ typedef union { uint32_t u; float f; } __u2f; #define ABS(x) (x > 0 ? x : -x) #endif//__PULP_DSP_TEST__COMMON_H__ """ ) def get_main_imports(self, start, end): """ returns a string containing all imports of the test case headers """ return "\n".join(["#include \"{}\"".format(case.get_header_filename()) for case in self.cases[start:end]]) def get_test_entry_function(self, start, end): """ write the test_entry function. """ return dedent( """\ void test_entry(void) {{ {} }} """ ).format(indent("\n".join([case.get_run_test_function_call() for case in self.cases[start:end]]), " ")) def generate_test_program(self, gen_stimuli, gen_result, start, end): """ generate all files needed for the test, from start to end """ # remove all files in the directory if it still exists clean(self.sub_folder) # create the fresh directory os.mkdir(self.sub_folder) # generate the run.sh script which will run the test but exit at timeout. The script will # also always return with status 0, such that we can check the output and give meaningful # error messages with open(os.path.join(self.sub_folder, "run.sh"), "w") as fp: fp.write( dedent( """ cd $(dirname $0) make clean make all if [ $? -eq 0 ]; then timeout -k 1 5 make run $@ if [ $? -eq 0 ]; then echo "#@# success" else echo "#@# error: run" fi else echo "#@# error: build" fi cd .. 
""" ) ) # generate all necessary header files (independent of device name) # common header with open(os.path.join(self.sub_folder, "common.h"), "w") as fp: fp.write(self.get_common_header_str()) # header of all tests for case in self.cases[start:end]: with open(os.path.join(self.sub_folder, case.get_header_filename()), "w") as fp: fp.write(case.get_header_file_str(gen_stimuli, gen_result)) # next, generate the remaining test structure if self.device_name == "ibex": self.generate_ibex_test_program(start, end) elif self.device_name == "riscy": self.generate_riscy_test_program(start, end) else: raise RuntimeError("Unknown device name: {}".format(self.device_name)) def generate_ibex_test_program(self, start, end): """ generate all files needed for the ibex test """ with open(os.path.join(self.sub_folder, "test.c"), "w") as fp: fp.write( dedent( """\ #include "rt/rt_api.h" #include "stdio.h" #include "plp_math.h" #include "common.h" {includes} {do_benchs} {run_tests} {test_entry} int main(void) {{ test_entry(); return 0; }} """ ).format(includes=self.get_main_imports(start, end), test_entry=self.get_test_entry_function(start, end), run_tests="\n".join([case.get_run_test_function() for case in self.cases[start:end]]), do_benchs="\n".join([case.get_do_bench_function(self.function_name) for case in self.cases[start:end]])) ) with open(os.path.join(self.sub_folder, "Makefile"), "w") as fp: fp.write(dedent( """\ PULP_APP = test PULP_APP_FC_SRCS = test.c PULP_LDFLAGS += -lplpdsp PULP_CFLAGS += -I$(CONFIG_BUILD_DIR) -O3 -g ifdef TFLAGS PULP_CFLAGS += $(TFLAGS) endif include $(PULP_SDK_HOME)/install/rules/pulp_rt.mk PULP_CFLAGS += -D DATA=$(CONFIG_BUILD_DIR)$(BUILD_DIR_EXT) """ )) def generate_riscy_test_program(self, start, end): """ generate all files needed for the riscy test """ with open(os.path.join(self.sub_folder, "test.c"), "w") as fp: fp.write(dedent( """\ #include "rt/rt_api.h" #include "stdio.h" #include "cluster.h" int main(){ rt_cluster_mount(1, 0, 0, NULL); rt_cluster_call(NULL, 0, cluster_entry, NULL, NULL, 0, 0, 0, NULL); rt_cluster_mount(0, 0, 0, NULL); return 0; } """ )) with open(os.path.join(self.sub_folder, "cluster.h"), "w") as fp: fp.write(dedent( """\ #ifndef __PULP_DSP_TEST__CLUSTER_H__ #define __PULP_DSP_TEST__CLUSTER_H__ void cluster_entry(void *arg); #endif//__PULP_DSP_TEST__CLUSTER_H__ """ )) with open(os.path.join(self.sub_folder, "cluster.c"), "w") as fp: fp.write( dedent( """\ #include "rt/rt_api.h" #include "stdio.h" #include "plp_math.h" #include "common.h" {includes} {do_benchs} {run_tests} {test_entry} void cluster_entry(void* args) {{ test_entry(); }} """ ).format(includes=self.get_main_imports(start, end), test_entry=self.get_test_entry_function(start, end), run_tests="\n".join([case.get_run_test_function() for case in self.cases[start:end]]), do_benchs="\n".join([case.get_do_bench_function(self.function_name) for case in self.cases[start:end]])) ) with open(os.path.join(self.sub_folder, "Makefile"), "w") as fp: fp.write(dedent( """\ PULP_APP = test PULP_APP_FC_SRCS = test.c PULP_APP_CL_SRCS = cluster.c PULP_LDFLAGS += -lplpdsp PULP_CFLAGS += -I$(CONFIG_BUILD_DIR) -O3 -g ifdef TFLAGS PULP_CFLAGS += $(TFLAGS) endif include $(PULP_SDK_HOME)/install/rules/pulp_rt.mk PULP_CFLAGS += -D DATA=$(CONFIG_BUILD_DIR)$(BUILD_DIR_EXT) """ )) def generate_test_program(_config, _output, test_obj, start, end): """ generate the test program without serialization and deserialization It will generate tests for cases from start up to, but not including end """ # wrap everything in a 
try block in order to see the error message try: gen_stimuli_file = os.path.join(os.getcwd(), "gen_stimuli.py") # import gen_stimuli import runpy gen_stimuli = runpy.run_path(gen_stimuli_file) compute_result = gen_stimuli['compute_result'] try: generate_stimuli = gen_stimuli['generate_stimuli'] except KeyError: generate_stimuli = None test_obj.generate_test_program(generate_stimuli, compute_result, start, end) return (True, None) except Exception: print("generate_test_program failed") print(traceback.format_exc()) return (False, None) def check_output(config, output, test_obj): """ parses the output and prints the results """ # parse the output and get all cases cases_result, err = parse_output(output) if err: status = '\033[91mKILL:\033[0m' print("{} {}".format(status, err)) return (False, None) passed = all([c['passed'] for c in cases_result] or [False]) passed = passed and len(cases_result) == len(test_obj.cases) tests_missing = {i for i, _ in enumerate(test_obj.cases)} for case_idx, result in enumerate(cases_result): case = test_obj.cases[case_idx] tests_missing.remove(case_idx) # print the user messages if len(result['user_msg']) >= 1: print("\n".join(result['user_msg'])) # print the result if result['passed']: status = '\033[92mOK:\033[0m ' else: if result['error_msg']: status = '\033[91mKILL:\033[0m' else: status = '\033[91mFAIL:\033[0m' print("{} {}".format(status, ", ".join(["{}={}".format(k, case.env[k]) for k in test_obj.visible_env]))) # print error messages if result['error_msg']: err = "\033[1m%s\033[0m" % err for msg in result['error_msg']: print(" %s" % msg) # print mismatches if result['mismatches'] and test_obj.extended_output: print(indent("\n".join(result['mismatches']), " ")) if passed: bench_output(result, test_obj, case) for case_idx in tests_missing: case = test_obj.cases[case_idx] status = '\033[93mSKIP:\033[0m' print("{} {}".format(status, ", ".join(["{}={}".format(k, case.env[k]) for k in test_obj.visible_env]))) # clean the directory clean(test_obj.sub_folder) return (passed, None) def parse_output(output): """ Parse the output of running all tests (all test projects together) """ cases = [] current_case = -1 user_msg_mode = False gvsoc_error_str = [] gvsoc_error_re = re.compile("^[0-9]*: [0-9]*: \[.*\]") for line in output.split('\n'): if user_msg_mode: # special mode where everything is added to user_msg if "#@# output end" in line: if len(cases[current_case]['user_msg']) > 0 and len(cases[current_case]['user_msg'][-1].strip()) == 0: del cases[current_case]['user_msg'][-1] user_msg_mode = False # do not add the line if it is empty and it is the first message elif not (len(cases[current_case]['user_msg']) == 0 and len(line.strip()) == 0): cases[current_case]['user_msg'].append(line) continue # normal parsing mode line = line.strip() if line == "#@# }": current_case = -1 gvsoc_error_str = [] elif line.startswith("#@# testcase") and line.endswith("{"): current_case = int(line[len("#@# testcase "):-len(" {")]) assert len(cases) == current_case cases.append({'passed': False, 'error_msg': None, 'user_msg': [], 'cycles': 0, 'instructions': 0, 'load_stalls': 0, 'icache_miss': 0, 'tcdm_cont': 0, 'mismatches': []}) elif line.startswith('#@# passed:'): cases[current_case]['passed'] = line.find('1') != -1 elif line.startswith('#@# cycles'): cases[current_case]['cycles'] = int(line.split(": ")[1]) elif line.startswith('#@# instructions'): cases[current_case]['instructions'] = int(line.split(": ")[1]) elif line.startswith('#@# load_stalls'): 
cases[current_case]['load_stalls'] = int(line.split(": ")[1]) elif line.startswith('#@# icache_miss'): cases[current_case]['icache_miss'] = int(line.split(": ")[1]) elif line.startswith('#@# tcdm_cont'): cases[current_case]['tcdm_cont'] = int(line.split(": ")[1]) elif line.startswith('#@# mismatch'): cases[current_case]['mismatches'].append("Mismatch: %s" % line[13:]) elif "#@# output start" in line: user_msg_mode = True elif gvsoc_error_re.match(line): gvsoc_error_str.append(line[line.find("["):]) elif line.startswith('#@# error:'): reason = line.split(": ")[1] if reason == "run": cases[current_case]['passed'] = False cases[current_case]['error_msg'] = gvsoc_error_str if gvsoc_error_str else ["Timeout"] # clear the current case current_case = -1 if reason == "clean": return None, "Cannot clean test" if reason == "build": return None, "Cannot build test" return cases, None BENCHMARK_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bench_{}.csv".format(time.strftime("%Y-%m-%d_%H:%M:%S"))) def bench_output(performance, test_obj, test_case): # generate file and first header line if it does not yet exist if not os.path.isfile(BENCHMARK_FILE): # create file and write header with open(BENCHMARK_FILE, "w") as f: f.write( "name,device,dimension,cycles,instructions,ipc,imiss,ld_stall,tcdm_cont,ops,mpc\n" ) # extract relevant fields dimension = "; ".join(["%s=%s" % (k, str(test_case.env[k])) for k in test_obj.visible_env]) insn_per_cycles = performance['instructions'] / performance['cycles'] ops_per_cycle = test_case.n_ops / performance['cycles'] # write the new line with open(BENCHMARK_FILE, "a") as f: f.write(",".join([test_obj.function_name, test_obj.device_name, dimension, str(performance['cycles']), str(performance['instructions']), str(insn_per_cycles), str(performance['icache_miss']), str(performance['load_stalls']), str(performance['tcdm_cont']), str(test_case.n_ops), str(ops_per_cycle)])) f.write("\n") class Sweep: """ Iterator over all variables and returns the environment""" def __init__(self, variables, version): self.variables = variables self.prod_iter = product(*[v.values if v.active(version) else [v.values[0]] for v in self.variables if isinstance(v, SweepVariable)]) def __iter__(self): return self def __next__(self): sweep_vars = iter(next(self.prod_iter)) env = OrderedDict() for var in self.variables: if isinstance(var, SweepVariable): env[var.name] = next(sweep_vars) if isinstance(var, DynamicVariable): env[var.name] = var.fun(env) return env def fmt_float(val): """ This function returns the hex representation of a float """ if val == 0: val = 0.0 if isinstance(val, float): val = np.float32(val) assert isinstance(val, np.float32) packed = struct.pack('!f', val) int_val = sum([b << ((3 - i) * 8) for i, b in enumerate(packed)]) return hex(int_val) def ctype_mem_size(ctype): """ returns the memory size for a specific ctype """ if "int8_t" in ctype: return 1 if "int16_t" in ctype: return 2 if "int32_t" in ctype: return 4 if "float" in ctype: return 4 raise RuntimeError("Memory size of ctype %s is unknown!", ctype) def declare_scalar(name, ctype, value): """ returns a string to declare and initialize a scalar value """ assert isinstance(value, (int, float, np.int8, np.int16, np.int32, np.float32)) if ctype == "float": # We want to write the floating point as hex representation to the header file (and not # as a decimal "string"). Then, we want to typecast it to a float. One way is to get the # address of the variable, and then cast it to a float pointer. 
However, it is not # possible to dereference a pointer in a .h file. Therefore, we use the second method; # generating a union with a float (.f) and a unsigned int (.u). return "__u2f %s = {.u = %sU};\n" % (name, fmt_float(value)) else: return('%s %s = %s;\n' % (ctype, name, value)) def declare_array(name, ctype, length, arr): assert isinstance(arr, (np.ndarray, list)) assert length == len(arr) if ctype == "float": values_str = ", ".join([fmt_float(x) for x in arr]) # We store float values in their hex representation. This way, we do not use the # inaccurate decimal "string" representation, and we guarantee that the data is the # exact same as when computeing the expected result. return dedent( """\ RT_L2_DATA uint32_t {name}__int[{len}] = {{ {content} }}; float* {name} = (float*)((void*){name}__int); """ ).format(name=name, len=length, content=indent("\n".join(wrap(values_str, 96)), " ")) else: values_str = ", ".join([str(x) for x in arr]) return dedent( """\ RT_L2_DATA {ctype} {name}[{len}] = {{ {content} }}; """ ).format(ctype=ctype, name=name, len=length, content=indent("\n".join(wrap(values_str, 96)), " ")) def tolerance_check_str(acq, exp, tolerance, ctype, indent, target): """ returns a string which performs the check of acq and exp. The check will properly add the tolerance, including all possible overflow cases.""" if tolerance == 0: return "%sif (%s != %s) {" % (indent, exp, acq) elif ctype == "float": # only relative tolerance is allowed assert tolerance < 1 # In case of float: add a tiny absolute offset of 0.0001 return dedent( """\ {indent}float __tol = ABS({tol:E} * (float){exp} + 0.0001); {indent}if (!({acq} >= ({ty})({exp} - __tol) && {indent} {acq} <= ({ty})({exp} + __tol))) {{\ """ ).format(indent=indent, acq=acq, exp=exp, tol=tolerance, ty=ctype) unsigned_bits = 7 if ctype == "int8_t" else 15 if ctype == "int16_t" else 31 type_min = -(1 << unsigned_bits) type_max = (1 << unsigned_bits) - 1 if tolerance < 1: # interpret tolerance as relative tolerance if target == "ibex": # in this case, we cannot use floating point! But make sure that the fraction is at # least 1. return dedent( """\ {indent}{ty} __tol_t = ABS({exp} / {tol_fraction}) + 1; {indent}if (!(({exp} < {type_min} + __tol_t && {indent} ({acq} <= {exp} + __tol_t || {indent} {acq} >= {exp} - __tol_t)) || {indent} ({exp} > {type_max} - __tol_t && {indent} ({acq} >= {exp} - __tol_t || {indent} {acq} <= {exp} + __tol_t)) || {indent} ({exp} >= {type_min} + __tol_t && {indent} {exp} <= {type_max} - __tol_t && {indent} ({acq} >= {exp} - __tol_t && {indent} {acq} <= {exp} + __tol_t)))) {{\ """ ).format(indent=indent, acq=acq, exp=exp, ty=ctype, tol_fraction=int(1 / tolerance), type_min=type_min, type_max=type_max) else: # Here, we can use float. But for the int-version, we want to round up. 
return dedent( """\ {indent}float __tol = ABS({tol:E} * (float){exp}); {indent}{ty} __tol_t = ({ty})(__tol + 0.999); {indent}if (!(({exp} < {type_min} + __tol_t && {indent} ({acq} <= {exp} + __tol_t || {indent} {acq} >= {exp} - __tol_t)) || {indent} ({exp} > {type_max} - __tol_t && {indent} ({acq} >= {exp} - __tol_t || {indent} {acq} <= {exp} + __tol_t)) || {indent} ({exp} >= {type_min} + __tol_t && {indent} {exp} <= {type_max} - __tol_t && {indent} ({acq} >= {exp} - __tol_t && {indent} {acq} <= {exp} + __tol_t)))) {{\ """ ).format(indent=indent, acq=acq, exp=exp, tol=tolerance, ty=ctype, type_min=type_min, type_max=type_max) else: # interpret tolerance as absolute tolerance if target == "ibex": return dedent( """\ {indent}{ty} __tol_t = {tol}; {indent}if (!(({exp} < {type_min} + __tol_t && {indent} ({acq} <= {exp} + __tol_t || {indent} {acq} >= {exp} - __tol_t)) || {indent} ({exp} > {type_max} - __tol_t && {indent} ({acq} >= {exp} - __tol_t || {indent} {acq} <= {exp} + __tol_t)) || {indent} ({exp} >= {type_min} + __tol_t && {indent} {exp} <= {type_max} - __tol_t && {indent} ({acq} >= {exp} - __tol_t && {indent} {acq} <= {exp} + __tol_t)))) {{\ """ ).format(indent=indent, acq=acq, exp=exp, ty=ctype, tol=int(tolerance), type_min=type_min, type_max=type_max) else: return dedent( """\ {indent}{ty} __tol_t = {tol}; {indent}if (!(({exp} < {type_min} + __tol_t && {indent} ({acq} <= {exp} + __tol_t || {indent} {acq} >= {exp} - __tol_t)) || {indent} ({exp} > {type_max} - __tol_t && {indent} ({acq} >= {exp} - __tol_t || {indent} {acq} <= {exp} + __tol_t)) || {indent} ({exp} >= {type_min} + __tol_t && {indent} {exp} <= {type_max} - __tol_t && {indent} ({acq} >= {exp} - __tol_t && {indent} {acq} <= {exp} + __tol_t)))) {{\ """ ).format(indent=indent, acq=acq, exp=exp, tol=int(tolerance), ty=ctype, type_min=type_min, type_max=type_max) def generate_test(function_name, arguments, variables, implemented, use_l1=False, extended_output=True, n_ops=None, arg_ret_type=None): """ Entry-Point of the phase 1 """ testsets = [ Testset( name=device_name, tests=[ AggregatedTest(function_name=function_name, version=v, arg_ret_type=arg_ret_type, arguments=arguments, variables=variables, visible_env=[var.name for var in variables if var.visible and var.active(v)], device_name=device_name, use_l1=use_l1, extended_output=extended_output, n_ops=n_ops).to_plptest() for v in impl if impl[v] ] ) for device_name, impl in implemented.items() ] return {'testsets': testsets} def call_dynamic_function(f, env, version, device, arg_name=None, argument=None, use_l1=None): """ Calls the funciton f and passes env, version, device or var_types, based on the arguments of the function """ possible_args = { 'e': (env, "environment: dict(str -> value)"), 'env': (env, "environment: dict(str -> value)"), 'environ': (env, "environment: dict(str -> value)"), 'environment': (env, "environment: dict(str -> value)"), 'v': (version, "version: str"), 'ver': (version, "version: str"), 'version': (version, "version: str"), 'd': (device, "device: str"), 'dev': (device, "device: str"), 'device': (device, "device: str"), 't': (device, "device: str"), 'tar': (device, "device: str"), 'target': (device, "device: str") } assert not (arg_name is not None and argument is not None) if arg_name is not None: possible_args.update({ 'a': (arg_name, "arg_name: F: str -> str"), 'arg': (arg_name, "arg_name: F: str -> str"), 'name': (arg_name, "arg_name: F: str -> str"), 'arg_name': (arg_name, "arg_name: F: str -> str"), }) if argument is not None: 
possible_args.update({ 'a': (argument, "arg_name: F: str -> str"), 'arg': (argument, "arg_name: F: str -> str"), 'argument': (argument, "arg_name: F: str -> str"), }) if use_l1 is not None: possible_args.update({ 'l1': (argument, "use_l1: bool"), 'use_l1': (argument, "use_l1: bool"), }) # __code__.co_varnames returns the list of argument names of the function arg_list = f.__code__.co_varnames if not set(arg_list).issubset(possible_args.keys()): valid_options = "\n".join(["{:11} -> {}".format(k, v[1]) for k, v in sorted( possible_args.items(), key=lambda t: "{}:{}".format(t[1][1], t[0]) )]) raise RuntimeError( "Function '{}' can use only the following argument names: \n\n{}\n\n{}" .format( f.__name__, indent(valid_options, " "), "InvalidArgument: '%s'" % [a for a in arg_list if a not in possible_args][0] ) ) args = (possible_args[arg_name][0] for arg_name in arg_list) return f(*args) def clean(sub_folder): """ Clean the test environment """ if os.path.exists(sub_folder): shutil.rmtree(sub_folder)
[]
[]
[ "PULP_CURRENT_CONFIG_ARGS", "TEST_PLATFORM" ]
[]
["PULP_CURRENT_CONFIG_ARGS", "TEST_PLATFORM"]
python
2
0
problem-solving/Birthday Cake Candles/Solution.py
#!/bin/python3

import math
import os
import random
import re
import sys

#
# Complete the 'birthdayCakeCandles' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY candles as parameter.
#

def birthdayCakeCandles(candles):
    # Write your code here
    count = 0
    pt = max(candles)
    for i in candles:
        if i == pt:
            count += 1
    return count

if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    candles_count = int(input().strip())

    candles = list(map(int, input().rstrip().split()))

    result = birthdayCakeCandles(candles)

    fptr.write(str(result) + '\n')

    fptr.close()
[]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
python
1
0
website/website/wsgi.py
""" WSGI config for website project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "website.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
pkg/tests/fixtures/app_gateway.go
// -------------------------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// --------------------------------------------------------------------------------------------

package fixtures

import (
	"github.com/Azure/application-gateway-kubernetes-ingress/pkg/tests"

	n "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-09-01/network"
	"github.com/Azure/go-autorest/autorest/to"
)

// GetAppGateway creates an ApplicationGateway struct.
func GetAppGateway() n.ApplicationGateway {
	// The order of the lists below is important as we reference these by index in unit tests.
	return n.ApplicationGateway{
		ApplicationGatewayPropertiesFormat: &n.ApplicationGatewayPropertiesFormat{
			RequestRoutingRules: &[]n.ApplicationGatewayRequestRoutingRule{
				*GetDefaultRoutingRule(),
				*GetRequestRoutingRuleBasic(),
				*GetRequestRoutingRulePathBased1(),
				*GetRequestRoutingRulePathBased2(),
			},
			URLPathMaps: &[]n.ApplicationGatewayURLPathMap{
				*GetDefaultURLPathMap(),
				*GetURLPathMap1(),
				*GetURLPathMap2(),
			},
			HTTPListeners: &[]n.ApplicationGatewayHTTPListener{
				*GetDefaultListener(),
				*GetListenerBasic(),
				*GetListenerPathBased1(),
				*GetListenerPathBased2(),
				*GetListenerUnassociated(),
			},
			SslCertificates: &[]n.ApplicationGatewaySslCertificate{
				GetCertificate1(),
				GetCertificate2(),
				GetCertificate3(),
			},
			Probes: &[]n.ApplicationGatewayProbe{
				GetApplicationGatewayProbe(nil, to.StringPtr(PathFoo)), // /foo
				GetApplicationGatewayProbe(nil, to.StringPtr(PathBar)), // /bar
				GetApplicationGatewayProbe(to.StringPtr(tests.OtherHost), nil),
			},
			BackendHTTPSettingsCollection: &[]n.ApplicationGatewayBackendHTTPSettings{
				GetHTTPSettings1(),
				GetHTTPSettings2(),
				GetHTTPSettings3(),
			},
			FrontendIPConfigurations: &[]n.ApplicationGatewayFrontendIPConfiguration{
				GetPublicIPConfiguration(),
			},
			RedirectConfigurations: &[]n.ApplicationGatewayRedirectConfiguration{
				{
					Name: to.StringPtr("redirect-1"),
					ApplicationGatewayRedirectConfigurationPropertiesFormat: &n.ApplicationGatewayRedirectConfigurationPropertiesFormat{},
				},
			},
		},
	}
}
[]
[]
[]
[]
[]
go
null
null
null
desktop/core/src/desktop/management/commands/desktop_document_cleanup.py
# adapted from django-extensions (http://code.google.com/p/django-command-extensions/) # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time from importlib import import_module from django.conf import settings from django.core.management.base import BaseCommand, CommandError from django.utils.translation import ugettext_lazy as _t, ugettext as _ from beeswax.models import SavedQuery from beeswax.models import Session from datetime import date, timedelta from oozie.models import Workflow from django.db.utils import DatabaseError import desktop.conf from desktop.models import Document2 import logging import logging.handlers import desktop.conf LOG = logging.getLogger(__name__) class Command(BaseCommand): """ Handler for purging old Query History, Workflow documents and Session data """ try: from optparse import make_option option_list = BaseCommand.option_list + ( make_option("--keep-days", help=_t("Number of days of history data to keep."), action="store", type=int, default=30), ) except AttributeError, e: baseoption_test = 'BaseCommand' in str(e) and 'option_list' in str(e) if baseoption_test: def add_arguments(self, parser): parser.add_argument("--keep-days", help=_t("Number of days of history data to keep."), action="store", type=int, default=30) else: LOG.exception(str(e)) sys.exit(1) def objectCleanup(self, objClass, filterType, filterValue, dateField): errorCount = 0 checkCount = 0 resets = 0 deleteRecords = self.deleteRecordsBase totalObjects = objClass.objects.filter(**{ '%s' % filterType: filterValue, '%s__lte' % dateField: self.timeDeltaObj, })\ .values_list("id", flat=True) LOG.info("Looping through %s objects. %s objects to be deleted." 
% (objClass.__name__, totalObjects.count())) while totalObjects.count(): if deleteRecords < 30 and resets < self.resetMax: checkCount += 1 if checkCount == self.resetCount: deleteRecords = self.deleteRecordsBase resets += 1 checkCount = 0 LOG.info("%s objects left: %s" % (objClass.__name__, totalObjects.count())) deleteObjects = objClass.objects.filter(**{ '%s' % filterType: filterValue, '%s__lte' % dateField: self.timeDeltaObj, })\ .values_list("id", flat=True)[:deleteRecords] try: objClass.objects.filter(pk__in=list(deleteObjects)).delete() errorCount = 0 except DatabaseError, e: LOG.info("Non Fatal Exception: %s: %s" % (e.__class__.__name__, e)) errorCount += 1 if errorCount > 9 and deleteRecords == 1: raise if deleteRecords > 100: deleteRecords = max(deleteRecords - 100, 1) else: deleteRecords = max(deleteRecords - 10, 1) LOG.info("Decreasing max delete records to: %s" % deleteRecords) totalObjects = objClass.objects.filter(**{'%s' % filterType: filterValue, '%s__lte' % dateField: self.timeDeltaObj, })\ .values_list("id", flat=True) def handle(self, *args, **options): self.keepDays = options['keep_days'] self.timeDeltaObj = date.today() - timedelta(days=self.keepDays) self.resetCount = 15 self.resetMax = 5 self.deleteRecordsBase = 999 #number of documents to delete in a batch #to avoid Non Fatal Exception: DatabaseError: too many SQL variables LOG.warn("HUE_CONF_DIR: %s" % os.environ['HUE_CONF_DIR']) LOG.info("DB Engine: %s" % desktop.conf.DATABASE.ENGINE.get()) LOG.info("DB Name: %s" % desktop.conf.DATABASE.NAME.get()) LOG.info("DB User: %s" % desktop.conf.DATABASE.USER.get()) LOG.info("DB Host: %s" % desktop.conf.DATABASE.HOST.get()) LOG.info("DB Port: %s" % str(desktop.conf.DATABASE.PORT.get())) LOG.info("Cleaning up anything in the Hue tables django_session, oozie*, desktop* and beeswax* older than %s old" % self.keepDays) start = time.time() #Clean out Hive / Impala Query History self.objectCleanup(SavedQuery, 'is_auto', True, 'mtime') #Clear out old Hive/Impala sessions self.objectCleanup(Session, 'status_code__gte', -10000, 'last_used') #Clean out Trashed Workflows self.objectCleanup(Workflow, 'is_trashed', True, 'last_modified') #Clean out Workflows without a name self.objectCleanup(Workflow, 'name', '', 'last_modified') #Clean out history Doc2 objects self.objectCleanup(Document2, 'is_history', True, 'last_modified') #Clean out expired sessions LOG.debug("Cleaning out expired sessions from django_session table") engine = import_module(settings.SESSION_ENGINE) try: engine.SessionStore.clear_expired() except NotImplementedError: LOG.error("Session engine '%s' doesn't support clearing " "expired sessions.\n" % settings.SESSION_ENGINE) end = time.time() elapsed = (end - start) LOG.debug("Total time elapsed (seconds): %.2f" % elapsed)
[]
[]
[ "HUE_CONF_DIR" ]
[]
["HUE_CONF_DIR"]
python
1
0
testutil/shrug.go
package testutil

import (
	"fmt"
	"os"
	"testing"
)

const shrug = `¯\_(ツ)_/¯`

func Shrug(t *testing.T, issue int) {
	if os.Getenv("TEST_UNSKIP") == "" {
		t.Skip(fmt.Sprintf("%v - https://github.com/ovrclk/akash/issues/%d", shrug, issue))
	}
}
[ "\"TEST_UNSKIP\"" ]
[]
[ "TEST_UNSKIP" ]
[]
["TEST_UNSKIP"]
go
1
0
test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go
package uvm import ( "context" "fmt" "io" "net" "os" "path/filepath" "strings" "github.com/Microsoft/hcsshim/internal/ncproxyttrpc" "github.com/containerd/ttrpc" "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/pkg/guid" "github.com/pkg/errors" "github.com/sirupsen/logrus" "go.opencensus.io/trace" "github.com/Microsoft/hcsshim/internal/gcs" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/processorinfo" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/osversion" ) type PreferredRootFSType int const ( PreferredRootFSTypeInitRd PreferredRootFSType = iota PreferredRootFSTypeVHD entropyVsockPort = 1 linuxLogVsockPort = 109 ) // OutputHandler is used to process the output from the program run in the UVM. type OutputHandler func(io.Reader) const ( // InitrdFile is the default file name for an initrd.img used to boot LCOW. InitrdFile = "initrd.img" // VhdFile is the default file name for a rootfs.vhd used to boot LCOW. VhdFile = "rootfs.vhd" // KernelFile is the default file name for a kernel used to boot LCOW. KernelFile = "kernel" // UncompressedKernelFile is the default file name for an uncompressed // kernel used to boot LCOW with KernelDirect. UncompressedKernelFile = "vmlinux" ) // OptionsLCOW are the set of options passed to CreateLCOW() to create a utility vm. type OptionsLCOW struct { *Options BootFilesPath string // Folder in which kernel and root file system reside. Defaults to \Program Files\Linux Containers KernelFile string // Filename under `BootFilesPath` for the kernel. Defaults to `kernel` KernelDirect bool // Skip UEFI and boot directly to `kernel` RootFSFile string // Filename under `BootFilesPath` for the UVMs root file system. Defaults to `InitrdFile` KernelBootOptions string // Additional boot options for the kernel EnableGraphicsConsole bool // If true, enable a graphics console for the utility VM ConsolePipe string // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe SCSIControllerCount uint32 // The number of SCSI controllers. Defaults to 1. Currently we only support 0 or 1. UseGuestConnection bool // Whether the HCS should connect to the UVM's GCS. Defaults to true ExecCommandLine string // The command line to exec from init. Defaults to GCS ForwardStdout bool // Whether stdout will be forwarded from the executed program. Defaults to false ForwardStderr bool // Whether stderr will be forwarded from the executed program. Defaults to true OutputHandler OutputHandler `json:"-"` // Controls how output received over HVSocket from the UVM is handled. Defaults to parsing output as logrus messages VPMemDeviceCount uint32 // Number of VPMem devices. Defaults to `DefaultVPMEMCount`. Limit at 128. If booting UVM from VHD, device 0 is taken. VPMemSizeBytes uint64 // Size of the VPMem devices. Defaults to `DefaultVPMemSizeBytes`. VPMemNoMultiMapping bool // Disables LCOW layer multi mapping PreferredRootFSType PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD` EnableColdDiscardHint bool // Whether the HCS should use cold discard hints. 
Defaults to false VPCIEnabled bool // Whether the kernel should enable pci SecurityPolicy string // Optional security policy } // defaultLCOWOSBootFilesPath returns the default path used to locate the LCOW // OS kernel and root FS files. This default is the subdirectory // `LinuxBootFiles` in the directory of the executable that started the current // process; or, if it does not exist, `%ProgramFiles%\Linux Containers`. func defaultLCOWOSBootFilesPath() string { localDirPath := filepath.Join(filepath.Dir(os.Args[0]), "LinuxBootFiles") if _, err := os.Stat(localDirPath); err == nil { return localDirPath } return filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers") } // NewDefaultOptionsLCOW creates the default options for a bootable version of // LCOW. // // `id` the ID of the compute system. If not passed will generate a new GUID. // // `owner` the owner of the compute system. If not passed will use the // executable files name. func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW { // Use KernelDirect boot by default on all builds that support it. kernelDirectSupported := osversion.Build() >= 18286 opts := &OptionsLCOW{ Options: newDefaultOptions(id, owner), BootFilesPath: defaultLCOWOSBootFilesPath(), KernelFile: KernelFile, KernelDirect: kernelDirectSupported, RootFSFile: InitrdFile, KernelBootOptions: "", EnableGraphicsConsole: false, ConsolePipe: "", SCSIControllerCount: 1, UseGuestConnection: true, ExecCommandLine: fmt.Sprintf("/bin/gcs -v4 -log-format json -loglevel %s", logrus.StandardLogger().Level.String()), ForwardStdout: false, ForwardStderr: true, OutputHandler: parseLogrus(id), VPMemDeviceCount: DefaultVPMEMCount, VPMemSizeBytes: DefaultVPMemSizeBytes, VPMemNoMultiMapping: osversion.Get().Build < osversion.V19H1, PreferredRootFSType: PreferredRootFSTypeInitRd, EnableColdDiscardHint: false, VPCIEnabled: false, SecurityPolicy: "", } if _, err := os.Stat(filepath.Join(opts.BootFilesPath, VhdFile)); err == nil { // We have a rootfs.vhd in the boot files path. Use it over an initrd.img opts.RootFSFile = VhdFile opts.PreferredRootFSType = PreferredRootFSTypeVHD } if kernelDirectSupported { // KernelDirect supports uncompressed kernel if the kernel is present. // Default to uncompressed if on box. NOTE: If `kernel` is already // uncompressed and simply named 'kernel' it will still be used // uncompressed automatically. if _, err := os.Stat(filepath.Join(opts.BootFilesPath, UncompressedKernelFile)); err == nil { opts.KernelFile = UncompressedKernelFile } } return opts } // CreateLCOW creates an HCS compute system representing a utility VM. func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error) { ctx, span := trace.StartSpan(ctx, "uvm::CreateLCOW") defer span.End() defer func() { oc.SetSpanStatus(span, err) }() if opts.ID == "" { g, err := guid.NewV4() if err != nil { return nil, err } opts.ID = g.String() } span.AddAttributes(trace.StringAttribute(logfields.UVMID, opts.ID)) log.G(ctx).WithField("options", fmt.Sprintf("%+v", opts)).Debug("uvm::CreateLCOW options") // We dont serialize OutputHandler so if it is missing we need to put it back to the default. 
if opts.OutputHandler == nil { opts.OutputHandler = parseLogrus(opts.ID) } uvm := &UtilityVM{ id: opts.ID, owner: opts.Owner, operatingSystem: "linux", scsiControllerCount: opts.SCSIControllerCount, vpmemMaxCount: opts.VPMemDeviceCount, vpmemMaxSizeBytes: opts.VPMemSizeBytes, vpciDevices: make(map[string]*VPCIDevice), physicallyBacked: !opts.AllowOvercommit, devicesPhysicallyBacked: opts.FullyPhysicallyBacked, createOpts: opts, vpmemMultiMapping: !opts.VPMemNoMultiMapping, } defer func() { if err != nil { uvm.Close() } }() kernelFullPath := filepath.Join(opts.BootFilesPath, opts.KernelFile) if _, err := os.Stat(kernelFullPath); os.IsNotExist(err) { return nil, fmt.Errorf("kernel: '%s' not found", kernelFullPath) } rootfsFullPath := filepath.Join(opts.BootFilesPath, opts.RootFSFile) if _, err := os.Stat(rootfsFullPath); os.IsNotExist(err) { return nil, fmt.Errorf("boot file: '%s' not found", rootfsFullPath) } if err := verifyOptions(ctx, opts); err != nil { return nil, errors.Wrap(err, errBadUVMOpts.Error()) } processorTopology, err := processorinfo.HostProcessorInfo(ctx) if err != nil { return nil, fmt.Errorf("failed to get host processor information: %s", err) } // To maintain compatability with Docker we need to automatically downgrade // a user CPU count if the setting is not possible. uvm.processorCount = uvm.normalizeProcessorCount(ctx, opts.ProcessorCount, processorTopology) // Align the requested memory size. memorySizeInMB := uvm.normalizeMemorySize(ctx, opts.MemorySizeInMB) processor := &hcsschema.Processor2{ Count: uvm.processorCount, Limit: opts.ProcessorLimit, Weight: opts.ProcessorWeight, } // We can set a cpu group for the VM at creation time in recent builds. if opts.CPUGroupID != "" { if osversion.Build() < cpuGroupCreateBuild { return nil, errCPUGroupCreateNotSupported } processor.CpuGroup = &hcsschema.CpuGroup{Id: opts.CPUGroupID} } doc := &hcsschema.ComputeSystem{ Owner: uvm.owner, SchemaVersion: schemaversion.SchemaV21(), ShouldTerminateOnLastHandleClosed: true, VirtualMachine: &hcsschema.VirtualMachine{ StopOnReset: true, Chipset: &hcsschema.Chipset{}, ComputeTopology: &hcsschema.Topology{ Memory: &hcsschema.Memory2{ SizeInMB: memorySizeInMB, AllowOvercommit: opts.AllowOvercommit, EnableDeferredCommit: opts.EnableDeferredCommit, EnableColdDiscardHint: opts.EnableColdDiscardHint, LowMMIOGapInMB: opts.LowMMIOGapInMB, HighMMIOBaseInMB: opts.HighMMIOBaseInMB, HighMMIOGapInMB: opts.HighMMIOGapInMB, }, Processor: processor, }, Devices: &hcsschema.Devices{ HvSocket: &hcsschema.HvSocket2{ HvSocketConfig: &hcsschema.HvSocketSystemConfig{ // Allow administrators and SYSTEM to bind to vsock sockets // so that we can create a GCS log socket. DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)", }, }, Plan9: &hcsschema.Plan9{}, }, }, } // Handle StorageQoS if set if opts.StorageQoSBandwidthMaximum > 0 || opts.StorageQoSIopsMaximum > 0 { doc.VirtualMachine.StorageQoS = &hcsschema.StorageQoS{ IopsMaximum: opts.StorageQoSIopsMaximum, BandwidthMaximum: opts.StorageQoSBandwidthMaximum, } } if uvm.scsiControllerCount > 0 { // TODO: JTERRY75 - this should enumerate scsicount and add an entry per value. 
doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ "0": { Attachments: make(map[string]hcsschema.Attachment), }, } } if uvm.vpmemMaxCount > 0 { doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{ MaximumCount: uvm.vpmemMaxCount, MaximumSizeBytes: uvm.vpmemMaxSizeBytes, } } var kernelArgs string switch opts.PreferredRootFSType { case PreferredRootFSTypeInitRd: if !opts.KernelDirect { kernelArgs = "initrd=/" + opts.RootFSFile } case PreferredRootFSTypeVHD: // Support for VPMem VHD(X) booting rather than initrd.. kernelArgs = "root=/dev/pmem0 ro rootwait init=/init" imageFormat := "Vhd1" if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" { imageFormat = "Vhdx" } doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{ "0": { HostPath: rootfsFullPath, ReadOnly: true, ImageFormat: imageFormat, }, } if uvm.vpmemMultiMapping { pmem := newPackedVPMemDevice() pmem.maxMappedDeviceCount = 1 st, err := os.Stat(rootfsFullPath) if err != nil { return nil, errors.Wrapf(err, "failed to stat rootfs: %q", rootfsFullPath) } devSize := pageAlign(uint64(st.Size())) memReg, err := pmem.Allocate(devSize) if err != nil { return nil, errors.Wrap(err, "failed to allocate memory for rootfs") } defer func() { if err != nil { if err = pmem.Release(memReg); err != nil { log.G(ctx).WithError(err).Debug("failed to release memory region") } } }() dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, memReg) if err := pmem.mapVHDLayer(ctx, dev); err != nil { return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device") } uvm.vpmemDevicesMultiMapped[0] = pmem } else { dev := newDefaultVPMemInfo(opts.RootFSFile, "/") uvm.vpmemDevicesDefault[0] = dev } } vmDebugging := false if opts.ConsolePipe != "" { vmDebugging = true kernelArgs += " 8250_core.nr_uarts=1 8250_core.skip_txen_test=1 console=ttyS0,115200" doc.VirtualMachine.Devices.ComPorts = map[string]hcsschema.ComPort{ "0": { // Which is actually COM1 NamedPipe: opts.ConsolePipe, }, } } else { kernelArgs += " 8250_core.nr_uarts=0" } if opts.EnableGraphicsConsole { vmDebugging = true kernelArgs += " console=tty" doc.VirtualMachine.Devices.Keyboard = &hcsschema.Keyboard{} doc.VirtualMachine.Devices.EnhancedModeVideo = &hcsschema.EnhancedModeVideo{} doc.VirtualMachine.Devices.VideoMonitor = &hcsschema.VideoMonitor{} } if !vmDebugging { // Terminate the VM if there is a kernel panic. kernelArgs += " panic=-1 quiet" } // Add Kernel Boot options if opts.KernelBootOptions != "" { kernelArgs += " " + opts.KernelBootOptions } if !opts.VPCIEnabled { kernelArgs += ` pci=off` } // Inject initial entropy over vsock during init launch. initArgs := fmt.Sprintf("-e %d", entropyVsockPort) // With default options, run GCS with stderr pointing to the vsock port // created below in order to forward guest logs to logrus. initArgs += " /bin/vsockexec" if opts.ForwardStdout { initArgs += fmt.Sprintf(" -o %d", linuxLogVsockPort) } if opts.ForwardStderr { initArgs += fmt.Sprintf(" -e %d", linuxLogVsockPort) } initArgs += " " + opts.ExecCommandLine if vmDebugging { // Launch a shell on the console. 
initArgs = `sh -c "` + initArgs + ` & exec sh"` } kernelArgs += fmt.Sprintf(" nr_cpus=%d", opts.ProcessorCount) kernelArgs += ` brd.rd_nr=0 pmtmr=0 -- ` + initArgs if !opts.KernelDirect { doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{ BootThis: &hcsschema.UefiBootEntry{ DevicePath: `\` + opts.KernelFile, DeviceType: "VmbFs", VmbFsRootPath: opts.BootFilesPath, OptionalData: kernelArgs, }, } } else { doc.VirtualMachine.Chipset.LinuxKernelDirect = &hcsschema.LinuxKernelDirect{ KernelFilePath: kernelFullPath, KernelCmdLine: kernelArgs, } if opts.PreferredRootFSType == PreferredRootFSTypeInitRd { doc.VirtualMachine.Chipset.LinuxKernelDirect.InitRdPath = rootfsFullPath } } err = uvm.create(ctx, doc) if err != nil { return nil, fmt.Errorf("error while creating the compute system: %s", err) } // Cerate a socket to inject entropy during boot. uvm.entropyListener, err = uvm.listenVsock(entropyVsockPort) if err != nil { return nil, err } // Create a socket that the executed program can send to. This is usually // used by GCS to send log data. if opts.ForwardStdout || opts.ForwardStderr { uvm.outputHandler = opts.OutputHandler uvm.outputProcessingDone = make(chan struct{}) uvm.outputListener, err = uvm.listenVsock(linuxLogVsockPort) if err != nil { return nil, err } } if opts.UseGuestConnection { log.G(ctx).WithField("vmID", uvm.runtimeID).Debug("Using external GCS bridge") l, err := uvm.listenVsock(gcs.LinuxGcsVsockPort) if err != nil { return nil, err } uvm.gcListener = l } // If network config proxy address passed in, construct a client. if opts.NetworkConfigProxy != "" { conn, err := winio.DialPipe(opts.NetworkConfigProxy, nil) if err != nil { return nil, errors.Wrap(err, "failed to connect to ncproxy service") } client := ttrpc.NewClient(conn, ttrpc.WithOnClose(func() { conn.Close() })) uvm.ncProxyClient = ncproxyttrpc.NewNetworkConfigProxyClient(client) } return uvm, nil } func (uvm *UtilityVM) listenVsock(port uint32) (net.Listener, error) { return winio.ListenHvsock(&winio.HvsockAddr{ VMID: uvm.runtimeID, ServiceID: winio.VsockServiceID(port), }) }
[ "\"ProgramFiles\"" ]
[]
[ "ProgramFiles" ]
[]
["ProgramFiles"]
go
1
0
controller/config.py
import os

API_ENDPOINT = "https://racweb.herokuapp.com/raspberry/1/arduino"
DEVICES_ENDPOINT = "{}/".format(API_ENDPOINT)
SLOTS_ENDPOINT = "{}/{{}}/slots/".format(API_ENDPOINT)
PLANTS_ENDPOINT = "https://docs.google.com/spreadsheets/d/e/2PACX-1vT-KbxCsv32_6xZfwCi-KQEUeVskm4cAomqczfHPWIYL-3Nj3D9aawaH6yPFohSzvkJaaU9VSjifk1P/pub?gid=590649384&single=true&output=csv"

DB_PATH = "./database/db/"

RASPBERRY_ID = os.environ.get("RASPBERRY_ID", "1")
[]
[]
[ "RASPBERRY_ID" ]
[]
["RASPBERRY_ID"]
python
1
0
galaxy/settings/production.py
# (c) 2012-2018, Ansible by Red Hat # # This file is part of Ansible Galaxy # # Ansible Galaxy is free software: you can redistribute it and/or modify # it under the terms of the Apache License as published by # the Apache Software Foundation, either version 2 of the License, or # (at your option) any later version. # # Ansible Galaxy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Apache License for more details. # # You should have received a copy of the Apache License # along with Galaxy. If not, see <http://www.apache.org/licenses/>. # Django settings for galaxy project. """ Production configuration file. The following environment variables are supported: * GALAXY_SECRET_KEY * GALAXY_ALLOWED_HOSTS * GALAXY_DB_URL * GALAXY_DB_NAME * GALAXY_DB_USER * GALAXY_DB_PASSWORD * GALAXY_DB_HOST * GALAXY_DB_PORT * GALAXY_EMAIL_HOST * GALAXY_EMAIL_PORT * GALAXY_EMAIL_USER * GALAXY_EMAIL_PASSWORD * GALAXY_REDIS_HOST * GALAXY_REDIS_PORT * GALAXY_RABBITMQ_HOST * GALAXY_RABBITMQ_PORT * GALAXY_RABBITMQ_USER * GALAXY_RABBITMQ_PASSWORD * GALAXY_ADMIN_PATH * GALAXY_METRICS_ENABLED * GALAXY_INFLUX_DB_HOST * GALAXY_INFLUX_DB_PORT * GALAXY_INFLUX_DB_USERNAME * GALAXY_INFLUX_DB_PASSWORD * GALAXY_INFLUX_DB_UI_EVENTS_DB_NAME * GALAXY_AWS_ACCESS_KEY_ID * GALAXY_AWS_SECRET_ACCESS_KEY * GALAXY_AWS_STORAGE_BUCKET_NAME """ import os import dj_database_url from . import include_settings from .default import LOGGING from .default import * # noqa def _read_secret_key(settings_dir='/etc/galaxy'): """ Reads secret key from environment variable, otherwise from SECRET_KEY file in settings directory. In case secret key cannot be read, function returns None, which causes django configuration exception. :param settings_dir: Settings directory, default: '/etc/galaxy'. :return: Secret key string, if available, None otherwise. 
""" try: return os.environ['GALAXY_SECRET_KEY'] except KeyError: pass try: with open(os.path.join(settings_dir, 'SECRET_KEY')) as fp: return fp.read().strip() except IOError: return None # ========================================================= # Django Core Settings # ========================================================= DEBUG = False ALLOWED_HOSTS = os.environ.get('GALAXY_ALLOWED_HOSTS', '*').split(',') # Database # --------------------------------------------------------- # Define GALAXY_DB_URL=postgres://USER:PASSWORD@HOST:PORT/NAME DATABASES = {} if os.environ.get('GALAXY_DB_URL'): DATABASES['default'] = dj_database_url.config( env='GALAXY_DB_URL', conn_max_age=0) else: DATABASES['default'] = { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': os.environ.get('GALAXY_DB_NAME', 'galaxy'), 'USER': os.environ.get('GALAXY_DB_USER', 'galaxy'), 'PASSWORD': os.environ.get('GALAXY_DB_PASSWORD', ''), 'HOST': os.environ.get('GALAXY_DB_HOST', ''), 'PORT': int(os.environ.get('GALAXY_DB_PORT', 5432)), 'CONN_MAX_AGE': 0, } # Create default alias for worker logging DATABASES['logging'] = DATABASES['default'].copy() # Static files # --------------------------------------------------------- STATIC_ROOT = '/usr/share/galaxy/public/static' # Security # --------------------------------------------------------- SECRET_KEY = _read_secret_key() # Files upload # --------------------------------------------------------- DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage' # Email settings # --------------------------------------------------------- # FIXME(cutwater): Review parameters usage EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = os.environ.get('GALAXY_EMAIL_HOST', '') EMAIL_PORT = int(os.environ.get('GALAXY_EMAIL_PORT', 587)) EMAIL_HOST_USER = os.environ.get('GALAXY_EMAIL_USER', '') EMAIL_HOST_PASSWORD = os.environ.get('GALAXY_EMAIL_PASSWORD', '') EMAIL_USE_TLS = True # ========================================================= # Third Party Apps Settings # ========================================================= # Celery settings # --------------------------------------------------------- # TODO(cutwater): Replace with BROKER_URL connection string parameter BROKER_URL = 'amqp://{user}:{password}@{host}:{port}/{vhost}'.format( user=os.environ.get('GALAXY_RABBITMQ_USER', 'galaxy'), password=os.environ.get('GALAXY_RABBITMQ_PASSWORD', ''), host=os.environ.get('GALAXY_RABBITMQ_HOST', 'localhost'), port=os.environ.get('GALAXY_RABBITMQ_PORT', 5672), vhost=os.environ.get('GALAXY_RABBITMQ_VHOST', 'galaxy'), ) # Redis # --------------------------------------------------------- REDIS_HOST = os.environ.get('GALAXY_REDIS_HOST', 'localhost') REDIS_PORT = int(os.environ.get('GALAXY_REDIS_PORT', 6379)) # InfluxDB Settings # --------------------------------------------------------- INFLUX_DB_HOST = os.environ.get('GALAXY_INFLUX_DB_HOST', 'localhost') INFLUX_DB_PORT = os.environ.get('GALAXY_INFLUX_DB_PORT', '8086') INFLUX_DB_USERNAME = os.environ.get('GALAXY_INFLUX_DB_USERNAME', 'admin') INFLUX_DB_PASSWORD = os.environ.get('GALAXY_INFLUX_DB_PASSWORD', '') INFLUX_DB_UI_EVENTS_DB_NAME = os.environ.get( 'GALAXY_INFLUX_DB_UI_EVENTS_DB_NAME', 'galaxy_ui_events' ) # AWS settings # --------------------------------------------------------- AWS_ACCESS_KEY_ID = os.environ['GALAXY_AWS_ACCESS_KEY_ID'] AWS_SECRET_ACCESS_KEY = os.environ['GALAXY_AWS_SECRET_ACCESS_KEY'] AWS_STORAGE_BUCKET_NAME = os.environ['GALAXY_AWS_STORAGE_BUCKET_NAME'] # AWS_DEFAULT_ACL = 'public-read' # 
========================================================= # Galaxy Settings # ========================================================= SITE_ENV = 'PROD' SITE_NAME = os.environ.get('GALAXY_SITE_NAME', 'localhost') # FIXME(cutwater): Remove WAIT_FOR logic from django application WAIT_FOR = [ { 'host': DATABASES['default']['HOST'], 'port': DATABASES['default']['PORT'], }, { 'host': os.environ.get('GALAXY_RABBITMQ_HOST', ''), 'port': int(os.environ.get('GALAXY_RABBITMQ_PORT', 5672)) }, { 'host': INFLUX_DB_HOST, 'port': int(INFLUX_DB_PORT), }, { 'host': REDIS_HOST, 'port': REDIS_PORT, }, ] ADMIN_URL_PATH = os.environ.get('GALAXY_ADMIN_PATH', 'admin') ADMIN_URL_PATTERN = r'^{}/'.format(ADMIN_URL_PATH) GITHUB_TASK_USERS = ['galaxytasks01', 'galaxytasks02', 'galaxytasks03', 'galaxytasks04', 'galaxytasks05'] GALAXY_URL = 'https://{site}' # ========================================================= # System Settings # ========================================================= include_settings('/etc/galaxy/settings.py', scope=globals(), optional=True) # ========================================================= # Logging Settings # ========================================================= # https://github.com/dabapps/django-log-request-id LOG_REQUEST_ID_HEADER = "HTTP_X_REQUEST_ID" GENERATE_REQUEST_ID_IF_NOT_IN_HEADER = True REQUEST_ID_RESPONSE_HEADER = "X-REQUEST-ID" LOGGING['handlers']['console'] = { 'level': 'INFO', 'class': 'logging.StreamHandler', 'filters': ['request_id'], 'formatter': 'json', } LOGGING['loggers']['galaxy'] = { 'level': 'WARNING', 'handlers': ['console'], 'propagate': False, }
[]
[]
[ "GALAXY_INFLUX_DB_HOST", "GALAXY_DB_PASSWORD", "GALAXY_RABBITMQ_HOST", "GALAXY_DB_USER", "GALAXY_INFLUX_DB_PASSWORD", "GALAXY_EMAIL_PORT", "GALAXY_SECRET_KEY", "GALAXY_EMAIL_USER", "GALAXY_ADMIN_PATH", "GALAXY_AWS_SECRET_ACCESS_KEY", "GALAXY_REDIS_PORT", "GALAXY_AWS_ACCESS_KEY_ID", "GALAXY_INFLUX_DB_USERNAME", "GALAXY_RABBITMQ_PORT", "GALAXY_SITE_NAME", "GALAXY_EMAIL_HOST", "GALAXY_EMAIL_PASSWORD", "GALAXY_RABBITMQ_PASSWORD", "GALAXY_INFLUX_DB_UI_EVENTS_DB_NAME", "GALAXY_REDIS_HOST", "GALAXY_DB_URL", "GALAXY_ALLOWED_HOSTS", "GALAXY_AWS_STORAGE_BUCKET_NAME", "GALAXY_RABBITMQ_USER", "GALAXY_INFLUX_DB_PORT", "GALAXY_RABBITMQ_VHOST", "GALAXY_DB_NAME", "GALAXY_DB_PORT", "GALAXY_DB_HOST" ]
[]
["GALAXY_INFLUX_DB_HOST", "GALAXY_DB_PASSWORD", "GALAXY_RABBITMQ_HOST", "GALAXY_DB_USER", "GALAXY_INFLUX_DB_PASSWORD", "GALAXY_EMAIL_PORT", "GALAXY_SECRET_KEY", "GALAXY_EMAIL_USER", "GALAXY_ADMIN_PATH", "GALAXY_AWS_SECRET_ACCESS_KEY", "GALAXY_REDIS_PORT", "GALAXY_AWS_ACCESS_KEY_ID", "GALAXY_INFLUX_DB_USERNAME", "GALAXY_RABBITMQ_PORT", "GALAXY_SITE_NAME", "GALAXY_EMAIL_HOST", "GALAXY_EMAIL_PASSWORD", "GALAXY_RABBITMQ_PASSWORD", "GALAXY_INFLUX_DB_UI_EVENTS_DB_NAME", "GALAXY_REDIS_HOST", "GALAXY_DB_URL", "GALAXY_ALLOWED_HOSTS", "GALAXY_AWS_STORAGE_BUCKET_NAME", "GALAXY_RABBITMQ_USER", "GALAXY_INFLUX_DB_PORT", "GALAXY_RABBITMQ_VHOST", "GALAXY_DB_NAME", "GALAXY_DB_PORT", "GALAXY_DB_HOST"]
python
29
0