filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, nullable) | variableargcount (float64, 0, nullable) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
CNN_Model/train_classifier.py | #! /usr/bin/env python
import gc
import os
import pickle
import warnings
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from keras.models import Model
from keras.layers import Input, Dense, Embedding, SpatialDropout1D, concatenate
from keras.layers import GRU, Bidirectional, GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.preprocessing import text, sequence
from keras.callbacks import TensorBoard
from UTILS import get_root, get_logger
np.random.seed(42)
warnings.filterwarnings('ignore')
os.environ['OMP_NUM_THREADS'] = '4'
DIR_ROOT = get_root()
DIR_ASSETS = os.path.join(DIR_ROOT, 'assets')
MODEL_PATH = os.path.join(DIR_ASSETS, 'CNN_Model')
LOG_PATH = os.path.join(DIR_ASSETS, 'tb_logs')
EMBEDDING_FILE = os.path.join(DIR_ASSETS, 'CNN_Embedding_Layer', 'fasttext-crawl-300d-2m', 'crawl-300d-2M.vec')
DATA_FILE = os.path.join(DIR_ASSETS, 'CNN_Data', 'train.csv')
MAX_FEATURES = 30000
MAXLEN = 100
EMBED_SIZE = 300
TRAIN_SIZE = 0.95
BATCH_SIZE = 32
EPOCHS = 10
def convert_binary_toxic(data, classes):
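# Collapse the six per-class toxicity labels into one binary target: True when any label is non-zero.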
target = data[classes].values != np.zeros((len(data), 6))
binary = target.any(axis=1)
return binary
class Preprocess(object):
def __init__(self, max_features, maxlen):
self.max_features = max_features
self.maxlen = maxlen
def fit_texts(self, list_sentences):
self.tokenizer = text.Tokenizer(num_words=self.max_features)
self.tokenizer.fit_on_texts(list_sentences)
def transform_texts(self, list_sentences):
tokenized_sentences = self.tokenizer.texts_to_sequences(list_sentences)
features = sequence.pad_sequences(tokenized_sentences, maxlen=self.maxlen)
return features
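# Build an (nb_words, embed_size) matrix of pretrained word vectors aligned with the tokenizer's word_index.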
def get_embeddings(embed_file, word_index, max_features, embed_size):
def get_coefs(word, *arr):
return word, np.asarray(arr, dtype='float32')
embeddings_pretrained = dict(get_coefs(*o.rstrip().rsplit(' ')) for o in open(embed_file, encoding="utf8", errors='ignore'))
nb_words = min(max_features, len(word_index))
embedding_matrix = np.zeros((nb_words, embed_size))
for word, i in word_index.items():
# Keras word_index values start at 1; skip indices that fall outside embedding_matrix
if i >= nb_words:
continue
embedding_vector = embeddings_pretrained.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
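# TensorBoard callback extended to compute and print validation ROC-AUC every `interval` epochs.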
class RocAucEvaluation(TensorBoard):
def __init__(self, log_dir='./logs',
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None,
update_freq='epoch',
validation_data=(),
interval=1):
super().__init__(log_dir=log_dir, batch_size=batch_size)
self.X_val, self.y_val = validation_data
self.interval = interval
def on_epoch_end(self, epoch, logs=None):
super().on_epoch_end(epoch, logs)
if epoch % self.interval == 0:
y_pred = self.model.predict(self.X_val, verbose=0)
score = roc_auc_score(self.y_val, y_pred)
print("\n ROC-AUC - epoch: %d - score: %.6f \n" % (epoch+1, score))
def get_model(maxlen, max_features, embed_size, embedding_matrix):
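# Bidirectional GRU over the pretrained embeddings; concatenated average- and max-pooled states feed a single sigmoid unit.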
input = Input(shape=(maxlen, ))
x = Embedding(max_features, embed_size, weights=[embedding_matrix])(input)
x = SpatialDropout1D(0.2)(x)
x = Bidirectional(GRU(80, return_sequences=True))(x)
avg_pool = GlobalAveragePooling1D()(x)
max_pool = GlobalMaxPooling1D()(x)
conc = concatenate([avg_pool, max_pool])
output = Dense(1, activation="sigmoid")(conc)
model = Model(inputs=input, outputs=output)
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
if __name__ == "__main__":
logger = get_logger()
logger.info(f"Loading CNN_Data: {DATA_FILE}")
train = pd.read_csv(DATA_FILE)
classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
features = train["comment_text"].fillna("# #").values
target = convert_binary_toxic(train, classes)
del train
gc.collect()
logger.info(f"Transforming CNN_Data")
preprocessor = Preprocess(max_features=MAX_FEATURES, maxlen=MAXLEN)
preprocessor.fit_texts(list(features))
features = preprocessor.transform_texts(features)
word_index = preprocessor.tokenizer.word_index
PREPROCESSOR_FILE = os.path.join(MODEL_PATH, 'preprocessor.pkl')
logger.info(f"Saving the text transformer: {PREPROCESSOR_FILE}")
with open(PREPROCESSOR_FILE, 'wb') as file:
pickle.dump(preprocessor, file)
del preprocessor
gc.collect()
logger.info(f"Loading CNN_Embedding_Layer vectors: {EMBEDDING_FILE}")
embedding_matrix = get_embeddings(EMBEDDING_FILE, word_index, MAX_FEATURES, EMBED_SIZE)
logger.info(f"Model training, train size: {TRAIN_SIZE}")
X_train, X_val, y_train, y_val = train_test_split(features, target, train_size=TRAIN_SIZE, random_state=233)
RocAuc = RocAucEvaluation(log_dir=LOG_PATH, batch_size=BATCH_SIZE, validation_data=(X_val, y_val), interval=1)
model = get_model(MAXLEN, MAX_FEATURES, EMBED_SIZE, embedding_matrix)
hist = model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS,
validation_data=(X_val, y_val), callbacks=[RocAuc], verbose=1)
ARCHITECTURE_FILE = os.path.join(MODEL_PATH, 'gru_architecture.json')
logger.info(f"Saving the architecture: {ARCHITECTURE_FILE}")
with open(ARCHITECTURE_FILE, 'w') as file:
architecture_json = model.to_json()
file.write(architecture_json)
WEIGHTS_FILE = os.path.join(MODEL_PATH, 'gru_weights.h5')
logger.info(f"Saving the weights: {WEIGHTS_FILE}")
model.save_weights(WEIGHTS_FILE)
| [] | [] | ["OMP_NUM_THREADS"] | [] | ["OMP_NUM_THREADS"] | python | 1 | 0 | |
storage/grpc_client.go | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"os"
gapic "cloud.google.com/go/storage/internal/apiv2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
iampb "google.golang.org/genproto/googleapis/iam/v1"
storagepb "google.golang.org/genproto/googleapis/storage/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
)
const (
// defaultConnPoolSize is the default number of connections
// to initialize in the GAPIC gRPC connection pool. A larger
// connection pool may be necessary for jobs that require
// high throughput and/or leverage many concurrent streams.
//
// This is an experimental API and not intended for public use.
defaultConnPoolSize = 4
// globalProjectAlias is the project ID alias used for global buckets.
//
// This is only used for the gRPC API.
globalProjectAlias = "_"
)
// defaultGRPCOptions returns a set of the default client options
// for gRPC client initialization.
//
// This is an experimental API and not intended for public use.
func defaultGRPCOptions() []option.ClientOption {
defaults := []option.ClientOption{
option.WithGRPCConnectionPool(defaultConnPoolSize),
}
// Set emulator options for gRPC if an emulator was specified. Note that in a
// hybrid client, STORAGE_EMULATOR_HOST will set the host to use for HTTP and
// STORAGE_EMULATOR_HOST_GRPC will set the host to use for gRPC (when using a
// local emulator, HTTP and gRPC must use different ports, so this is
// necessary).
//
// TODO: When the newHybridClient is no longer used, remove
// STORAGE_EMULATOR_HOST_GRPC and use STORAGE_EMULATOR_HOST for both the
// HTTP and gRPC based clients.
if host := os.Getenv("STORAGE_EMULATOR_HOST_GRPC"); host != "" {
// Strip the scheme from the emulator host. WithEndpoint does not take a
// scheme for gRPC.
host = stripScheme(host)
defaults = append(defaults,
option.WithEndpoint(host),
option.WithGRPCDialOption(grpc.WithInsecure()),
option.WithoutAuthentication(),
)
}
return defaults
}
// grpcStorageClient is the gRPC API implementation of the transport-agnostic
// storageClient interface.
//
// This is an experimental API and not intended for public use.
type grpcStorageClient struct {
raw *gapic.Client
settings *settings
}
// newGRPCStorageClient initializes a new storageClient that uses the gRPC
// Storage API.
//
// This is an experimental API and not intended for public use.
func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) {
s := initSettings(opts...)
s.clientOption = append(defaultGRPCOptions(), s.clientOption...)
g, err := gapic.NewClient(ctx, s.clientOption...)
if err != nil {
return nil, err
}
return &grpcStorageClient{
raw: g,
settings: s,
}, nil
}
func (c *grpcStorageClient) Close() error {
return c.raw.Close()
}
// Top-level methods.
func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) {
s := callSettings(c.settings, opts...)
req := &storagepb.GetServiceAccountRequest{
Project: toProjectResource(project),
}
var resp *storagepb.ServiceAccount
err := run(ctx, func() error {
var err error
resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent)
if err != nil {
return "", err
}
return resp.EmailAddress, err
}
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
b := attrs.toProtoBucket()
// If there is lifecycle information but no location, explicitly set
// the location. This is a GCS quirk/bug.
if b.GetLocation() == "" && b.GetLifecycle() != nil {
b.Location = "US"
}
req := &storagepb.CreateBucketRequest{
Parent: toProjectResource(project),
Bucket: b,
BucketId: b.GetName(),
PredefinedAcl: attrs.PredefinedACL,
PredefinedDefaultObjectAcl: attrs.PredefinedDefaultObjectACL,
}
var battrs *BucketAttrs
err := run(ctx, func() error {
res, err := c.raw.CreateBucket(ctx, req, s.gax...)
battrs = newBucketFromProto(res)
return err
}, s.retry, s.idempotent)
return battrs, err
}
func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator {
s := callSettings(c.settings, opts...)
it := &BucketIterator{
ctx: ctx,
projectID: project,
}
var gitr *gapic.BucketIterator
fetch := func(pageSize int, pageToken string) (token string, err error) {
// Initialize GAPIC-based iterator when pageToken is empty, which
// indicates that this fetch call is attempting to get the first page.
//
// Note: Initializing the GAPIC-based iterator lazily is necessary to
// capture the BucketIterator.Prefix set by the user *after* the
// BucketIterator is returned to them from the veneer.
if pageToken == "" {
req := &storagepb.ListBucketsRequest{
Parent: toProjectResource(it.projectID),
Prefix: it.Prefix,
}
gitr = c.raw.ListBuckets(it.ctx, req, s.gax...)
}
var buckets []*storagepb.Bucket
var next string
err = run(it.ctx, func() error {
buckets, next, err = gitr.InternalFetch(pageSize, pageToken)
return err
}, s.retry, s.idempotent)
if err != nil {
return "", err
}
for _, bkt := range buckets {
b := newBucketFromProto(bkt)
it.buckets = append(it.buckets, b)
}
return next, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
fetch,
func() int { return len(it.buckets) },
func() interface{} { b := it.buckets; it.buckets = nil; return b })
return it
}
// Bucket methods.
func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error {
s := callSettings(c.settings, opts...)
req := &storagepb.DeleteBucketRequest{
Name: bucketResourceName(globalProjectAlias, bucket),
}
if err := applyBucketCondsProto("grpcStorageClient.DeleteBucket", conds, req); err != nil {
return err
}
if s.userProject != "" {
req.CommonRequestParams = &storagepb.CommonRequestParams{
UserProject: toProjectResource(s.userProject),
}
}
return run(ctx, func() error {
return c.raw.DeleteBucket(ctx, req, s.gax...)
}, s.retry, s.idempotent)
}
func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
req := &storagepb.GetBucketRequest{
Name: bucketResourceName(globalProjectAlias, bucket),
}
if err := applyBucketCondsProto("grpcStorageClient.GetBucket", conds, req); err != nil {
return nil, err
}
if s.userProject != "" {
req.CommonRequestParams = &storagepb.CommonRequestParams{
UserProject: toProjectResource(s.userProject),
}
}
var battrs *BucketAttrs
err := run(ctx, func() error {
res, err := c.raw.GetBucket(ctx, req, s.gax...)
battrs = newBucketFromProto(res)
return err
}, s.retry, s.idempotent)
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
return nil, ErrBucketNotExist
}
return battrs, err
}
func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
b := uattrs.toProtoBucket()
b.Name = bucketResourceName(globalProjectAlias, bucket)
req := &storagepb.UpdateBucketRequest{
Bucket: b,
PredefinedAcl: uattrs.PredefinedACL,
PredefinedDefaultObjectAcl: uattrs.PredefinedDefaultObjectACL,
}
if err := applyBucketCondsProto("grpcStorageClient.UpdateBucket", conds, req); err != nil {
return nil, err
}
if s.userProject != "" {
req.CommonRequestParams = &storagepb.CommonRequestParams{
UserProject: toProjectResource(s.userProject),
}
}
var paths []string
fieldMask := &fieldmaskpb.FieldMask{
Paths: paths,
}
if uattrs.CORS != nil {
fieldMask.Paths = append(fieldMask.Paths, "cors")
}
if uattrs.DefaultEventBasedHold != nil {
fieldMask.Paths = append(fieldMask.Paths, "default_event_based_hold")
}
if uattrs.RetentionPolicy != nil {
fieldMask.Paths = append(fieldMask.Paths, "retention_policy")
}
if uattrs.VersioningEnabled != nil {
fieldMask.Paths = append(fieldMask.Paths, "versioning")
}
if uattrs.RequesterPays != nil {
fieldMask.Paths = append(fieldMask.Paths, "billing")
}
if uattrs.BucketPolicyOnly != nil || uattrs.UniformBucketLevelAccess != nil || uattrs.PublicAccessPrevention != PublicAccessPreventionUnknown {
fieldMask.Paths = append(fieldMask.Paths, "iam_config")
}
if uattrs.Encryption != nil {
fieldMask.Paths = append(fieldMask.Paths, "encryption")
}
if uattrs.Lifecycle != nil {
fieldMask.Paths = append(fieldMask.Paths, "lifecycle")
}
if uattrs.Logging != nil {
fieldMask.Paths = append(fieldMask.Paths, "logging")
}
if uattrs.Website != nil {
fieldMask.Paths = append(fieldMask.Paths, "website")
}
if uattrs.PredefinedACL != "" {
// In cases where PredefinedACL is set, Acl is cleared.
fieldMask.Paths = append(fieldMask.Paths, "acl")
}
if uattrs.PredefinedDefaultObjectACL != "" {
// In cases where PredefinedDefaultObjectACL is set, DefaultObjectAcl is cleared.
fieldMask.Paths = append(fieldMask.Paths, "default_object_acl")
}
if uattrs.acl != nil {
// In cases where acl is set by UpdateBucketACL method.
fieldMask.Paths = append(fieldMask.Paths, "acl")
}
if uattrs.defaultObjectACL != nil {
// In cases where defaultObjectACL is set by UpdateBucketACL method.
fieldMask.Paths = append(fieldMask.Paths, "default_object_acl")
}
if uattrs.StorageClass != "" {
fieldMask.Paths = append(fieldMask.Paths, "storage_class")
}
if uattrs.RPO != RPOUnknown {
fieldMask.Paths = append(fieldMask.Paths, "rpo")
}
// TODO(cathyo): Handle labels. Pending b/230510191.
req.UpdateMask = fieldMask
var battrs *BucketAttrs
err := run(ctx, func() error {
res, err := c.raw.UpdateBucket(ctx, req, s.gax...)
battrs = newBucketFromProto(res)
return err
}, s.retry, s.idempotent)
return battrs, err
}
func (c *grpcStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator {
s := callSettings(c.settings, opts...)
it := &ObjectIterator{
ctx: ctx,
}
if q != nil {
it.query = *q
}
req := &storagepb.ListObjectsRequest{
Parent: bucketResourceName(globalProjectAlias, bucket),
Prefix: it.query.Prefix,
Delimiter: it.query.Delimiter,
Versions: it.query.Versions,
LexicographicStart: it.query.StartOffset,
LexicographicEnd: it.query.EndOffset,
// TODO(noahietz): Convert a projection to a FieldMask.
// ReadMask: q.Projection,
}
if s.userProject != "" {
req.CommonRequestParams = &storagepb.CommonRequestParams{UserProject: s.userProject}
}
gitr := c.raw.ListObjects(it.ctx, req, s.gax...)
fetch := func(pageSize int, pageToken string) (token string, err error) {
var objects []*storagepb.Object
err = run(it.ctx, func() error {
objects, token, err = gitr.InternalFetch(pageSize, pageToken)
return err
}, s.retry, s.idempotent)
if err != nil {
if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound {
err = ErrBucketNotExist
}
return "", err
}
for _, obj := range objects {
b := newObjectFromProto(obj)
it.items = append(it.items, b)
}
return token, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
return it
}
// Object metadata methods.
func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
return nil, errMethodNotSupported
}
// Default Object ACL methods.
func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
// There is no separate API for PATCH in gRPC.
// Make a GET call first to retrieve BucketAttrs.
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
if err != nil {
return err
}
// Delete the entity and copy other remaining ACL entities.
var acl []ACLRule
for _, a := range attrs.DefaultObjectACL {
if a.Entity != entity {
acl = append(acl, a)
}
}
uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl}
// Call UpdateBucket with a MetagenerationMatch precondition set.
if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil {
return err
}
return nil
}
func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
if err != nil {
return nil, err
}
return attrs.DefaultObjectACL, nil
}
func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
}
// Bucket ACL methods.
func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
// There is no separate API for PATCH in gRPC.
// Make a GET call first to retrieve BucketAttrs.
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
if err != nil {
return err
}
// Delete the entity and copy other remaining ACL entities.
var acl []ACLRule
for _, a := range attrs.ACL {
if a.Entity != entity {
acl = append(acl, a)
}
}
uattrs := &BucketAttrsToUpdate{acl: acl}
// Call UpdateBucket with a MetagenerationMatch precondition set.
if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil {
return err
}
return nil
}
func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
if err != nil {
return nil, err
}
return attrs.ACL, nil
}
func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
// There is no separate API for PATCH in gRPC.
// Make a GET call first to retrieve BucketAttrs.
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
if err != nil {
return nil, err
}
var acl []ACLRule
aclRule := ACLRule{Entity: entity, Role: role}
acl = append(attrs.ACL, aclRule)
uattrs := &BucketAttrsToUpdate{acl: acl}
// Call UpdateBucket with a MetagenerationMatch precondition set.
_, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...)
if err != nil {
return nil, err
}
return &aclRule, err
}
// Object ACL methods.
func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
}
// Media operations.
func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) OpenReader(ctx context.Context, r *Reader, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *grpcStorageClient) OpenWriter(ctx context.Context, w *Writer, opts ...storageOption) error {
return errMethodNotSupported
}
// IAM methods.
func (c *grpcStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) {
// TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter.
s := callSettings(c.settings, opts...)
req := &iampb.GetIamPolicyRequest{
Resource: bucketResourceName(globalProjectAlias, resource),
Options: &iampb.GetPolicyOptions{
RequestedPolicyVersion: version,
},
}
var rp *iampb.Policy
err := run(ctx, func() error {
var err error
rp, err = c.raw.GetIamPolicy(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent)
return rp, err
}
func (c *grpcStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error {
// TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter.
s := callSettings(c.settings, opts...)
req := &iampb.SetIamPolicyRequest{
Resource: bucketResourceName(globalProjectAlias, resource),
Policy: policy,
}
return run(ctx, func() error {
_, err := c.raw.SetIamPolicy(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent)
}
func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) {
// TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter.
s := callSettings(c.settings, opts...)
req := &iampb.TestIamPermissionsRequest{
Resource: bucketResourceName(globalProjectAlias, resource),
Permissions: permissions,
}
var res *iampb.TestIamPermissionsResponse
err := run(ctx, func() error {
var err error
res, err = c.raw.TestIamPermissions(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent)
if err != nil {
return nil, err
}
return res.Permissions, nil
}
// HMAC Key methods.
func (c *grpcStorageClient) GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator {
return &HMACKeysIterator{}
}
func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error {
return errMethodNotSupported
}
| ["\"STORAGE_EMULATOR_HOST_GRPC\""] | [] | ["STORAGE_EMULATOR_HOST_GRPC"] | [] | ["STORAGE_EMULATOR_HOST_GRPC"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'streams.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
tests/test_email_notifications.py | from unittest import TestCase
from vcr import VCR
from mock import patch
from freezegun import freeze_time
import smtplib
import os
import click
from click.testing import CliRunner
from icloudpd.base import main
import pyicloud_ipd
vcr = VCR(decode_compressed_response=True)
class EmailNotificationsTestCase(TestCase):
@freeze_time("2018-01-01")
def test_2sa_required_email_notification(self):
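# Replays a cassette in which iCloud demands two-step auth and verifies the notification email is sent through the mocked SMTP client.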
with vcr.use_cassette("tests/vcr_cassettes/auth_requires_2sa.yml"):
with patch("smtplib.SMTP") as smtp:
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "EC5646DE-9423-11E8-BF21-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"[email protected]",
"--password",
"password1",
"--smtp-username",
"[email protected]",
"--smtp-password",
"password1",
"--notification-email",
"[email protected]",
"tests/fixtures/Photos",
],
)
print(result.output)
assert result.exit_code == 1
smtp_instance = smtp()
smtp_instance.connect.assert_called_once()
smtp_instance.starttls.assert_called_once()
smtp_instance.sendmail.assert_called_once_with(
"[email protected]",
"[email protected]",
"From: iCloud Photos Downloader <[email protected]>\n"
"To: [email protected]\n"
"Subject: icloud_photos_downloader: Two step authentication has expired\n"
"Date: 01/01/2018 00:00\n\nHello,\n\n"
"Two-step authentication has expired for the icloud_photos_downloader script.\n"
"Please log in to your server and run the script manually to update two-step "
"authentication.",
)
| [] | [] | ["CLIENT_ID"] | [] | ["CLIENT_ID"] | python | 1 | 0 | |
chain/stmgr/utils.go | package stmgr
import (
"bytes"
"context"
"fmt"
"os"
"reflect"
"runtime"
"strings"
exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/rt"
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
func GetNetworkName(ctx context.Context, sm *StateManager, st cid.Cid) (dtypes.NetworkName, error) {
act, err := sm.LoadActorRaw(ctx, init_.Address, st)
if err != nil {
return "", err
}
ias, err := init_.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return "", err
}
return ias.NetworkName()
}
func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (address.Address, error) {
state, err := sm.StateTree(st)
if err != nil {
return address.Undef, xerrors.Errorf("(get sset) failed to load state tree: %w", err)
}
act, err := state.GetActor(maddr)
if err != nil {
return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
info, err := mas.Info()
if err != nil {
return address.Undef, xerrors.Errorf("failed to load actor info: %w", err)
}
return vm.ResolveToKeyAddr(state, sm.cs.ActorStore(ctx), info.Worker)
}
func GetPower(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (power.Claim, power.Claim, bool, error) {
return GetPowerRaw(ctx, sm, ts.ParentState(), maddr)
}
func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (power.Claim, power.Claim, bool, error) {
act, err := sm.LoadActorRaw(ctx, power.Address, st)
if err != nil {
return power.Claim{}, power.Claim{}, false, xerrors.Errorf("(get sset) failed to load power actor state: %w", err)
}
pas, err := power.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return power.Claim{}, power.Claim{}, false, err
}
tpow, err := pas.TotalPower()
if err != nil {
return power.Claim{}, power.Claim{}, false, err
}
var mpow power.Claim
var minpow bool
if maddr != address.Undef {
var found bool
mpow, found, err = pas.MinerPower(maddr)
if err != nil || !found {
return power.Claim{}, tpow, false, err
}
minpow, err = pas.MinerNominalPowerMeetsConsensusMinimum(maddr)
if err != nil {
return power.Claim{}, power.Claim{}, false, err
}
}
return mpow, tpow, minpow, nil
}
func PreCommitInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (*miner.SectorPreCommitOnChainInfo, error) {
act, err := sm.LoadActor(ctx, maddr, ts)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
return mas.GetPrecommittedSector(sid)
}
func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (*miner.SectorOnChainInfo, error) {
act, err := sm.LoadActor(ctx, maddr, ts)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
return mas.GetSector(sid)
}
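// GetSectorsForWinningPoSt draws the Winning PoSt sector challenges from the miner's proving set at state st and returns the matching sector infos.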
func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
act, err := sm.LoadActorRaw(ctx, maddr, st)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
}
var provingSectors bitfield.BitField
if nv < network.Version7 {
allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
if err != nil {
return nil, xerrors.Errorf("get all sectors: %w", err)
}
faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
if err != nil {
return nil, xerrors.Errorf("get faulty sectors: %w", err)
}
provingSectors, err = bitfield.SubtractBitField(allSectors, faultySectors)
if err != nil {
return nil, xerrors.Errorf("calc proving sectors: %w", err)
}
} else {
provingSectors, err = miner.AllPartSectors(mas, miner.Partition.ActiveSectors)
if err != nil {
return nil, xerrors.Errorf("get active sectors sectors: %w", err)
}
}
numProvSect, err := provingSectors.Count()
if err != nil {
return nil, xerrors.Errorf("failed to count bits: %w", err)
}
// TODO(review): is this right? feels fishy to me
if numProvSect == 0 {
return nil, nil
}
info, err := mas.Info()
if err != nil {
return nil, xerrors.Errorf("getting miner info: %w", err)
}
mid, err := address.IDFromAddress(maddr)
if err != nil {
return nil, xerrors.Errorf("getting miner ID: %w", err)
}
proofType, err := miner.WinningPoStProofTypeFromWindowPoStProofType(nv, info.WindowPoStProofType)
if err != nil {
return nil, xerrors.Errorf("determining winning post proof type: %w", err)
}
ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, proofType, abi.ActorID(mid), rand, numProvSect)
if err != nil {
return nil, xerrors.Errorf("generating winning post challenges: %w", err)
}
iter, err := provingSectors.BitIterator()
if err != nil {
return nil, xerrors.Errorf("iterating over proving sectors: %w", err)
}
// Select winning sectors by _index_ in the all-sectors bitfield.
selectedSectors := bitfield.New()
prev := uint64(0)
for _, n := range ids {
sno, err := iter.Nth(n - prev)
if err != nil {
return nil, xerrors.Errorf("iterating over proving sectors: %w", err)
}
selectedSectors.Set(sno)
prev = n
}
sectors, err := mas.LoadSectors(&selectedSectors)
if err != nil {
return nil, xerrors.Errorf("loading proving sectors: %w", err)
}
out := make([]builtin.SectorInfo, len(sectors))
for i, sinfo := range sectors {
out[i] = builtin.SectorInfo{
SealProof: sinfo.SealProof,
SectorNumber: sinfo.SectorNumber,
SealedCID: sinfo.SealedCID,
}
}
return out, nil
}
func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (bool, error) {
act, err := sm.LoadActor(ctx, power.Address, ts)
if err != nil {
return false, xerrors.Errorf("failed to load power actor: %w", err)
}
spas, err := power.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return false, xerrors.Errorf("failed to load power actor state: %w", err)
}
_, ok, err := spas.MinerPower(maddr)
if err != nil {
return false, xerrors.Errorf("getting miner power: %w", err)
}
if !ok {
return true, nil
}
return false, nil
}
func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts *types.TipSet) (*api.MarketDeal, error) {
act, err := sm.LoadActor(ctx, market.Address, ts)
if err != nil {
return nil, xerrors.Errorf("failed to load market actor: %w", err)
}
state, err := market.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load market actor state: %w", err)
}
proposals, err := state.Proposals()
if err != nil {
return nil, err
}
proposal, found, err := proposals.Get(dealID)
if err != nil {
return nil, err
} else if !found {
return nil, xerrors.Errorf(
"deal %d not found "+
"- deal may not have completed sealing before deal proposal "+
"start epoch, or deal may have been slashed",
dealID)
}
states, err := state.States()
if err != nil {
return nil, err
}
st, found, err := states.Get(dealID)
if err != nil {
return nil, err
}
if !found {
st = market.EmptyDealState()
}
return &api.MarketDeal{
Proposal: *proposal,
State: *st,
}, nil
}
func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([]address.Address, error) {
act, err := sm.LoadActor(ctx, power.Address, ts)
if err != nil {
return nil, xerrors.Errorf("failed to load power actor: %w", err)
}
powState, err := power.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load power actor state: %w", err)
}
return powState.ListAllMiners()
}
func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, msgs []*types.Message, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) {
if ts == nil {
ts = sm.cs.GetHeaviestTipSet()
}
base, trace, err := sm.ExecutionTrace(ctx, ts)
if err != nil {
return cid.Undef, nil, err
}
for i := ts.Height(); i < height; i++ {
// handle state forks
base, err = sm.handleStateForks(ctx, base, i, traceFunc(&trace), ts)
if err != nil {
return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
}
// TODO: should we also run cron here?
}
r := store.NewChainRand(sm.cs, ts.Cids())
vmopt := &vm.VMOpts{
StateBase: base,
Epoch: height,
Rand: r,
Bstore: sm.cs.StateBlockstore(),
Syscalls: sm.cs.VMSys(),
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: ts.Blocks()[0].ParentBaseFee,
LookbackState: LookbackStateGetterForTipset(sm, ts),
}
vmi, err := sm.newVM(ctx, vmopt)
if err != nil {
return cid.Undef, nil, err
}
for i, msg := range msgs {
// TODO: Use the signed message length for secp messages
ret, err := vmi.ApplyMessage(ctx, msg)
if err != nil {
return cid.Undef, nil, xerrors.Errorf("applying message %s: %w", msg.Cid(), err)
}
if ret.ExitCode != 0 {
log.Infof("compute state apply message %d failed (exit: %d): %s", i, ret.ExitCode, ret.ActorErr)
}
}
root, err := vmi.Flush(ctx)
if err != nil {
return cid.Undef, nil, err
}
return root, trace, nil
}
func LookbackStateGetterForTipset(sm *StateManager, ts *types.TipSet) vm.LookbackStateGetter {
return func(ctx context.Context, round abi.ChainEpoch) (*state.StateTree, error) {
_, st, err := GetLookbackTipSetForRound(ctx, sm, ts, round)
if err != nil {
return nil, err
}
return sm.StateTree(st)
}
}
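// GetLookbackTipSetForRound returns the lookback tipset for the given round along with the state root computed from it.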
func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, cid.Cid, error) {
var lbr abi.ChainEpoch
lb := policy.GetWinningPoStSectorSetLookback(sm.GetNtwkVersion(ctx, round))
if round > lb {
lbr = round - lb
}
// more null blocks than our lookback
if lbr >= ts.Height() {
// This should never happen at this point, but may happen before
// network version 3 (where the lookback was only 10 blocks).
st, _, err := sm.TipSetState(ctx, ts)
if err != nil {
return nil, cid.Undef, err
}
return ts, st, nil
}
// Get the tipset after the lookback tipset, or the next non-null one.
nextTs, err := sm.ChainStore().GetTipsetByHeight(ctx, lbr+1, ts, false)
if err != nil {
return nil, cid.Undef, xerrors.Errorf("failed to get lookback tipset+1: %w", err)
}
if lbr > nextTs.Height() {
return nil, cid.Undef, xerrors.Errorf("failed to find non-null tipset %s (%d) which is known to exist, found %s (%d)", ts.Key(), ts.Height(), nextTs.Key(), nextTs.Height())
}
lbts, err := sm.ChainStore().GetTipSetFromKey(nextTs.Parents())
if err != nil {
return nil, cid.Undef, xerrors.Errorf("failed to resolve lookback tipset: %w", err)
}
return lbts, nextTs.ParentState(), nil
}
func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) {
ts, err := sm.ChainStore().LoadTipSet(tsk)
if err != nil {
return nil, xerrors.Errorf("failed to load tipset for mining base: %w", err)
}
prev, err := sm.ChainStore().GetLatestBeaconEntry(ts)
if err != nil {
if os.Getenv("LOTUS_IGNORE_DRAND") != "_yes_" {
return nil, xerrors.Errorf("failed to get latest beacon entry: %w", err)
}
prev = &types.BeaconEntry{}
}
entries, err := beacon.BeaconEntriesForBlock(ctx, bcs, round, ts.Height(), *prev)
if err != nil {
return nil, err
}
rbase := *prev
if len(entries) > 0 {
rbase = entries[len(entries)-1]
}
lbts, lbst, err := GetLookbackTipSetForRound(ctx, sm, ts, round)
if err != nil {
return nil, xerrors.Errorf("getting lookback miner actor state: %w", err)
}
act, err := sm.LoadActorRaw(ctx, maddr, lbst)
if xerrors.Is(err, types.ErrActorNotFound) {
_, err := sm.LoadActor(ctx, maddr, ts)
if err != nil {
return nil, xerrors.Errorf("loading miner in current state: %w", err)
}
return nil, nil
}
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
}
buf := new(bytes.Buffer)
if err := maddr.MarshalCBOR(buf); err != nil {
return nil, xerrors.Errorf("failed to marshal miner address: %w", err)
}
prand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes())
if err != nil {
return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err)
}
nv := sm.GetNtwkVersion(ctx, ts.Height())
sectors, err := GetSectorsForWinningPoSt(ctx, nv, pv, sm, lbst, maddr, prand)
if err != nil {
return nil, xerrors.Errorf("getting winning post proving set: %w", err)
}
if len(sectors) == 0 {
return nil, nil
}
mpow, tpow, _, err := GetPowerRaw(ctx, sm, lbst, maddr)
if err != nil {
return nil, xerrors.Errorf("failed to get power: %w", err)
}
info, err := mas.Info()
if err != nil {
return nil, err
}
worker, err := sm.ResolveToKeyAddress(ctx, info.Worker, ts)
if err != nil {
return nil, xerrors.Errorf("resolving worker address: %w", err)
}
// TODO: Not ideal performance...This method reloads miner and power state (already looked up here and in GetPowerRaw)
eligible, err := MinerEligibleToMine(ctx, sm, maddr, ts, lbts)
if err != nil {
return nil, xerrors.Errorf("determining miner eligibility: %w", err)
}
return &api.MiningBaseInfo{
MinerPower: mpow.QualityAdjPower,
NetworkPower: tpow.QualityAdjPower,
Sectors: sectors,
WorkerKey: worker,
SectorSize: info.SectorSize,
PrevBeaconEntry: *prev,
BeaconEntries: entries,
EligibleForMining: eligible,
}, nil
}
type MethodMeta struct {
Name string
Params reflect.Type
Ret reflect.Type
}
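// MethodsMap maps each actor code CID to metadata (name, parameter and return types) for its exported methods; it is populated in init below.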
var MethodsMap = map[cid.Cid]map[abi.MethodNum]MethodMeta{}
func init() {
// TODO: combine with the runtime actor registry.
var actors []rt.VMActor
actors = append(actors, exported0.BuiltinActors()...)
actors = append(actors, exported2.BuiltinActors()...)
actors = append(actors, exported3.BuiltinActors()...)
actors = append(actors, exported4.BuiltinActors()...)
actors = append(actors, exported5.BuiltinActors()...)
for _, actor := range actors {
exports := actor.Exports()
methods := make(map[abi.MethodNum]MethodMeta, len(exports))
// Explicitly add send, it's special.
methods[builtin.MethodSend] = MethodMeta{
Name: "Send",
Params: reflect.TypeOf(new(abi.EmptyValue)),
Ret: reflect.TypeOf(new(abi.EmptyValue)),
}
// Iterate over exported methods. Some of these _may_ be nil and
// must be skipped.
for number, export := range exports {
if export == nil {
continue
}
ev := reflect.ValueOf(export)
et := ev.Type()
// Extract the method names using reflection. These
// method names always match the field names in the
// `builtin.Method*` structs (tested in the specs-actors
// tests).
fnName := runtime.FuncForPC(ev.Pointer()).Name()
fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm")
switch abi.MethodNum(number) {
case builtin.MethodSend:
panic("method 0 is reserved for Send")
case builtin.MethodConstructor:
if fnName != "Constructor" {
panic("method 1 is reserved for Constructor")
}
}
methods[abi.MethodNum(number)] = MethodMeta{
Name: fnName,
Params: et.In(1),
Ret: et.Out(0),
}
}
MethodsMap[actor.Code()] = methods
}
}
func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, method abi.MethodNum, ts *types.TipSet) (cbg.CBORUnmarshaler, error) {
act, err := sm.LoadActor(ctx, to, ts)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
m, found := MethodsMap[act.Code][method]
if !found {
return nil, fmt.Errorf("unknown method %d for actor %s", method, act.Code)
}
return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil
}
func GetParamType(actCode cid.Cid, method abi.MethodNum) (cbg.CBORUnmarshaler, error) {
m, found := MethodsMap[actCode][method]
if !found {
return nil, fmt.Errorf("unknown method %d for actor %s", method, actCode)
}
return reflect.New(m.Params.Elem()).Interface().(cbg.CBORUnmarshaler), nil
}
func minerHasMinPower(ctx context.Context, sm *StateManager, addr address.Address, ts *types.TipSet) (bool, error) {
pact, err := sm.LoadActor(ctx, power.Address, ts)
if err != nil {
return false, xerrors.Errorf("loading power actor state: %w", err)
}
ps, err := power.Load(sm.cs.ActorStore(ctx), pact)
if err != nil {
return false, err
}
return ps.MinerNominalPowerMeetsConsensusMinimum(addr)
}
func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Address, baseTs *types.TipSet, lookbackTs *types.TipSet) (bool, error) {
hmp, err := minerHasMinPower(ctx, sm, addr, lookbackTs)
// TODO: We're blurring the lines between a "runtime network version" and a "Lotus upgrade epoch", is that unavoidable?
if sm.GetNtwkVersion(ctx, baseTs.Height()) <= network.Version3 {
return hmp, err
}
if err != nil {
return false, err
}
if !hmp {
return false, nil
}
// Post actors v2, also check MinerEligibleForElection with base ts
pact, err := sm.LoadActor(ctx, power.Address, baseTs)
if err != nil {
return false, xerrors.Errorf("loading power actor state: %w", err)
}
pstate, err := power.Load(sm.cs.ActorStore(ctx), pact)
if err != nil {
return false, err
}
mact, err := sm.LoadActor(ctx, addr, baseTs)
if err != nil {
return false, xerrors.Errorf("loading miner actor state: %w", err)
}
mstate, err := miner.Load(sm.cs.ActorStore(ctx), mact)
if err != nil {
return false, err
}
// Non-empty power claim.
if claim, found, err := pstate.MinerPower(addr); err != nil {
return false, err
} else if !found {
return false, err
} else if claim.QualityAdjPower.LessThanEqual(big.Zero()) {
return false, err
}
// No fee debt.
if debt, err := mstate.FeeDebt(); err != nil {
return false, err
} else if !debt.IsZero() {
return false, err
}
// No active consensus faults.
if mInfo, err := mstate.Info(); err != nil {
return false, err
} else if baseTs.Height() <= mInfo.ConsensusFaultElapsed {
return false, nil
}
return true, nil
}
func CheckTotalFIL(ctx context.Context, sm *StateManager, ts *types.TipSet) (abi.TokenAmount, error) {
str, err := state.LoadStateTree(sm.ChainStore().ActorStore(ctx), ts.ParentState())
if err != nil {
return abi.TokenAmount{}, err
}
sum := types.NewInt(0)
err = str.ForEach(func(a address.Address, act *types.Actor) error {
sum = types.BigAdd(sum, act.Balance)
return nil
})
if err != nil {
return abi.TokenAmount{}, err
}
return sum, nil
}
func MakeMsgGasCost(msg *types.Message, ret *vm.ApplyRet) api.MsgGasCost {
return api.MsgGasCost{
Message: msg.Cid(),
GasUsed: big.NewInt(ret.GasUsed),
BaseFeeBurn: ret.GasCosts.BaseFeeBurn,
OverEstimationBurn: ret.GasCosts.OverEstimationBurn,
MinerPenalty: ret.GasCosts.MinerPenalty,
MinerTip: ret.GasCosts.MinerTip,
Refund: ret.GasCosts.Refund,
TotalCost: big.Sub(msg.RequiredFunds(), ret.GasCosts.Refund),
}
}
| ["\"LOTUS_IGNORE_DRAND\""] | [] | ["LOTUS_IGNORE_DRAND"] | [] | ["LOTUS_IGNORE_DRAND"] | go | 1 | 0 | |
venv/Lib/site-packages/huggingface_hub/hub_mixin.py | import json
import logging
import os
from typing import Dict, Optional
import requests
from .constants import CONFIG_NAME, PYTORCH_WEIGHTS_NAME
from .file_download import cached_download, hf_hub_url, is_torch_available
from .hf_api import HfApi, HfFolder
from .repository import Repository
if is_torch_available():
import torch
logger = logging.getLogger(__name__)
class ModelHubMixin(object):
def __init__(self, *args, **kwargs):
"""
Mix this class into your torch model class to simplify saving & loading from the huggingface-hub
Example::
>>> from huggingface_hub import ModelHubMixin
>>> class MyModel(nn.Module, ModelHubMixin):
... def __init__(self, **kwargs):
... super().__init__()
... self.config = kwargs.pop("config", None)
... self.layer = ...
... def forward(self, ...):
... return ...
>>> model = MyModel()
>>> model.save_pretrained("mymodel", push_to_hub=False) # Saving model weights in the directory
>>> model.push_to_hub("mymodel", "model-1") # Pushing model-weights to hf-hub
>>> # Downloading weights from hf-hub & model will be initialized from those weights
>>> model = MyModel.from_pretrained("username/mymodel@main")
"""
def save_pretrained(
self,
save_directory: str,
config: Optional[dict] = None,
push_to_hub: bool = False,
**kwargs,
):
"""
Saving weights in local directory.
Parameters:
save_directory (:obj:`str`):
Specify directory in which you want to save weights.
config (:obj:`dict`, `optional`):
Specify a config (must be a dict) in case you want to save it.
push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set it to `True` in case you want to push your weights to huggingface_hub
model_id (:obj:`str`, `optional`, defaults to :obj:`save_directory`):
Repo name in huggingface_hub. If not specified, repo name will be same as `save_directory`
kwargs (:obj:`Dict`, `optional`):
kwargs will be passed to `push_to_hub`
"""
os.makedirs(save_directory, exist_ok=True)
# saving config
if isinstance(config, dict):
path = os.path.join(save_directory, CONFIG_NAME)
with open(path, "w") as f:
json.dump(config, f)
# saving model weights
path = os.path.join(save_directory, PYTORCH_WEIGHTS_NAME)
self._save_pretrained(path)
if push_to_hub:
return self.push_to_hub(save_directory, **kwargs)
def _save_pretrained(self, path):
"""
Overwrite this method in case you don't want to save complete model, rather some specific layers
"""
model_to_save = self.module if hasattr(self, "module") else self
torch.save(model_to_save.state_dict(), path)
@classmethod
def from_pretrained(
cls,
pretrained_model_name_or_path: Optional[str],
strict: bool = True,
map_location: Optional[str] = "cpu",
force_download: bool = False,
resume_download: bool = False,
proxies: Dict = None,
use_auth_token: Optional[str] = None,
cache_dir: Optional[str] = None,
local_files_only: bool = False,
**model_kwargs,
):
r"""
Instantiate a pretrained pytorch model from a pre-trained model configuration from huggingface-hub.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To
train the model, you should first set it back in training mode with ``model.train()``.
Parameters:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`, `optional`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- You can add `revision` by appending `@` at the end of model_id simply like this: ``dbmdz/bert-base-german-cased@main``
Revision is the specific model version to use. It can be a branch name, a tag name, or a commit id,
since we use a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
cache_dir (:obj:`Union[str, os.PathLike]`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str], `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (i.e., do not try to download the model).
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
model_kwargs (:obj:`Dict`, `optional`):
model_kwargs will be passed to the model during initialization
.. note::
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
"""
model_id = pretrained_model_name_or_path
map_location = torch.device(map_location)
revision = None
if len(model_id.split("@")) == 2:
model_id, revision = model_id.split("@")
if model_id in os.listdir() and CONFIG_NAME in os.listdir(model_id):
config_file = os.path.join(model_id, CONFIG_NAME)
else:
try:
config_url = hf_hub_url(
model_id, filename=CONFIG_NAME, revision=revision
)
config_file = cached_download(
config_url,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
)
except requests.exceptions.RequestException:
logger.warning("config.json NOT FOUND in HuggingFace Hub")
config_file = None
if model_id in os.listdir():
print("LOADING weights from local directory")
model_file = os.path.join(model_id, PYTORCH_WEIGHTS_NAME)
else:
model_url = hf_hub_url(
model_id, filename=PYTORCH_WEIGHTS_NAME, revision=revision
)
model_file = cached_download(
model_url,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
)
if config_file is not None:
with open(config_file, "r", encoding="utf-8") as f:
config = json.load(f)
model_kwargs.update({"config": config})
model = cls(**model_kwargs)
state_dict = torch.load(model_file, map_location=map_location)
model.load_state_dict(state_dict, strict=strict)
model.eval()
return model
@staticmethod
def push_to_hub(
save_directory: Optional[str],
model_id: Optional[str] = None,
repo_url: Optional[str] = None,
commit_message: Optional[str] = "add model",
organization: Optional[str] = None,
private: bool = None,
) -> str:
"""
Parameters:
save_directory (:obj:`Union[str, os.PathLike]`):
Directory having model weights & config.
model_id (:obj:`str`, `optional`, defaults to :obj:`save_directory`):
Repo name in huggingface_hub. If not specified, repo name will be same as `save_directory`
repo_url (:obj:`str`, `optional`):
Specify this in case you want to push to existing repo in hub.
organization (:obj:`str`, `optional`):
Organization in which you want to push your model.
private (:obj:`bool`, `optional`):
private: Whether the model repo should be private (requires a paid huggingface.co account)
commit_message (:obj:`str`, `optional`, defaults to :obj:`add model`):
Message to commit while pushing
Returns:
url to commit on remote repo.
"""
if model_id is None:
model_id = save_directory
token = HfFolder.get_token()
if repo_url is None:
repo_url = HfApi().create_repo(
token,
model_id,
organization=organization,
private=private,
repo_type=None,
exist_ok=True,
)
repo = Repository(save_directory, clone_from=repo_url, use_auth_token=token)
return repo.push_to_hub(commit_message=commit_message)
| [] | [] | [] | [] | [] | python | null | null | null
starknet_py/utils/crypto/facade.py | import functools
import os
from typing import List, Callable, Iterable, Optional
from dataclasses import dataclass
from starkware.cairo.common.hash_state import compute_hash_on_elements
from starkware.cairo.lang.vm.crypto import pedersen_hash as default_hash
from starkware.crypto.signature.signature import sign
from crypto_cpp_py.cpp_bindings import (
cpp_hash,
get_cpp_lib_file,
ECSignature,
)
def sign_calldata(calldata: Iterable[int], priv_key: int):
"""
Helper function that signs hash:
hash = pedersen_hash(calldata[0], 0)
hash = pedersen_hash(calldata[1], hash)
hash = pedersen_hash(calldata[2], hash)
...
:param calldata: iterable of ints
:param priv_key: private key
:return: signature of the calldata's hash
"""
hashed_calldata = functools.reduce(lambda x, y: pedersen_hash(y, x), calldata, 0)
return message_signature(hashed_calldata, priv_key)
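# Illustrative example (the private key below is a throwaway value, not from the
# original file). Signing three calldata elements:
#
#   signature = sign_calldata([111, 222, 333], priv_key=0x12345)
#
# is equivalent to the chained hash described in the docstring:
#
#   h = pedersen_hash(111, 0)
#   h = pedersen_hash(222, h)
#   h = pedersen_hash(333, h)
#   signature = message_signature(h, 0x12345)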
# PREFIX_TRANSACTION is the integer (short-string) encoding of 'StarkNet Transaction'
PREFIX_TRANSACTION = 476441609247967894954472788179128007176248455022
@dataclass(frozen=True)
class Call:
to_addr: int
selector: int
calldata: List[int]
@dataclass(frozen=True)
class MultiCall:
account: int
calls: Iterable[Call]
nonce: int
max_fee: int = 0
version: int = 0
# pylint: disable=too-many-arguments
def hash_multicall_with(
multi_call: MultiCall,
hash_fun: Callable[[int, int], int],
) -> int:
"""
Mimics the behavior of
https://github.com/argentlabs/cairo-contracts/blob/c2ff198e5de5b19514d99ecff604a7cbf3377d2f/contracts/Account.cairo#L248
"""
calls_hash = compute_hash_on_elements(
[hash_call_with(c, hash_fun=hash_fun) for c in multi_call.calls],
hash_func=hash_fun,
)
return compute_hash_on_elements(
[
PREFIX_TRANSACTION,
multi_call.account,
calls_hash,
multi_call.nonce,
multi_call.max_fee,
multi_call.version,
],
hash_func=hash_fun,
)
def hash_call_with(call: Call, hash_fun):
return compute_hash_on_elements(
[
call.to_addr,
call.selector,
compute_hash_on_elements(
call.calldata,
hash_func=hash_fun,
),
]
)
# Interface
def use_cpp_variant() -> bool:
force_disable_ext = (
os.getenv("DISABLE_CRYPTO_C_EXTENSION", "false").lower() == "true"
)
cpp_lib_file = get_cpp_lib_file()
return not force_disable_ext and bool(cpp_lib_file)
def message_signature(msg_hash, priv_key, seed: Optional[int] = 32) -> ECSignature:
# TODO: When sign from crypto-cpp is faster, uncomment this section # pylint: disable=fixme
# if use_cpp_variant():
# return cpp_sign(msg_hash, priv_key, seed)
return sign(msg_hash, priv_key, seed)
def pedersen_hash(left: int, right: int) -> int:
if use_cpp_variant():
return cpp_hash(left, right)
return default_hash(left, right)
def hash_multicall(multi_call: MultiCall) -> int:
return hash_multicall_with(
multi_call=multi_call,
hash_fun=pedersen_hash,
)
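# Illustrative sketch (hypothetical addresses and values, not from the original file):
#
#   call = Call(to_addr=0x123, selector=0x456, calldata=[1, 2, 3])
#   multi_call = MultiCall(account=0xABC, calls=[call], nonce=0)
#   tx_hash = hash_multicall(multi_call)
#
# max_fee and version fall back to the defaults (0 and 0) defined on MultiCall.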
| []
| []
| [
"DISABLE_CRYPTO_C_EXTENSION"
]
| [] | ["DISABLE_CRYPTO_C_EXTENSION"] | python | 1 | 0 | |
client/foundries.go | package client
import (
"bytes"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
canonical "github.com/docker/go/canonical/json"
"github.com/sirupsen/logrus"
tuf "github.com/theupdateframework/notary/tuf/data"
)
type Config struct {
Factory string
Token string
ClientCredentials OAuthConfig
ExtraHeaders map[string]string
}
type Api struct {
serverUrl string
config Config
client http.Client
}
type CaCerts struct {
RootCrt string `json:"root-crt"`
CaCrt string `json:"ca-crt"`
CaCsr string `json:"ca-csr"`
TlsCrt string `json:"tls-crt"`
TlsCsr string `json:"tls-csr"`
CreateCaScript *string `json:"create_ca"`
CreateDeviceCaScript *string `json:"create_device_ca"`
SignCaScript *string `json:"sign_ca_csr"`
SignTlsScript *string `json:"sign_tls_csr"`
}
type ConfigFile struct {
Name string `json:"name"`
Value string `json:"value"`
Unencrypted bool `json:"unencrypted"`
OnChanged []string `json:"on-changed,omitempty"`
}
type ConfigCreateRequest struct {
Reason string `json:"reason"`
Files []ConfigFile `json:"files"`
}
type DeviceConfig struct {
CreatedAt string `json:"created-at"`
AppliedAt string `json:"applied-at"` // This is not present in factory config
Reason string `json:"reason"`
Files []ConfigFile `json:"files"`
}
type DeviceConfigList struct {
Configs []DeviceConfig `json:"config"`
Total int `json:"total"`
Next *string `json:"next"`
}
type NetInfo struct {
Hostname string `json:"hostname"`
Ipv4 string `json:"local_ipv4"`
MAC string `json:"mac"`
}
type Update struct {
CorrelationId string `json:"correlation-id"`
Target string `json:"target"`
Version string `json:"version"`
Time string `json:"time"`
}
type UpdateList struct {
Updates []Update `json:"updates"`
Total int `json:"total"`
Next *string `json:"next"`
}
type EventType struct {
Id string `json:"id"`
}
type EventDetail struct {
Version string `json:"version"`
TargetName string `json:"targetName"`
Success *bool `json:"success,omitempty"`
Details string `json:"details"`
}
type UpdateEvent struct {
Time string `json:"deviceTime"`
Type EventType `json:"eventType"`
Detail EventDetail `json:"event"`
}
type DeviceGroup struct {
Id int `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
CreatedAt string `json:"created-at"`
}
type Device struct {
Uuid string `json:"uuid"`
Name string `json:"name"`
Owner string `json:"owner"`
Factory string `json:"factory"`
GroupName string `json:"device-group"` // Returned in List API
Group *DeviceGroup `json:"group"` // Returned in Get API
CreatedAt string `json:"created-at"`
LastSeen string `json:"last-seen"`
OstreeHash string `json:"ostree-hash"`
DockerApps []string `json:"docker-apps,omitempty"`
Tag string `json:"tag,omitempty"`
Network *NetInfo `json:"network-info,omitempty"`
Hardware *json.RawMessage `json:"hardware-info,omitempty"`
TargetName string `json:"target-name"`
Status string `json:"status"`
CurrentUpdate string `json:"current-update"`
UpToDate bool `json:"up-to-date"`
PublicKey string `json:"public-key"`
ActiveConfig *DeviceConfig `json:"active-config,omitempty"`
AktualizrToml string `json:"aktualizr-toml,omitempty"`
IsProd bool `json:"is-prod"`
IsWave bool `json:"is-wave"`
Secondaries []struct {
Serial string `json:"serial"`
TargetName string `json:"target-name"`
HardwareId string `json:"hardware-id"`
} `json:"secondary-ecus"`
}
type DeviceList struct {
Devices []Device `json:"devices"`
Total int `json:"total"`
Next *string `json:"next"`
}
type DockerApp struct {
FileName string `json:"filename"`
Uri string `json:"uri"`
}
type ComposeApp struct {
Uri string `json:"uri"`
}
func (a ComposeApp) Hash() string {
parts := strings.SplitN(a.Uri, "@sha256:", 2)
return parts[len(parts)-1]
}
type FactoryUser struct {
PolisId string `json:"polis-id"`
Name string `json:"name"`
Role string `json:"role"`
}
type JobservRun struct {
Name string `json:"name"`
Url string `json:"url"`
Artifacts []string `json:"artifacts"`
}
type TargetStatus struct {
Version int `json:"version"`
Devices int `json:"devices"`
Reinstalling int `json:"(re-)installing"`
IsOrphan bool `json:"is-orphan"`
}
type DeviceGroupStatus struct {
Name string `json:"name"`
DevicesTotal int `json:"devices-total"`
DevicesOnline int `json:"devices-online"`
DevicesOnLatest int `json:"devices-on-latest"`
DevicesOnOrphan int `json:"devices-on-orphan"`
Reinstalling int `json:"(re-)installing"`
}
type TagStatus struct {
Name string `json:"name"`
DevicesTotal int `json:"devices-total"`
DevicesOnline int `json:"devices-online"`
DevicesOnLatest int `json:"devices-on-latest"`
DevicesOnOrphan int `json:"devices-on-orphan"`
LatestTarget int `json:"latest-target"`
Targets []TargetStatus `json:"targets"`
DeviceGroups []DeviceGroupStatus `json:"device-groups"`
}
type FactoryStatus struct {
TotalDevices int `json:"total-devices"`
Tags []TagStatus `json:"tags"`
ProdTags []TagStatus `json:"prod-tags"`
ProdWaveTags []TagStatus `json:"wave-tags"`
}
type ProjectSecret struct {
Name string `json:"name"`
Value *string `json:"value"`
}
type ProjectTrigger struct {
Type string `json:"type"`
Id int `json:"id,omitempty"`
Secrets []ProjectSecret `json:"secrets"`
}
type TufCustom struct {
HardwareIds []string `json:"hardwareIds,omitempty"`
Tags []string `json:"tags,omitempty"`
TargetFormat string `json:"targetFormat,omitempty"`
Version string `json:"version,omitempty"`
ComposeApps map[string]ComposeApp `json:"docker_compose_apps,omitempty"`
Name string `json:"name,omitempty"`
ContainersSha string `json:"containers-sha,omitempty"`
LmpManifestSha string `json:"lmp-manifest-sha,omitempty"`
OverridesSha string `json:"meta-subscriber-overrides-sha,omitempty"`
}
// ota-tuf serializes root.json differently from Notary. The key representation
// and signing algorithms differ slightly. These Ats* structs provide an
// implementation compatible with ota-tuf and libaktualizr.
type AtsKeyVal struct {
Public string `json:"public,omitempty"`
Private string `json:"private,omitempty"`
}
type AtsKey struct {
KeyType string `json:"keytype"`
KeyValue AtsKeyVal `json:"keyval"`
}
func (k AtsKey) KeyID() (string, error) {
bytes, err := canonical.MarshalCanonical(k)
if err != nil {
return "", nil
}
return fmt.Sprintf("%x", sha256.Sum256(bytes)), nil
}
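// Illustrative sketch (hypothetical key material and key type, not part of the
// original file): the key ID is the sha256 of the key's canonical JSON encoding,
// so something like
//
//	key := AtsKey{KeyType: "ED25519", KeyValue: AtsKeyVal{Public: "<public key material>"}}
//	id, err := key.KeyID()
//
// yields a stable hex identifier for the key.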
type AtsRootMeta struct {
tuf.SignedCommon
Consistent bool `json:"consistent_snapshot"`
Keys map[string]AtsKey `json:"keys"`
Roles map[tuf.RoleName]*tuf.RootRole `json:"roles"`
}
type AtsTufRoot struct {
// A non-standard targets-signatures field allows to make an atomic key rotation
TargetsSignatures map[string][]tuf.Signature `json:"targets-signatures,omitempty"`
Signatures []tuf.Signature `json:"signatures"`
Signed AtsRootMeta `json:"signed"`
}
type AtsTargetsMeta struct {
tuf.SignedCommon
Targets tuf.Files `json:"targets"`
// omitempty below in tuf package doesn't work, because it's not a reference type
// Delegations tuf.Delegations `json:"delegations,omitempty"` // unnecessary
}
type AtsTufTargets struct {
Signatures []tuf.Signature `json:"signatures"`
Signed AtsTargetsMeta `json:"signed"`
}
type ComposeAppContent struct {
Files []string `json:"files"`
ComposeSpec map[string]interface{} `json:"compose_spec"`
}
type ComposeAppBundle struct {
Uri string `json:"uri"`
Error string `json:"error"`
Warnings []string `json:"warnings"`
Manifest map[string]interface{} `json:"manifest"`
Content ComposeAppContent `json:"content"`
}
type TargetTestResults struct {
Name string `json:"name"`
Status string `json:"status"`
Details string `json:"details"`
}
type TargetTest struct {
Name string `json:"name"`
Id string `json:"test-id"`
DeviceUUID string `json:"device-uuid"`
DeviceName string `json:"device-name"`
Status string `json:"status"`
Details string `json:"details"`
CreatedOn float32 `json:"created-on"`
CompletedOn float32 `json:"completed-on"`
Results []TargetTestResults `json:"results"`
Artifacts []string `json:"artifacts"`
}
type TargetTestList struct {
Tests []TargetTest `json:"tests"`
Total int `json:"total"`
Next *string `json:"next"`
}
type WaveRolloutGroupRef struct {
GroupId int `json:"group-id"`
GroupName string `json:"group-name"`
CreatedAt string `json:"created-at"`
}
type Wave struct {
Name string `json:"name"`
Version string `json:"version"`
Tag string `json:"tag"`
Targets *json.RawMessage `json:"targets"`
CreatedAt string `json:"created-at"`
FinishedAt string `json:"finished-at"`
Status string `json:"status"`
RolloutGroups map[string]WaveRolloutGroupRef `json:"rollout-groups"`
}
type WaveCreate struct {
Name string `json:"name"`
Version string `json:"version"`
Tag string `json:"tag"`
Targets tuf.Signed `json:"targets"`
}
type WaveRolloutOptions struct {
Group string `json:"group"`
}
type RolloutGroupStatus struct {
Name string `json:"name"`
RolloutAt string `json:"rollout-at"`
DevicesTotal int `json:"devices-total"`
DevicesOnline int `json:"devices-online"`
DevicesOnWave int `json:"devices-on-wave-version"`
DevicesOnNewer int `json:"devices-on-newer-version"`
DevicesOnOlder int `json:"devices-on-older-version"`
Targets []TargetStatus `json:"targets"`
}
type WaveStatus struct {
Name string `json:"name"`
Version int `json:"version"`
Tag string `json:"tag"`
Status string `json:"status"`
CreatedAt string `json:"created-at"`
FinishedAt string `json:"finished-at"`
TotalDevices int `json:"total-devices"`
UpdatedDevices int `json:"updated-devices"`
ScheduledDevices int `json:"scheduled-devices"`
UnscheduledDevices int `json:"unscheduled-devices"`
RolloutGroups []RolloutGroupStatus `json:"rollout-groups"`
OtherGroups []RolloutGroupStatus `json:"other-groups"`
}
type WireGuardIp struct {
Name string `json:"name"`
Ip string `json:"ip"`
Enabled bool `json:"enabled"`
}
// HttpError is an error returned when we have successfully received an HTTP response
// which contains an unexpected HTTP status code
type HttpError struct {
Message string
Response *http.Response
}
func (err *HttpError) Error() string {
return err.Message
}
// AsHttpError is preferable to a direct err.(*HttpError) type assertion, as it also accounts for wrapped errors.
func AsHttpError(err error) *HttpError {
var httpError *HttpError
if errors.As(err, &httpError) {
return httpError
} else {
return nil
}
}
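// Illustrative sketch (hypothetical variables, not part of the original file): callers
// can branch on the HTTP status of a failed request without manual unwrapping, e.g.
//
//	if _, err := api.Get(url); err != nil {
//		if herr := AsHttpError(err); herr != nil && herr.Response.StatusCode == 404 {
//			// treat "not found" as an empty result
//		}
//	}
//
// This mirrors the 404/409 checks used elsewhere in this file.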
func (d Device) Online(inactiveHoursThreshold int) bool {
if len(d.LastSeen) == 0 {
return false
}
t, err := time.Parse("2006-01-02T15:04:05+00:00", d.LastSeen)
if err == nil {
duration := time.Since(t)
if duration.Hours() > float64(inactiveHoursThreshold) {
return false
}
} else {
logrus.Error(err)
return false
}
return true
}
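// Illustrative sketch (hypothetical device value, not part of the original file): a
// device whose last check-in is older than the threshold (in hours) is reported offline:
//
//	if d.Online(4) {
//		fmt.Println(d.Name, "is online")
//	}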
func NewApiClient(serverUrl string, config Config, caCertPath string) *Api {
if len(caCertPath) > 0 {
rootCAs, _ := x509.SystemCertPool()
if rootCAs == nil {
rootCAs = x509.NewCertPool()
}
certs, err := ioutil.ReadFile(caCertPath)
if err != nil {
logrus.Fatalf("Failed to append %q to RootCAs: %v", caCertPath, err)
}
if ok := rootCAs.AppendCertsFromPEM(certs); !ok {
logrus.Warning("No certs appended, using system certs only")
}
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
RootCAs: rootCAs,
}
}
api := Api{
serverUrl: strings.TrimRight(serverUrl, "/"),
config: config,
client: *http.DefaultClient,
}
return &api
}
func httpLogger(req *http.Request) logrus.FieldLogger {
return logrus.WithFields(logrus.Fields{"url": req.URL.String(), "method": req.Method})
}
func readResponse(res *http.Response, log logrus.FieldLogger) (*[]byte, error) {
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
log.Debugf("I/O error reading response: %s", err)
return nil, err
}
// Accept all "normal" successful status codes: 200, 201, 202, 204, excluding quite inappropriate
// for RESTful web services 203, 205, and 206. There are some preferences what to return for
// each operation, but a client side normally should not fail if e.g. a POST returns 200, 202, or
// 204 instead of a usual 201. There are use cases when each of those status codes is valid and
// should be treated as a success. Though there might be some differences how that success is
// handled by a higher-level logic.
switch res.StatusCode {
case 200:
case 201:
case 202:
case 204:
break
default:
var PRINT_LIMIT, DEBUG_LIMIT int = 512, 8196
errBody := (string)(body)
if len(body) > DEBUG_LIMIT {
// too much is too much, even for a debug message
errBody = fmt.Sprintf("%s...(truncated body over %d)", body[:DEBUG_LIMIT], DEBUG_LIMIT)
}
log.Debugf("HTTP error received %s", res.Status)
log.Debug(errBody)
// Still return a body, a caller might need it, but also return an error
msg := fmt.Sprintf("HTTP error during %s '%s': %s",
res.Request.Method, res.Request.URL.String(), res.Status)
if len(body) < PRINT_LIMIT {
// return an error response body up to a meaningful limit - if it spans beyond a few
// lines, need to find a more appropriate message.
msg = fmt.Sprintf("%s\n=%s", msg, body)
}
err = &HttpError{msg, res}
}
return &body, err
}
func parseJobServResponse(resp *[]byte, err error, runName string) (string, string, error) {
if err != nil {
return "", "", err
}
type PatchResp struct {
JobServUrl string `json:"jobserv-url"`
WebUrl string `json:"web-url"`
}
pr := PatchResp{}
if err := json.Unmarshal(*resp, &pr); err != nil {
return "", "", err
}
return pr.JobServUrl + fmt.Sprintf("runs/%s/console.log", runName), pr.WebUrl, nil
}
func (a *Api) setReqHeaders(req *http.Request, jsonContent bool) {
req.Header.Set("User-Agent", "fioctl-2")
if len(a.config.Token) > 0 {
logrus.Debug("Using API token for http request")
headerName := os.Getenv("TOKEN_HEADER")
if len(headerName) == 0 {
headerName = "OSF-TOKEN"
}
req.Header.Set(headerName, a.config.Token)
}
for k, v := range a.config.ExtraHeaders {
logrus.Debugf("Setting extra HTTP header %s=%s", k, v)
req.Header.Set(k, v)
}
if len(a.config.ClientCredentials.AccessToken) > 0 {
logrus.Debug("Using oauth token for http request")
tok := base64.StdEncoding.EncodeToString([]byte(a.config.ClientCredentials.AccessToken))
req.Header.Set("Authorization", "Bearer "+tok)
}
if jsonContent {
req.Header.Set("Content-Type", "application/json")
}
}
func (a *Api) GetOauthConfig() OAuthConfig {
return a.config.ClientCredentials
}
func (a *Api) RawGet(url string, headers *map[string]string) (*http.Response, error) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
a.setReqHeaders(req, false)
if headers != nil {
for key, val := range *headers {
req.Header.Set(key, val)
}
}
return a.client.Do(req)
}
func (a *Api) Get(url string) (*[]byte, error) {
res, err := a.RawGet(url, nil)
log := logrus.WithFields(logrus.Fields{"url": url, "method": "GET"})
if err != nil {
log.Debugf("Network Error: %s", err)
return nil, err
}
return readResponse(res, log)
}
func (a *Api) Patch(url string, data []byte) (*[]byte, error) {
req, err := http.NewRequest(http.MethodPatch, url, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
a.setReqHeaders(req, true)
log := httpLogger(req)
res, err := a.client.Do(req)
if err != nil {
log.Debugf("Network Error: %s", err)
return nil, err
}
return readResponse(res, log)
}
func (a *Api) Post(url string, data []byte) (*[]byte, error) {
req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
a.setReqHeaders(req, true)
log := httpLogger(req)
res, err := a.client.Do(req)
if err != nil {
log.Debugf("Network Error: %s", err)
return nil, err
}
return readResponse(res, log)
}
func (a *Api) Put(url string, data []byte) (*[]byte, error) {
req, err := http.NewRequest(http.MethodPut, url, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
a.setReqHeaders(req, true)
log := httpLogger(req)
res, err := a.client.Do(req)
if err != nil {
log.Debugf("Network Error: %s", err)
return nil, err
}
return readResponse(res, log)
}
func (a *Api) Delete(url string, data []byte) (*[]byte, error) {
req, err := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
a.setReqHeaders(req, true)
log := httpLogger(req)
res, err := a.client.Do(req)
if err != nil {
log.Debugf("Network Error: %s", err)
return nil, err
}
return readResponse(res, log)
}
func (a *Api) DeviceGet(device string) (*Device, error) {
body, err := a.Get(a.serverUrl + "/ota/devices/" + device + "/")
if err != nil {
return nil, err
}
d := Device{}
err = json.Unmarshal(*body, &d)
if err != nil {
return nil, err
}
return &d, nil
}
func (a *Api) DeviceList(mine bool, matchTag, byFactory, byGroup, nameIlike, uuid string, page, limit int) (*DeviceList, error) {
mineInt := 0
if mine {
mineInt = 1
}
url := a.serverUrl + "/ota/devices/?"
url += fmt.Sprintf(
"mine=%d&match_tag=%s&name_ilike=%s&factory=%s&uuid=%s&group=%s&page=%d&limit=%d",
mineInt, matchTag, nameIlike, byFactory, uuid, byGroup, page, limit)
logrus.Debugf("DeviceList with url: %s", url)
return a.DeviceListCont(url)
}
func (a *Api) DeviceListCont(url string) (*DeviceList, error) {
body, err := a.Get(url)
if err != nil {
return nil, err
}
devices := DeviceList{}
err = json.Unmarshal(*body, &devices)
if err != nil {
return nil, err
}
return &devices, nil
}
func (a *Api) DeviceChown(name, owner string) error {
body := map[string]string{"owner": owner}
data, err := json.Marshal(body)
if err != nil {
return err
}
_, err = a.Patch(a.serverUrl+"/ota/devices/"+name+"/", data)
return err
}
func (a *Api) DeviceRename(curName, newName string) error {
body := map[string]string{"name": newName}
data, err := json.Marshal(body)
if err != nil {
return err
}
_, err = a.Patch(a.serverUrl+"/ota/devices/"+curName+"/", data)
return err
}
func (a *Api) DeviceSetGroup(device string, group string) error {
body := map[string]string{"group": group}
data, err := json.Marshal(body)
if err != nil {
return err
}
_, err = a.Patch(a.serverUrl+"/ota/devices/"+device+"/", data)
return err
}
func (a *Api) DeviceDelete(device string) error {
bytes := []byte{}
_, err := a.Delete(a.serverUrl+"/ota/devices/"+device+"/", bytes)
return err
}
func (a *Api) DeviceListUpdates(device string) (*UpdateList, error) {
return a.DeviceListUpdatesCont(a.serverUrl + "/ota/devices/" + device + "/updates/")
}
func (a *Api) DeviceListUpdatesCont(url string) (*UpdateList, error) {
body, err := a.Get(url)
if err != nil {
return nil, err
}
updates := UpdateList{}
err = json.Unmarshal(*body, &updates)
if err != nil {
return nil, err
}
return &updates, nil
}
func (a *Api) DeviceUpdateEvents(device, correlationId string) ([]UpdateEvent, error) {
var events []UpdateEvent
body, err := a.Get(a.serverUrl + "/ota/devices/" + device + "/updates/" + correlationId + "/")
if err != nil {
return nil, err
}
err = json.Unmarshal(*body, &events)
if err != nil {
return events, err
}
return events, nil
}
func (a *Api) DeviceCreateConfig(device string, cfg ConfigCreateRequest) error {
data, err := json.Marshal(cfg)
if err != nil {
return err
}
url := a.serverUrl + "/ota/devices/" + device + "/config/"
logrus.Debug("Creating new device config")
_, err = a.Post(url, data)
return err
}
func (a *Api) DevicePatchConfig(device string, cfg ConfigCreateRequest, force bool) error {
data, err := json.Marshal(cfg)
if err != nil {
return err
}
url := a.serverUrl + "/ota/devices/" + device + "/config/"
if force {
url += "?force=1"
}
logrus.Debug("Patching device config")
_, err = a.Patch(url, data)
return err
}
func (a *Api) DeviceListConfig(device string) (*DeviceConfigList, error) {
url := a.serverUrl + "/ota/devices/" + device + "/config/"
logrus.Debugf("DeviceListConfig with url: %s", url)
return a.DeviceListConfigCont(url)
}
func (a *Api) DeviceListConfigCont(url string) (*DeviceConfigList, error) {
body, err := a.Get(url)
if err != nil {
return nil, err
}
config := DeviceConfigList{}
err = json.Unmarshal(*body, &config)
if err != nil {
return nil, err
}
return &config, nil
}
func (a *Api) DeviceDeleteConfig(device, filename string) error {
url := a.serverUrl + "/ota/devices/" + device + "/config/" + filename + "/"
logrus.Debugf("Deleting config file: %s", url)
_, err := a.Delete(url, nil)
return err
}
func (a *Api) FactoryCreateConfig(factory string, cfg ConfigCreateRequest) error {
data, err := json.Marshal(cfg)
if err != nil {
return err
}
url := a.serverUrl + "/ota/factories/" + factory + "/config/"
logrus.Debug("Creating new factory config")
_, err = a.Post(url, data)
return err
}
func (a *Api) FactoryDeleteConfig(factory, filename string) error {
url := a.serverUrl + "/ota/factories/" + factory + "/config/" + filename + "/"
logrus.Debugf("Deleting config file: %s", url)
_, err := a.Delete(url, nil)
return err
}
func (a *Api) FactoryPatchConfig(factory string, cfg ConfigCreateRequest, force bool) error {
data, err := json.Marshal(cfg)
if err != nil {
return err
}
url := a.serverUrl + "/ota/factories/" + factory + "/config/"
if force {
url += "?force=1"
}
logrus.Debug("Creating new factory config")
_, err = a.Patch(url, data)
return err
}
func (a *Api) FactoryListConfig(factory string) (*DeviceConfigList, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/config/"
logrus.Debugf("FactoryListConfig with url: %s", url)
return a.FactoryListConfigCont(url)
}
func (a *Api) FactoryListConfigCont(url string) (*DeviceConfigList, error) {
// A shortcut, as it behaves just the same
return a.DeviceListConfigCont(url)
}
func (a *Api) GroupCreateConfig(factory, group string, cfg ConfigCreateRequest) error {
data, err := json.Marshal(cfg)
if err != nil {
return err
}
url := a.serverUrl + "/ota/factories/" + factory + "/device-groups/" + group + "/config/"
logrus.Debug("Creating new device group config")
_, err = a.Post(url, data)
return err
}
func (a *Api) GroupDeleteConfig(factory, group, filename string) error {
url := a.serverUrl + "/ota/factories/" + factory + "/device-groups/" + group + "/config/" + filename + "/"
logrus.Debugf("Deleting config file: %s", url)
_, err := a.Delete(url, nil)
return err
}
func (a *Api) GroupPatchConfig(factory, group string, cfg ConfigCreateRequest, force bool) error {
data, err := json.Marshal(cfg)
if err != nil {
return err
}
url := a.serverUrl + "/ota/factories/" + factory + "/device-groups/" + group + "/config/"
if force {
url += "?force=1"
}
logrus.Debug("Creating new device group config")
_, err = a.Patch(url, data)
return err
}
func (a *Api) GroupListConfig(factory, group string) (*DeviceConfigList, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/device-groups/" + group + "/config/"
logrus.Debugf("GroupListConfig with url: %s", url)
return a.GroupListConfigCont(url)
}
func (a *Api) GroupListConfigCont(url string) (*DeviceConfigList, error) {
// A shortcut, as it behaves just the same
return a.DeviceListConfigCont(url)
}
func (a *Api) FactoryStatus(factory string, inactiveThreshold int) (*FactoryStatus, error) {
url := fmt.Sprintf("%s/ota/factories/%s/status/?offline-threshold=%d", a.serverUrl, factory, inactiveThreshold)
logrus.Debugf("FactoryStatus with url: %s", url)
body, err := a.Get(url)
if err != nil {
return nil, err
}
s := FactoryStatus{}
err = json.Unmarshal(*body, &s)
if err != nil {
return nil, err
}
return &s, nil
}
func (a *Api) FactoryCreateDeviceGroup(factory string, name string, description *string) (*DeviceGroup, error) {
body := map[string]string{"name": name}
if description != nil {
body["description"] = *description
}
data, err := json.Marshal(body)
if err != nil {
return nil, err
}
url := a.serverUrl + "/ota/factories/" + factory + "/device-groups/"
logrus.Debugf("Creating new factory device group: %s", url)
resp, err := a.Post(url, data)
if err != nil {
if herr := AsHttpError(err); herr != nil && herr.Response.StatusCode == 409 {
err = fmt.Errorf("A device group with this name already exists")
}
return nil, err
}
grp := DeviceGroup{}
err = json.Unmarshal(*resp, &grp)
if err != nil {
return nil, err
}
return &grp, nil
}
func (a *Api) FactoryDeleteDeviceGroup(factory string, name string) error {
url := a.serverUrl + "/ota/factories/" + factory + "/device-groups/" + name + "/"
logrus.Debugf("Deleting factory device group: %s", url)
_, err := a.Delete(url, nil)
if herr := AsHttpError(err); herr != nil && herr.Response.StatusCode == 409 {
err = fmt.Errorf("There are devices assigned to this device group")
}
return err
}
func (a *Api) FactoryPatchDeviceGroup(factory string, name string, new_name *string, new_desc *string) error {
body := map[string]string{}
if new_name != nil {
body["name"] = *new_name
}
if new_desc != nil {
body["description"] = *new_desc
}
data, err := json.Marshal(body)
if err != nil {
return err
}
url := a.serverUrl + "/ota/factories/" + factory + "/device-groups/" + name + "/"
logrus.Debugf("Updating factory device group :%s", url)
_, err = a.Patch(url, data)
if herr := AsHttpError(err); herr != nil && herr.Response.StatusCode == 409 {
err = fmt.Errorf("A device group with this name already exists")
}
return err
}
func (a *Api) FactoryListDeviceGroup(factory string) (*[]DeviceGroup, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/device-groups/"
logrus.Debugf("Fetching factory device groups: %s", url)
body, err := a.Get(url)
if err != nil {
return nil, err
}
type DeviceGroupList struct {
Groups []DeviceGroup `json:"groups"`
}
resp := DeviceGroupList{}
err = json.Unmarshal(*body, &resp)
if err != nil {
return nil, err
}
return &resp.Groups, nil
}
func (a *Api) GetFoundriesTargetsKey(factory string) (*AtsKey, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/ci-targets.pub"
body, err := a.Get(url)
if err != nil {
return nil, err
}
key := AtsKey{}
err = json.Unmarshal(*body, &key)
return &key, err
}
func (a *Api) GetWireGuardIps(factory string) ([]WireGuardIp, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/wireguard-ips/"
body, err := a.Get(url)
if err != nil {
return nil, err
}
var ips []WireGuardIp
err = json.Unmarshal(*body, &ips)
return ips, err
}
func (a *Api) tufRootGet(factory string, prod bool, version int) (*AtsTufRoot, error) {
url := a.serverUrl + "/ota/repo/" + factory + "/api/v1/user_repo/"
if version > 0 {
url += fmt.Sprintf("%d.", version)
}
url += "root.json"
if prod {
url += "?production=1"
}
logrus.Debugf("Fetch root %s", url)
body, err := a.Get(url)
if err != nil {
return nil, err
}
root := AtsTufRoot{}
err = json.Unmarshal(*body, &root)
return &root, err
}
func (a *Api) TufRootGet(factory string) (*AtsTufRoot, error) {
return a.tufRootGet(factory, false, -1)
}
func (a *Api) TufRootGetVer(factory string, version int) (*AtsTufRoot, error) {
return a.tufRootGet(factory, false, version)
}
func (a *Api) TufProdRootGet(factory string) (*AtsTufRoot, error) {
return a.tufRootGet(factory, true, -1)
}
func (a *Api) tufRootPost(factory string, prod bool, root []byte) (string, error) {
url := a.serverUrl + "/ota/repo/" + factory + "/api/v1/user_repo/root"
if prod {
url += "?production=1"
}
body, err := a.Post(url, root)
if body != nil {
return string(*body), err
}
return "", err
}
func (a *Api) TufRootPost(factory string, root []byte) (string, error) {
return a.tufRootPost(factory, false, root)
}
func (a *Api) TufProdRootPost(factory string, root []byte) (string, error) {
return a.tufRootPost(factory, true, root)
}
func (a *Api) TargetsListRaw(factory string) (*[]byte, error) {
url := a.serverUrl + "/ota/repo/" + factory + "/api/v1/user_repo/targets.json"
return a.Get(url)
}
func (a *Api) TargetGet(factory string, targetName string) (*tuf.FileMeta, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/targets/" + targetName
body, err := a.Get(url)
if err != nil {
return nil, err
}
var target tuf.FileMeta
err = json.Unmarshal(*body, &target)
if err != nil {
return nil, err
}
return &target, nil
}
func (a *Api) TargetsList(factory string, version ...string) (tuf.Files, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/targets/"
if len(version) == 1 {
url += "?version=" + version[0]
}
body, err := a.Get(url)
if err != nil {
return nil, err
}
targets := make(tuf.Files)
err = json.Unmarshal(*body, &targets)
if err != nil {
return nil, err
}
return targets, nil
}
func (a *Api) TargetCustom(target tuf.FileMeta) (*TufCustom, error) {
custom := TufCustom{}
err := json.Unmarshal(*target.Custom, &custom)
if err != nil {
return nil, err
}
return &custom, nil
}
func (a *Api) TargetsPut(factory string, data []byte) (string, string, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/targets/"
resp, err := a.Put(url, data)
if err != nil {
return "", "", err
}
return parseJobServResponse(resp, err, "UpdateTargets")
}
func (a *Api) TargetUpdateTags(factory string, target_names []string, tag_names []string) (string, string, error) {
type EmptyTarget struct {
Custom TufCustom `json:"custom"`
}
tags := EmptyTarget{TufCustom{Tags: tag_names}}
type Update struct {
Targets map[string]EmptyTarget `json:"targets"`
}
update := Update{map[string]EmptyTarget{}}
for idx := range target_names {
update.Targets[target_names[idx]] = tags
}
data, err := json.Marshal(update)
if err != nil {
return "", "", err
}
url := a.serverUrl + "/ota/factories/" + factory + "/targets/"
resp, err := a.Patch(url, data)
return parseJobServResponse(resp, err, "UpdateTargets")
}
func (a *Api) TargetDeleteTargets(factory string, target_names []string) (string, string, error) {
type Update struct {
Targets []string `json:"targets"`
}
update := Update{}
update.Targets = target_names
data, err := json.Marshal(update)
if err != nil {
return "", "", err
}
url := a.serverUrl + "/ota/factories/" + factory + "/targets/"
resp, err := a.Delete(url, data)
return parseJobServResponse(resp, err, "UpdateTargets")
}
func (a *Api) TargetImageCreate(factory string, targetName string, appShortlist string) (string, string, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/targets/" + targetName + "/images/"
if len(appShortlist) > 0 {
url += "?app_shortlist=" + appShortlist
}
resp, err := a.Post(url, nil)
return parseJobServResponse(resp, err, "assemble-system-image")
}
// TargetComposeApp returns a Compose App bundle for a given Target name and App name
func (a *Api) TargetComposeApp(factory string, targetName string, app string) (*ComposeAppBundle, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/targets/" + targetName + "/compose-apps/" + app + "/"
logrus.Debugf("TargetApp with url: %s", url)
body, err := a.Get(url)
if err != nil {
if herr := AsHttpError(err); herr != nil {
logrus.Debugf("HTTP error %s received, try to parse a partial response", herr.Response.Status)
} else {
return nil, err
}
}
result := ComposeAppBundle{}
if perr := json.Unmarshal(*body, &result); perr != nil {
logrus.Debugf("Parse Error: %s", perr)
if err == nil {
return nil, perr
} else {
// Most probably a parse error is caused by an HTTP error - return both
return nil, fmt.Errorf("Parse Error: %w after HTTP error %s", perr, err)
}
} else {
return &result, nil
}
}
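// Illustrative sketch (hypothetical factory, target and app names, not part of the
// original file):
//
//	bundle, err := api.TargetComposeApp("my-factory", "intel-corei7-64-lmp-42", "shellhttpd")
//	if err == nil {
//		fmt.Println(bundle.Uri, bundle.Content.Files)
//	}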
func (a *Api) TargetDeltasCreate(factory string, toVer int, fromVers []int) (string, string, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/targets/" + strconv.Itoa(toVer) + "/static-deltas/"
type payload struct {
FromVersions []int `json:"from_versions"`
}
buf, err := json.Marshal(payload{fromVers})
if err != nil {
return "", "", err
}
resp, err := a.Post(url, buf)
return parseJobServResponse(resp, err, "generate")
}
// TargetTesting returns a list of Target versions that have been tested
func (a *Api) TargetTesting(factory string) ([]int, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/testing/"
logrus.Debugf("TargetTesting with url: %s", url)
body, err := a.Get(url)
if err != nil {
return nil, err
}
type resp struct {
Versions []int `json:"versions"`
}
r := resp{}
if err = json.Unmarshal(*body, &r); err != nil {
return nil, err
}
return r.Versions, nil
}
func (a *Api) TargetTests(factory string, target int) (*TargetTestList, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/targets/" + strconv.Itoa(target) + "/testing/"
logrus.Debugf("TargetTests with url: %s", url)
return a.TargetTestsCont(url)
}
func (a *Api) TargetTestsCont(url string) (*TargetTestList, error) {
body, err := a.Get(url)
if err != nil {
return nil, err
}
tests := TargetTestList{}
err = json.Unmarshal(*body, &tests)
if err != nil {
return nil, err
}
return &tests, nil
}
func (a *Api) TargetTestResults(factory string, target int, testId string) (*TargetTest, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/targets/" + strconv.Itoa(target) + "/testing/" + testId + "/"
logrus.Debugf("TargetTests with url: %s", url)
body, err := a.Get(url)
if err != nil {
return nil, err
}
test := TargetTest{}
err = json.Unmarshal(*body, &test)
if err != nil {
return nil, err
}
return &test, nil
}
func (a *Api) TargetTestArtifact(factory string, target int, testId string, artifact string) (*[]byte, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/targets/" + strconv.Itoa(target) + "/testing/" + testId + "/" + artifact
logrus.Debugf("TargetTests with url: %s", url)
return a.Get(url)
}
func (a *Api) JobservRuns(factory string, build int) ([]JobservRun, error) {
url := a.serverUrl + "/projects/" + factory + "/lmp/builds/" + strconv.Itoa(build) + "/runs/"
logrus.Debugf("JobservRuns with url: %s", url)
body, err := a.Get(url)
if err != nil {
return nil, err
}
type Jsonified struct {
Data struct {
Runs []JobservRun `json:"runs"`
} `json:"data"`
}
var jsonified Jsonified
err = json.Unmarshal(*body, &jsonified)
if err != nil {
return nil, err
}
return jsonified.Data.Runs, nil
}
func (a *Api) JobservRun(runUrl string) (*JobservRun, error) {
logrus.Debugf("JobservRun with url: %s", runUrl)
body, err := a.Get(runUrl)
if err != nil {
return nil, err
}
type Jsonified struct {
Data struct {
Run JobservRun `json:"run"`
} `json:"data"`
}
var jsonified Jsonified
err = json.Unmarshal(*body, &jsonified)
if err != nil {
return nil, err
}
return &jsonified.Data.Run, nil
}
func (a *Api) JobservRunArtifact(factory string, build int, run string, artifact string) (*http.Response, error) {
url := a.serverUrl + "/projects/" + factory + "/lmp/builds/" + strconv.Itoa(build) + "/runs/" + run + "/" + artifact
logrus.Debugf("JobservRunArtifact with url: %s", url)
return a.RawGet(url, nil)
}
func (a *Api) JobservTail(url string) {
offset := 0
status := ""
for {
headers := map[string]string{"X-OFFSET": strconv.Itoa(offset)}
resp, err := a.RawGet(url, &headers)
if err != nil {
fmt.Printf("TODO LOG ERROR OR SOMETHING: %s\n", err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Printf("Unable to read body resp: %s", err)
}
if resp.StatusCode != 200 {
fmt.Printf("Unable to get '%s': HTTP_%d\n=%s", url, resp.StatusCode, body)
}
newstatus := resp.Header.Get("X-RUN-STATUS")
if newstatus == "QUEUED" {
if status == "" {
os.Stdout.Write(body)
} else {
os.Stdout.WriteString(".")
}
} else if len(newstatus) == 0 {
body = body[offset:]
os.Stdout.Write(body)
return
} else {
if newstatus != status {
fmt.Printf("\n--- Status change: %s -> %s\n", status, newstatus)
}
os.Stdout.Write(body)
offset += len(body)
}
status = newstatus
time.Sleep(5 * time.Second)
}
}
func (a *Api) FactoryTriggers(factory string) ([]ProjectTrigger, error) {
type Resp struct {
Data []ProjectTrigger `json:"data"`
}
body, err := a.Get(a.serverUrl + "/projects/" + factory + "/lmp/triggers/")
if err != nil {
return nil, err
}
r := Resp{}
err = json.Unmarshal(*body, &r)
return r.Data, err
}
func (a *Api) FactoryUpdateTrigger(factory string, t ProjectTrigger) error {
data, err := json.Marshal(t)
if err != nil {
return err
}
url := a.serverUrl + "/projects/" + factory + "/lmp/triggers/"
if t.Id == 0 {
logrus.Debugf("Creating new trigger")
_, err := a.Post(url, data)
return err
} else {
logrus.Debugf("Patching trigger %d", t.Id)
url += strconv.Itoa(t.Id) + "/"
_, err := a.Patch(url, data)
return err
}
}
func (a *Api) UsersList(factory string) ([]FactoryUser, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/users/"
body, err := a.Get(url)
if err != nil {
return nil, err
}
var users []FactoryUser
err = json.Unmarshal(*body, &users)
if err != nil {
return nil, err
}
return users, nil
}
func (a *Api) FactoryGetCA(factory string) (CaCerts, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/certs/"
logrus.Debugf("Getting certs %s", url)
var resp CaCerts
body, err := a.Get(url)
if err != nil {
return resp, err
}
err = json.Unmarshal(*body, &resp)
return resp, err
}
func (a *Api) FactoryCreateCA(factory string) (CaCerts, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/certs/"
logrus.Debugf("Creating new factory CA %s", url)
var resp CaCerts
body, err := a.Post(url, []byte("{}"))
if err != nil {
return resp, err
}
err = json.Unmarshal(*body, &resp)
return resp, err
}
func (a *Api) FactoryPatchCA(factory string, certs CaCerts) error {
url := a.serverUrl + "/ota/factories/" + factory + "/certs/"
logrus.Debugf("Patching factory CA %s", url)
data, err := json.Marshal(certs)
if err != nil {
return err
}
_, err = a.Patch(url, data)
return err
}
func (a *Api) FactoryCreateWave(factory string, wave *WaveCreate) error {
url := a.serverUrl + "/ota/factories/" + factory + "/waves/"
logrus.Debugf("Creating factory wave %s", url)
data, err := json.Marshal(wave)
if err != nil {
return err
}
_, err = a.Post(url, data)
return err
}
func (a *Api) FactoryListWaves(factory string, limit uint64) ([]Wave, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/waves/?limit=" + strconv.FormatUint(limit, 10)
logrus.Debugf("Listing factory waves %s", url)
body, err := a.Get(url)
if err != nil {
return nil, err
}
var resp []Wave
err = json.Unmarshal(*body, &resp)
return resp, err
}
func (a *Api) FactoryGetWave(factory string, wave string, showTargets bool) (*Wave, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/waves/" + wave + "/"
if showTargets {
url += "?show-targets=1"
}
logrus.Debugf("Fetching factory wave %s", url)
body, err := a.Get(url)
if err != nil {
return nil, err
}
var resp Wave
err = json.Unmarshal(*body, &resp)
return &resp, err
}
func (a *Api) FactoryRolloutWave(factory string, wave string, options WaveRolloutOptions) error {
url := a.serverUrl + "/ota/factories/" + factory + "/waves/" + wave + "/rollout/"
logrus.Debugf("Rolling out factory wave %s", url)
data, err := json.Marshal(options)
if err != nil {
return err
}
_, err = a.Post(url, data)
return err
}
func (a *Api) FactoryCancelWave(factory string, wave string) error {
url := a.serverUrl + "/ota/factories/" + factory + "/waves/" + wave + "/cancel/"
logrus.Debugf("Canceling factory wave %s", url)
_, err := a.Post(url, nil)
return err
}
func (a *Api) FactoryCompleteWave(factory string, wave string) error {
url := a.serverUrl + "/ota/factories/" + factory + "/waves/" + wave + "/complete/"
logrus.Debugf("Completing factory wave %s", url)
_, err := a.Post(url, nil)
return err
}
func (a *Api) FactoryWaveStatus(factory string, wave string, inactiveThreshold int) (*WaveStatus, error) {
url := fmt.Sprintf("%s/ota/factories/%s/waves/%s/status/?offline-threshold=%d",
a.serverUrl, factory, wave, inactiveThreshold)
logrus.Debugf("Fetching factory wave status %s", url)
body, err := a.Get(url)
if err != nil {
return nil, err
}
s := WaveStatus{}
err = json.Unmarshal(*body, &s)
if err != nil {
return nil, err
}
return &s, nil
}
func (a *Api) ProdTargetsList(factory string, failNotExist bool, tags ...string) (map[string]AtsTufTargets, error) {
url := a.serverUrl + "/ota/factories/" + factory + "/prod-targets/?tag=" + strings.Join(tags, ",")
logrus.Debugf("Fetching factory production targets %s", url)
body, err := a.Get(url)
if err != nil {
if !failNotExist {
if herr := AsHttpError(err); herr != nil && herr.Response.StatusCode == 404 {
return nil, nil
}
}
return nil, err
}
resp := make(map[string]AtsTufTargets)
err = json.Unmarshal(*body, &resp)
return resp, err
}
func (a *Api) ProdTargetsGet(factory string, tag string, failNotExist bool) (*AtsTufTargets, error) {
targetsMap, err := a.ProdTargetsList(factory, failNotExist, tag)
if err != nil || targetsMap == nil {
return nil, err
}
targets := targetsMap[tag]
return &targets, nil
}
| [
"\"TOKEN_HEADER\""
]
| []
| [
"TOKEN_HEADER"
]
| [] | ["TOKEN_HEADER"] | go | 1 | 0 | |
network/tunnel/tunnel_test.go | package tunnel
import (
"os"
"sync"
"testing"
"time"
"github.com/ship-os/ship-micro/v2/transport"
)
func testBrokenTunAccept(t *testing.T, tun Tunnel, wait chan bool, wg *sync.WaitGroup) {
defer wg.Done()
// listen on some virtual address
tl, err := tun.Listen("test-tunnel")
if err != nil {
t.Fatal(err)
}
// receiver ready; notify sender
wait <- true
// accept a connection
c, err := tl.Accept()
if err != nil {
t.Fatal(err)
}
// accept the message and close the tunnel
// we do this to simulate loss of network connection
m := new(transport.Message)
if err := c.Recv(m); err != nil {
t.Fatal(err)
}
// close all the links
for _, link := range tun.Links() {
link.Close()
}
// receiver ready; notify sender
wait <- true
// accept the message
m = new(transport.Message)
if err := c.Recv(m); err != nil {
t.Fatal(err)
}
// notify the sender we have received
wait <- true
}
func testBrokenTunSend(t *testing.T, tun Tunnel, wait chan bool, wg *sync.WaitGroup, reconnect time.Duration) {
defer wg.Done()
// wait for the listener to get ready
<-wait
// dial a new session
c, err := tun.Dial("test-tunnel")
if err != nil {
t.Fatal(err)
}
defer c.Close()
m := transport.Message{
Header: map[string]string{
"test": "send",
},
}
// send the message
if err := c.Send(&m); err != nil {
t.Fatal(err)
}
// wait for the listener to get ready
<-wait
// give it time to reconnect
time.Sleep(reconnect)
// send the message
if err := c.Send(&m); err != nil {
t.Fatal(err)
}
// wait for the listener to receive the message
// c.Send merely enqueues the message to the link send queue and returns
// in order to verify it was received we wait for the listener to tell us
<-wait
}
// testAccept will accept connections on the transport, create a new link and tunnel on top
func testAccept(t *testing.T, tun Tunnel, wait chan bool, wg *sync.WaitGroup) {
defer wg.Done()
// listen on some virtual address
tl, err := tun.Listen("test-tunnel")
if err != nil {
t.Fatal(err)
}
// receiver ready; notify sender
wait <- true
// accept a connection
c, err := tl.Accept()
if err != nil {
t.Fatal(err)
}
// get a message
for {
// accept the message
m := new(transport.Message)
if err := c.Recv(m); err != nil {
t.Fatal(err)
}
if v := m.Header["test"]; v != "send" {
t.Fatalf("Accept side expected test:send header. Received: %s", v)
}
// now respond
m.Header["test"] = "accept"
if err := c.Send(m); err != nil {
t.Fatal(err)
}
wait <- true
return
}
}
// testSend will create a new link to an address and then a tunnel on top
func testSend(t *testing.T, tun Tunnel, wait chan bool, wg *sync.WaitGroup) {
defer wg.Done()
// wait for the listener to get ready
<-wait
// dial a new session
c, err := tun.Dial("test-tunnel")
if err != nil {
t.Fatal(err)
}
defer c.Close()
m := transport.Message{
Header: map[string]string{
"test": "send",
},
}
// send the message
if err := c.Send(&m); err != nil {
t.Fatal(err)
}
// now wait for the response
mr := new(transport.Message)
if err := c.Recv(mr); err != nil {
t.Fatal(err)
}
<-wait
if v := mr.Header["test"]; v != "accept" {
t.Fatalf("Message not received from accepted side. Received: %s", v)
}
}
func TestTunnel(t *testing.T) {
// create a new tunnel client
tunA := NewTunnel(
Address("127.0.0.1:9096"),
Nodes("127.0.0.1:9097"),
)
// create a new tunnel server
tunB := NewTunnel(
Address("127.0.0.1:9097"),
)
// start tunB
err := tunB.Connect()
if err != nil {
t.Fatal(err)
}
defer tunB.Close()
// start tunA
err = tunA.Connect()
if err != nil {
t.Fatal(err)
}
defer tunA.Close()
wait := make(chan bool)
var wg sync.WaitGroup
wg.Add(1)
// start the listener
go testAccept(t, tunB, wait, &wg)
wg.Add(1)
// start the client
go testSend(t, tunA, wait, &wg)
// wait until done
wg.Wait()
}
func TestLoopbackTunnel(t *testing.T) {
// create a new tunnel
tun := NewTunnel(
Address("127.0.0.1:9096"),
Nodes("127.0.0.1:9096"),
)
// start tunnel
err := tun.Connect()
if err != nil {
t.Fatal(err)
}
defer tun.Close()
time.Sleep(500 * time.Millisecond)
wait := make(chan bool)
var wg sync.WaitGroup
wg.Add(1)
// start the listener
go testAccept(t, tun, wait, &wg)
wg.Add(1)
// start the client
go testSend(t, tun, wait, &wg)
// wait until done
wg.Wait()
}
func TestTunnelRTTRate(t *testing.T) {
// create a new tunnel client
tunA := NewTunnel(
Address("127.0.0.1:9096"),
Nodes("127.0.0.1:9097"),
)
// create a new tunnel server
tunB := NewTunnel(
Address("127.0.0.1:9097"),
)
// start tunB
err := tunB.Connect()
if err != nil {
t.Fatal(err)
}
defer tunB.Close()
// start tunA
err = tunA.Connect()
if err != nil {
t.Fatal(err)
}
defer tunA.Close()
wait := make(chan bool)
var wg sync.WaitGroup
wg.Add(1)
// start the listener
go testAccept(t, tunB, wait, &wg)
wg.Add(1)
// start the client
go testSend(t, tunA, wait, &wg)
// wait until done
wg.Wait()
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
// only needed for debug
for _, link := range tunA.Links() {
t.Logf("Link %s length %v rate %v", link.Id(), link.Length(), link.Rate())
}
for _, link := range tunB.Links() {
t.Logf("Link %s length %v rate %v", link.Id(), link.Length(), link.Rate())
}
}
}
func TestReconnectTunnel(t *testing.T) {
// we manually override the tunnel.ReconnectTime value here
// this is so that we make the reconnects faster than the default 5s
ReconnectTime = 200 * time.Millisecond
// create a new tunnel client
tunA := NewTunnel(
Address("127.0.0.1:9098"),
Nodes("127.0.0.1:9099"),
)
// create a new tunnel server
tunB := NewTunnel(
Address("127.0.0.1:9099"),
)
// start tunnel
err := tunB.Connect()
if err != nil {
t.Fatal(err)
}
defer tunB.Close()
// start tunnel
err = tunA.Connect()
if err != nil {
t.Fatal(err)
}
defer tunA.Close()
wait := make(chan bool)
var wg sync.WaitGroup
wg.Add(1)
// start tunnel listener
go testBrokenTunAccept(t, tunB, wait, &wg)
wg.Add(1)
// start tunnel sender
go testBrokenTunSend(t, tunA, wait, &wg, ReconnectTime*5)
// wait until done
wg.Wait()
}
| [
"\"IN_TRAVIS_CI\""
]
| []
| [
"IN_TRAVIS_CI"
]
| [] | ["IN_TRAVIS_CI"] | go | 1 | 0 | |
chalice/cli/factory.py | import sys
import os
import json
import importlib
import logging
import functools
import click
from botocore.config import Config as BotocoreConfig
from botocore.session import Session
from typing import Any, Optional, Dict, MutableMapping # noqa
from chalice import __version__ as chalice_version
from chalice.awsclient import TypedAWSClient
from chalice.app import Chalice # noqa
from chalice.config import Config
from chalice.config import DeployedResources # noqa
from chalice.package import create_app_packager
from chalice.package import AppPackager # noqa
from chalice.constants import DEFAULT_STAGE_NAME
from chalice.constants import DEFAULT_APIGATEWAY_STAGE_NAME
from chalice.logs import LogRetriever
from chalice import local
from chalice.utils import UI # noqa
from chalice.utils import PipeReader # noqa
from chalice.deploy import deployer # noqa
from chalice.invoke import LambdaInvokeHandler
from chalice.invoke import LambdaInvoker
from chalice.invoke import LambdaResponseFormatter
_OPT_STR = Optional[str]
_OPT_INT = Optional[int]
def create_botocore_session(profile=None, debug=False,
connection_timeout=None,
read_timeout=None,
max_retries=None):
# type: (_OPT_STR, bool, _OPT_INT, _OPT_INT, _OPT_INT) -> Session
s = Session(profile=profile)
_add_chalice_user_agent(s)
if debug:
_inject_large_request_body_filter()
config_args = {} # type: Dict[str, Any]
if connection_timeout is not None:
config_args['connect_timeout'] = connection_timeout
if read_timeout is not None:
config_args['read_timeout'] = read_timeout
if max_retries is not None:
config_args['retries'] = {'max_attempts': max_retries}
if config_args:
config = BotocoreConfig(**config_args)
s.set_default_client_config(config)
return s
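# Illustrative sketch (assumption, not part of the original file): building a session
# with a named profile and a custom retry setting might look like:
#
#   session = create_botocore_session(profile="dev", debug=True, max_retries=2)
#
# The profile name is hypothetical; the retry setting ends up in the default botocore
# client config assembled above.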
def _add_chalice_user_agent(session):
# type: (Session) -> None
suffix = '%s/%s' % (session.user_agent_name, session.user_agent_version)
session.user_agent_name = 'aws-chalice'
session.user_agent_version = chalice_version
session.user_agent_extra = suffix
def _inject_large_request_body_filter():
# type: () -> None
log = logging.getLogger('botocore.endpoint')
log.addFilter(LargeRequestBodyFilter())
class NoSuchFunctionError(Exception):
"""The specified function could not be found."""
def __init__(self, name):
# type: (str) -> None
self.name = name
super(NoSuchFunctionError, self).__init__()
class UnknownConfigFileVersion(Exception):
def __init__(self, version):
# type: (str) -> None
super(UnknownConfigFileVersion, self).__init__(
"Unknown version '%s' in config.json" % version)
class LargeRequestBodyFilter(logging.Filter):
def filter(self, record):
# type: (Any) -> bool
# Note: the proper type should be "logging.LogRecord", but
# the typechecker complains about 'Invalid index type "int" for "dict"'
# so we're using Any for now.
if record.msg.startswith('Making request'):
if record.args[0].name in ['UpdateFunctionCode', 'CreateFunction']:
# When using the ZipFile argument (which is used in chalice),
# the entire deployment package zip is sent as a base64 encoded
# string. We don't want this to clutter the debug logs
# so we don't log the request body for lambda operations
# that have the ZipFile arg.
record.args = (record.args[:-1] +
('(... omitted from logs due to size ...)',))
return True
class CLIFactory(object):
def __init__(self, project_dir, debug=False, profile=None, environ=None):
# type: (str, bool, Optional[str], Optional[MutableMapping]) -> None
self.project_dir = project_dir
self.debug = debug
self.profile = profile
if environ is None:
environ = dict(os.environ)
self._environ = environ
def create_botocore_session(self, connection_timeout=None,
read_timeout=None, max_retries=None):
# type: (_OPT_INT, _OPT_INT, _OPT_INT) -> Session
return create_botocore_session(profile=self.profile,
debug=self.debug,
connection_timeout=connection_timeout,
read_timeout=read_timeout,
max_retries=max_retries)
def create_default_deployer(self, session, config, ui):
# type: (Session, Config, UI) -> deployer.Deployer
return deployer.create_default_deployer(session, config, ui)
def create_deletion_deployer(self, session, ui):
# type: (Session, UI) -> deployer.Deployer
return deployer.create_deletion_deployer(
TypedAWSClient(session), ui)
def create_deployment_reporter(self, ui):
# type: (UI) -> deployer.DeploymentReporter
return deployer.DeploymentReporter(ui=ui)
def create_config_obj(self, chalice_stage_name=DEFAULT_STAGE_NAME,
autogen_policy=None,
api_gateway_stage=None):
# type: (str, Optional[bool], str) -> Config
user_provided_params = {} # type: Dict[str, Any]
default_params = {'project_dir': self.project_dir,
'api_gateway_stage': DEFAULT_APIGATEWAY_STAGE_NAME,
'autogen_policy': True}
try:
config_from_disk = self.load_project_config()
except (OSError, IOError):
raise RuntimeError("Unable to load the project config file. "
"Are you sure this is a chalice project?")
except ValueError as err:
raise RuntimeError("Unable to load the project config file: %s"
% err)
self._validate_config_from_disk(config_from_disk)
if autogen_policy is not None:
user_provided_params['autogen_policy'] = autogen_policy
if self.profile is not None:
user_provided_params['profile'] = self.profile
if api_gateway_stage is not None:
user_provided_params['api_gateway_stage'] = api_gateway_stage
config = Config(chalice_stage=chalice_stage_name,
user_provided_params=user_provided_params,
config_from_disk=config_from_disk,
default_params=default_params)
user_provided_params['chalice_app'] = functools.partial(
self.load_chalice_app, config.environment_variables)
return config
def _validate_config_from_disk(self, config):
# type: (Dict[str, Any]) -> None
string_version = config.get('version', '1.0')
try:
version = float(string_version)
if version > 2.0:
raise UnknownConfigFileVersion(string_version)
except ValueError:
raise UnknownConfigFileVersion(string_version)
def create_app_packager(self, config):
# type: (Config) -> AppPackager
return create_app_packager(config)
def create_log_retriever(self, session, lambda_arn):
# type: (Session, str) -> LogRetriever
client = TypedAWSClient(session)
retriever = LogRetriever.create_from_lambda_arn(client, lambda_arn)
return retriever
def create_stdin_reader(self):
# type: () -> PipeReader
stream = click.get_binary_stream('stdin')
reader = PipeReader(stream)
return reader
def create_lambda_invoke_handler(self, name, stage):
# type: (str, str) -> LambdaInvokeHandler
config = self.create_config_obj(stage)
deployed = config.deployed_resources(stage)
try:
resource = deployed.resource_values(name)
arn = resource['lambda_arn']
except (KeyError, ValueError):
raise NoSuchFunctionError(name)
function_scoped_config = config.scope(stage, name)
# The session for max retries needs to be set to 0 for invoking a
# lambda function because in the case of a timeout or other retriable
# error the underlying client will call the function again.
session = self.create_botocore_session(
read_timeout=function_scoped_config.lambda_timeout,
max_retries=0,
)
client = TypedAWSClient(session)
invoker = LambdaInvoker(arn, client)
handler = LambdaInvokeHandler(
invoker,
LambdaResponseFormatter(),
UI(),
)
return handler
def load_chalice_app(self, environment_variables=None):
# type: (Optional[MutableMapping]) -> Chalice
if self.project_dir not in sys.path:
sys.path.insert(0, self.project_dir)
# The vendor directory has its contents copied up to the top level of
# the deployment package. This means that imports will work in the
# lambda function as if the vendor directory is on the python path.
# For loading the config locally we must add the vendor directory to
# the path so it will be treated the same as if it were running on
# lambda.
vendor_dir = os.path.join(self.project_dir, 'vendor')
if os.path.isdir(vendor_dir) and vendor_dir not in sys.path:
# This is a tradeoff we have to make for local use.
# The common use case of vendor/ is to include
# extension modules built for AWS Lambda. If you're
# running on a non-linux dev machine, then attempting
# to import these files will raise exceptions. As
# a workaround, the vendor is added to the end of
# sys.path so it's after `./lib/site-packages`.
# This gives you a chance to install the correct
# version locally and still keep the lambda
# specific one in vendor/
sys.path.append(vendor_dir)
if environment_variables is not None:
self._environ.update(environment_variables)
try:
app = importlib.import_module('app')
chalice_app = getattr(app, 'app')
except SyntaxError as e:
message = (
'Unable to import your app.py file:\n\n'
'File "%s", line %s\n'
' %s\n'
'SyntaxError: %s'
) % (getattr(e, 'filename'), e.lineno, e.text, e.msg)
raise RuntimeError(message)
return chalice_app
def load_project_config(self):
# type: () -> Dict[str, Any]
"""Load the chalice config file from the project directory.
:raise: OSError/IOError if unable to load the config file.
"""
config_file = os.path.join(self.project_dir, '.chalice', 'config.json')
with open(config_file) as f:
return json.loads(f.read())
def create_local_server(self, app_obj, config, host, port):
# type: (Chalice, Config, str, int) -> local.LocalDevServer
return local.create_local_server(app_obj, config, host, port)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
docker/pkg/jsonmessage/jsonmessage.go | package jsonmessage // import "github.com/obalunenko/dockertest/v3/docker/pkg/jsonmessage"
import (
"encoding/json"
"fmt"
"io"
"os"
"strings"
"time"
gotty "github.com/Nvveen/Gotty"
units "github.com/docker/go-units"
"github.com/moby/term"
)
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
// ensure the formatted time is always the same number of characters.
const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// JSONError wraps a concrete Code and Message. `Code` is
// an integer error code, `Message` is the error message.
type JSONError struct {
Code int `json:"code,omitempty"`
Message string `json:"message,omitempty"`
}
func (e *JSONError) Error() string {
return e.Message
}
// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
// Start is the initial value for the operation. Current is the current status and
// value of the progress made towards Total. Total is the end value describing when
// we made 100% progress for an operation.
type JSONProgress struct {
terminalFd uintptr
Current int64 `json:"current,omitempty"`
Total int64 `json:"total,omitempty"`
Start int64 `json:"start,omitempty"`
// If true, don't show xB/yB
HideCounts bool `json:"hidecounts,omitempty"`
Units string `json:"units,omitempty"`
nowFunc func() time.Time
winSize int
}
func (p *JSONProgress) String() string {
var (
width = p.width()
pbBox string
numbersBox string
timeLeftBox string
)
if p.Current <= 0 && p.Total <= 0 {
return ""
}
if p.Total <= 0 {
switch p.Units {
case "":
current := units.HumanSize(float64(p.Current))
return fmt.Sprintf("%8v", current)
default:
return fmt.Sprintf("%d %s", p.Current, p.Units)
}
}
percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
if percentage > 50 {
percentage = 50
}
if width > 110 {
// this number can't be negative gh#7136
numSpaces := 0
if 50-percentage > 0 {
numSpaces = 50 - percentage
}
pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
}
switch {
case p.HideCounts:
case p.Units == "": // no units, use bytes
current := units.HumanSize(float64(p.Current))
total := units.HumanSize(float64(p.Total))
numbersBox = fmt.Sprintf("%8v/%v", current, total)
if p.Current > p.Total {
// remove total display if the reported current is wonky.
numbersBox = fmt.Sprintf("%8v", current)
}
default:
numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)
if p.Current > p.Total {
// remove total display if the reported current is wonky.
numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units)
}
}
if p.Current > 0 && p.Start > 0 && percentage < 50 {
fromStart := p.now().Sub(time.Unix(p.Start, 0))
perEntry := fromStart / time.Duration(p.Current)
left := time.Duration(p.Total-p.Current) * perEntry
left = (left / time.Second) * time.Second
if width > 50 {
timeLeftBox = " " + left.String()
}
}
return pbBox + numbersBox + timeLeftBox
}
// shim for testing
func (p *JSONProgress) now() time.Time {
if p.nowFunc == nil {
p.nowFunc = func() time.Time {
return time.Now().UTC()
}
}
return p.nowFunc()
}
// shim for testing
func (p *JSONProgress) width() int {
if p.winSize != 0 {
return p.winSize
}
ws, err := term.GetWinsize(p.terminalFd)
if err == nil {
return int(ws.Width)
}
return 200
}
// JSONMessage defines a message struct. It describes
// the created time, where it from, status, ID of the
// message. It's used for docker events.
type JSONMessage struct {
Stream string `json:"stream,omitempty"`
Status string `json:"status,omitempty"`
Progress *JSONProgress `json:"progressDetail,omitempty"`
ProgressMessage string `json:"progress,omitempty"` //deprecated
ID string `json:"id,omitempty"`
From string `json:"from,omitempty"`
Time int64 `json:"time,omitempty"`
TimeNano int64 `json:"timeNano,omitempty"`
Error *JSONError `json:"errorDetail,omitempty"`
ErrorMessage string `json:"error,omitempty"` //deprecated
// Aux contains out-of-band data, such as digests for push signing and image id after building.
Aux *json.RawMessage `json:"aux,omitempty"`
}
/* Satisfied by gotty.TermInfo as well as noTermInfo from below */
type termInfo interface {
Parse(attr string, params ...interface{}) (string, error)
}
type noTermInfo struct{} // canary used when no terminfo.
func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) {
return "", fmt.Errorf("noTermInfo")
}
func clearLine(out io.Writer, ti termInfo) {
// el2 (clear whole line) is not exposed by terminfo.
// First clear line from beginning to cursor
if attr, err := ti.Parse("el1"); err == nil {
fmt.Fprintf(out, "%s", attr)
} else {
fmt.Fprintf(out, "\x1b[1K")
}
// Then clear line from cursor to end
if attr, err := ti.Parse("el"); err == nil {
fmt.Fprintf(out, "%s", attr)
} else {
fmt.Fprintf(out, "\x1b[K")
}
}
func cursorUp(out io.Writer, ti termInfo, l int) {
if l == 0 { // Should never be the case, but be tolerant
return
}
if attr, err := ti.Parse("cuu", l); err == nil {
fmt.Fprintf(out, "%s", attr)
} else {
fmt.Fprintf(out, "\x1b[%dA", l)
}
}
func cursorDown(out io.Writer, ti termInfo, l int) {
if l == 0 { // Should never be the case, but be tolerant
return
}
if attr, err := ti.Parse("cud", l); err == nil {
fmt.Fprintf(out, "%s", attr)
} else {
fmt.Fprintf(out, "\x1b[%dB", l)
}
}
// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out`
// is a terminal. If this is the case, it will erase the entire current line
// when displaying the progressbar.
func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
if jm.Error != nil {
if jm.Error.Code == 401 {
return fmt.Errorf("authentication is required")
}
return jm.Error
}
var endl string
if termInfo != nil && jm.Stream == "" && jm.Progress != nil {
clearLine(out, termInfo)
endl = "\r"
fmt.Fprintf(out, endl)
} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
return nil
}
if jm.TimeNano != 0 {
fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
} else if jm.Time != 0 {
fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
}
if jm.ID != "" {
fmt.Fprintf(out, "%s: ", jm.ID)
}
if jm.From != "" {
fmt.Fprintf(out, "(from %s) ", jm.From)
}
if jm.Progress != nil && termInfo != nil {
fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
} else if jm.ProgressMessage != "" { //deprecated
fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
} else if jm.Stream != "" {
fmt.Fprintf(out, "%s%s", jm.Stream, endl)
} else {
fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
}
return nil
}
// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal`
// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of
// each line and move the cursor while displaying.
func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error {
var (
dec = json.NewDecoder(in)
ids = make(map[string]int)
)
var termInfo termInfo
if isTerminal {
term := os.Getenv("TERM")
if term == "" {
term = "vt102"
}
var err error
if termInfo, err = gotty.OpenTermInfo(term); err != nil {
termInfo = &noTermInfo{}
}
}
for {
diff := 0
var jm JSONMessage
if err := dec.Decode(&jm); err != nil {
if err == io.EOF {
break
}
return err
}
if jm.Aux != nil {
if auxCallback != nil {
auxCallback(jm.Aux)
}
continue
}
if jm.Progress != nil {
jm.Progress.terminalFd = terminalFd
}
if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
line, ok := ids[jm.ID]
if !ok {
				// NOTE: This approach of using len(ids) to
// figure out the number of lines of history
// only works as long as we clear the history
// when we output something that's not
// accounted for in the map, such as a line
// with no ID.
line = len(ids)
ids[jm.ID] = line
if termInfo != nil {
fmt.Fprintf(out, "\n")
}
}
diff = len(ids) - line
if termInfo != nil {
cursorUp(out, termInfo, diff)
}
} else {
// When outputting something that isn't progress
// output, clear the history of previous lines. We
// don't want progress entries from some previous
// operation to be updated (for example, pull -a
// with multiple tags).
ids = make(map[string]int)
}
err := jm.Display(out, termInfo)
if jm.ID != "" && termInfo != nil {
cursorDown(out, termInfo, diff)
}
if err != nil {
return err
}
}
return nil
}
type stream interface {
io.Writer
FD() uintptr
IsTerminal() bool
}
// DisplayJSONMessagesToStream prints json messages to the output stream
func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error {
return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
}
| [
"\"TERM\""
]
| []
| [
"TERM"
]
| [] | ["TERM"] | go | 1 | 0 | |
abr/client_test.go | package abr
import (
"os"
"testing"
)
func init() {
guid, ok := os.LookupEnv("TEST_ABR_GUID")
if !ok {
panic("You must set TEST_ABR_GUID in order to run tests")
}
os.Setenv("ABR_GUID", guid)
}
func TestABRClient(t *testing.T) {
client, err := NewClient()
if err != nil {
t.Error(err)
return
}
if client.BaseURL.String() != BaseURL {
t.Errorf("Expected endpoint to be %s, got %s", BaseURL, client.BaseURL.String())
}
}
func TestABRClientNoEnvSet(t *testing.T) {
guid := os.Getenv("ABR_GUID")
os.Unsetenv("ABR_GUID")
defer os.Setenv("ABR_GUID", guid)
_, err := NewClient()
if err == nil {
t.Errorf("Expected an error, none was raised")
} else if err.Error() != MissingGUIDError {
t.Error(err)
}
return
}
var abnTestCases = []struct {
abn string
name string
}{
{"99124391073", "COzero Pty Ltd"},
{"26154482283", "Oneflare Pty Ltd"},
{"65433405893", "STUART J AULD"},
}
func TestSearchByABNv201408(t *testing.T) {
client, err := NewClient()
if err != nil {
t.Error(err)
return
}
for _, c := range abnTestCases {
entity, err := client.SearchByABNv201408(c.abn, true)
if err != nil {
t.Error(err)
continue
}
if entity.Name() != c.name {
t.Errorf("Expected %v, got %v", c.name, entity.Name())
}
if entity.ABN() != c.abn {
t.Errorf("Expected %v, got %v", c.abn, entity.ABN())
}
}
return
}
| [
"\"ABR_GUID\""
]
| []
| [
"ABR_GUID"
]
| [] | ["ABR_GUID"] | go | 1 | 0 | |
api/wsgi.py | """
WSGI config for api project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
ci/__init__.py | # MIT License
#
# Copyright (c) 2021 Mathieu Imfeld
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
version = os.environ.get('MRMAT_VERSION', '0.0.0.dev0')
| []
| []
| [
"MRMAT_VERSION"
]
| [] | ["MRMAT_VERSION"] | python | 1 | 0 | |
microservices/accommodations/accommApp.py | from fastapi import FastAPI, HTTPException
from dotenv import load_dotenv
import config, uvicorn, os, datetime, accommFunctions, accommModel
accommodationApp = FastAPI()
# Our root endpoint
@accommodationApp.get("/")
def index():
return {"message": "Hello World"}
# Hotel Information Endpoint:
# The function getHotelInfo queries a booking.com API for hotel information matching the
# user-supplied query parameters and returns the related hotel details.
# The function takes an optional parameter controlling how the results are ordered,
# which includes ordering by review score, price and popularity.
@accommodationApp.get("/accom/api/v1/")
def getHotelInfo(location: str, no_of_adults: int, no_of_children: int, no_of_rooms: int,
check_in: datetime.date, checkout: datetime.date, currency: str, order_by: accommModel.OrderByTypeModel | None = None):
hotel_list = []
location_id = accommFunctions.getLocationID(location)
today_date = datetime.date.today()
no_of_days = checkout - check_in
# error checks dates entered
if check_in < today_date or checkout < today_date or no_of_days.days < 0:
raise HTTPException(status_code=404, detail="Invalid Date")
# creates query according to the user inputs
if order_by is None and no_of_children > 0:
hotel_search_query = {"dest_id": location_id, "units": "metric", "order_by": "price", "adults_number": no_of_adults, "checkin_date": check_in,
"locale": "en-gb", "dest_type": "city", "filter_by_currency": "AED", "room_number": no_of_rooms, "checkout_date": checkout,
"page_number":"0","include_adjacency":"true", "children_number":no_of_children}
elif order_by is None and no_of_children == 0:
hotel_search_query = {"dest_id": location_id, "units": "metric", "order_by": "price", "adults_number": no_of_adults, "checkin_date": check_in,
"locale": "en-gb", "dest_type": "city", "filter_by_currency": "AED", "room_number": no_of_rooms, "checkout_date": checkout,
"page_number":"0","include_adjacency":"true"}
elif order_by is not None and no_of_children == 0:
hotel_search_query = {"dest_id": location_id, "units": "metric", "order_by": order_by.value, "adults_number": no_of_adults, "checkin_date": check_in,
"locale": "en-gb", "dest_type": "city", "filter_by_currency": "AED", "room_number": no_of_rooms, "checkout_date": checkout,
"page_number":"0","include_adjacency":"true"}
else:
hotel_search_query = {"dest_id": location_id, "units": "metric", "order_by": order_by.value, "adults_number": no_of_adults, "checkin_date": check_in,
"locale": "en-gb", "dest_type": "city", "filter_by_currency": "AED", "room_number": no_of_rooms, "checkout_date": checkout,
"page_number":"0","include_adjacency":"true", "children_number":no_of_children}
# send GET request to booking api
hotel_response = config.requests.request("GET", config.hotel_info_url, headers=config.headers, params=hotel_search_query)
# stores response from booking api
hotel_information = hotel_response.json()['result']
# gets exchange rates
exchange = accommFunctions.getCurrencyExchange(hotel_response.json()['result'][0]['currency_code'], currency)
# stores hotel information in a dictionary
for hotel in hotel_information:
hotel_list.append({"hotel_name": hotel['hotel_name'], "hotel_information": hotel['unit_configuration_label'], "hotel_longitude": hotel['longitude'], "hotel_latitude": hotel['latitude'],
"hotel_price": (float(hotel['min_total_price']) * float(exchange))/no_of_days.days,
"hotel_image": hotel['max_photo_url'], "hotel_address": hotel['address'], "hotel_url": hotel['url']})
return {"hotel_information": hotel_list}
if __name__ == "__main__":
uvicorn.run(accommodationApp, host=os.getenv("ACCOMMODATION_HOST"), port=int(os.getenv("ACCOMMODATION_PORT"))) | []
| []
| [
"ACCOMMODATION_PORT",
"ACCOMMODATION_HOST"
]
| [] | ["ACCOMMODATION_PORT", "ACCOMMODATION_HOST"] | python | 2 | 0 | |
server/router.go | package server
import (
"arena/controllers"
"os"
"strings"
"github.com/gin-gonic/gin"
)
func NewRouter() *gin.Engine {
router := gin.Default()
if proxies := os.Getenv("GIN_PROXIES"); proxies != "" {
p := strings.Split(proxies, ",")
router.SetTrustedProxies(p)
}
health := new(controllers.HealthController)
duel := new(controllers.DuelController)
router.GET("/health", health.Status)
router.POST("/duel", duel.PostDuel)
return router
}
| [
"\"GIN_PROXIES\""
]
| []
| [
"GIN_PROXIES"
]
| [] | ["GIN_PROXIES"] | go | 1 | 0 | |
ranger/engine.go | // Package ranger implements the main Engine.
package ranger
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"regexp"
"github.com/go-gl/glfw/v3.2/glfw"
"github.com/wdevore/ranger/config"
"github.com/wdevore/ranger/graphics"
"github.com/wdevore/ranger/window"
)
// Engine is the core component for launching and running the game.
type Engine struct {
// The game is the client of the Engine.
game GameShell
fullScreen bool
config config.Settings
engineError error
// ---------------------------------------------------------------------
// Timing
// ---------------------------------------------------------------------
currentUpdateTime float64
deltaUpdateTime float64
deltaTime float64
deltaRenderTime float64
currentRenderTime float64
currentSwapTime float64
deltaSwapTime float64
// ---------------------------------------------------------------------
// Window
// ---------------------------------------------------------------------
rWindow *window.RWindow
// ---------------------------------------------------------------------
// OpenGL
// ---------------------------------------------------------------------
Viewport graphics.Viewport
Camera graphics.Camera
View graphics.View
renderContext graphics.RenderContext
// ---------------------------------------------------------------------
// Stage and Scene
// ---------------------------------------------------------------------
stage *Stage
}
// NewEngine requires a GameShell for notifying the developer that
// it is valid to configure their game.
func NewEngine(gs GameShell) *Engine {
e := new(Engine)
e.game = gs
return e
}
// Launch the game.
func (e *Engine) Launch() error {
println("Engine configuring...")
e.loadConfig()
// Now notify developer that they can configure their game.
configured := e.game.Configure(e)
// Finally start the engine.
if configured {
if !e.config.Engine.Enabled {
return errors.New("engine is NOT enabled in config file")
}
e.engineError = e.start()
if e.engineError != nil {
return e.engineError
}
}
return nil
}
func (e *Engine) loadConfig() {
// Read configuration JSON file for pre-config settings.
// ex := os.Getenv("GOPATH")
// // if err != nil {
// // panic("An error occurred trying to get directory of executable")
// // }
// fmt.Printf("executable working directory: %s\n", ex)
// workingDirectory, errw := filepath.Abs(filepath.Dir(os.Args[0]))
// if errw != nil {
// panic("An error occurred trying to get working directory")
// }
// fmt.Printf("working directory: %s\n", workingDirectory)
file, err := ioutil.ReadFile("./config.json")
if err != nil {
panic("An error occurred trying to open ./config.json")
}
// The JSON must be preprocessed first to remove non-compliant comments.
pattern := `(\/\*[\w .>=:\"\-\n\r\t]*\*\/|\/\/[ .>=:\"\w\-]*)`
re, _ := regexp.Compile(pattern)
cleanedJSON := re.ReplaceAllString(string(file), "")
// fmt.Printf("%s\n", string(cleanedJSON))
err = json.Unmarshal([]byte(cleanedJSON), &e.config)
if err != nil {
s := fmt.Sprintf("Failed to Unmarshal json object: %s\n", err.Error())
panic(s)
}
}
// ----------------------------------------------------------------------------
// Life cycles
// ----------------------------------------------------------------------------
// start will not exit until engine is told to exit.
func (e *Engine) start() error {
println("Ranger Engine is starting...")
e.rWindow = window.NewRWindow()
err := e.rWindow.Construct(&e.config)
if err != nil {
return err
}
e.configureStage(&e.config)
e.renderContext.SetClearColors(graphics.Orange)
e.loop()
return nil
}
// Stop performs any last minute resource cleanups
func (e *Engine) Stop() {
println("Engine stopping...")
println("Engine stopped")
if e.engineError == nil {
glfw.Terminate()
}
}
func (e *Engine) loop() {
for e.rWindow.IsRunning() {
e.rWindow.Poll()
// ---------------- Update BEGIN -----------------------------
e.currentUpdateTime = glfw.GetTime()
e.deltaUpdateTime = glfw.GetTime() - e.currentUpdateTime
// ---------------- Update END -----------------------------
		// This clear is sync-locked with the vertical refresh. The clear itself
// takes ~30 microseconds on a mid-range mobile nvidia GPU.
e.renderContext.Clear()
e.rWindow.Swap()
}
}
func (e *Engine) configureStage(config *config.Settings) {
e.Viewport.SetDimensions(0, 0, config.Window.DeviceRes.Width, config.Window.DeviceRes.Height)
e.Viewport.Apply()
// Calc the aspect ratio between the physical (aka device) dimensions and the
// the virtual (aka user's design choice) dimensions.
deviceRatio := float64(config.Window.DeviceRes.Width) / float64(config.Window.DeviceRes.Height)
virtualRatio := float64(config.Window.VirtualRes.Width) / float64(config.Window.VirtualRes.Height)
xRatioCorrection := float64(config.Window.DeviceRes.Width) / float64(config.Window.VirtualRes.Width)
yRatioCorrection := float64(config.Window.DeviceRes.Height) / float64(config.Window.VirtualRes.Height)
var ratioCorrection float64
if virtualRatio < deviceRatio {
ratioCorrection = yRatioCorrection
} else {
ratioCorrection = xRatioCorrection
}
e.Camera.SetProjection(
float32(ratioCorrection),
0.0, 0.0,
float32(config.Window.DeviceRes.Height), float32(config.Window.DeviceRes.Width))
if config.Camera.Centered {
e.Camera.Centered()
}
e.View.SetProjection(config.Camera.View.X, config.Camera.View.Y, config.Camera.View.Z)
// -----------------------------------------------------------------
// Create stage
// -----------------------------------------------------------------
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
metropolis/test/launch/launch.go | // Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// launch implements test harnesses for running qemu VMs from tests.
package launch
import (
"bytes"
"context"
"errors"
"fmt"
"net"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"golang.org/x/sys/unix"
"google.golang.org/grpc"
"source.monogon.dev/metropolis/node"
"source.monogon.dev/metropolis/pkg/freeport"
)
type QemuValue map[string][]string
// ToOption encodes structured data into a QEMU option. Example: "test", {"key1":
// {"val1"}, "key2": {"val2", "val3"}} returns "test,key1=val1,key2=val2,key2=val3"
func (value QemuValue) ToOption(name string) string {
var optionValues []string
if name != "" {
optionValues = append(optionValues, name)
}
for name, values := range value {
if len(values) == 0 {
optionValues = append(optionValues, name)
}
for _, val := range values {
optionValues = append(optionValues, fmt.Sprintf("%v=%v", name, val))
}
}
return strings.Join(optionValues, ",")
}
// PortMap represents where VM ports are mapped to on the host. It maps from the VM
// port number to the host port number.
type PortMap map[node.Port]uint16
// ToQemuForwards generates QEMU hostfwd values (https://qemu.weilnetz.de/doc/qemu-
// doc.html#:~:text=hostfwd=) for all mapped ports.
func (p PortMap) ToQemuForwards() []string {
var hostfwdOptions []string
for vmPort, hostPort := range p {
hostfwdOptions = append(hostfwdOptions, fmt.Sprintf("tcp::%d-:%d", hostPort, vmPort))
}
return hostfwdOptions
}
// DialGRPC creates a gRPC client for a VM port that's forwarded/mapped to the
// host. The given port is automatically resolved to the host-mapped port.
func (p PortMap) DialGRPC(port node.Port, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
mappedPort, ok := p[port]
if !ok {
return nil, fmt.Errorf("cannot dial port: port %d is not mapped/forwarded", port)
}
grpcClient, err := grpc.Dial(fmt.Sprintf("localhost:%d", mappedPort), opts...)
if err != nil {
return nil, fmt.Errorf("failed to dial port %d: %w", port, err)
}
return grpcClient, nil
}
// IdentityPortMap returns a port map where each given port is mapped onto itself
// on the host. This is mainly useful for development against Metropolis. The dbg
// command requires this mapping.
func IdentityPortMap(ports []node.Port) PortMap {
portMap := make(PortMap)
for _, port := range ports {
portMap[port] = uint16(port)
}
return portMap
}
// ConflictFreePortMap returns a port map where each given port is mapped onto a
// random free port on the host. This is intended for automated testing where
// multiple instances of Metropolis nodes might be running. Please call this
// function for each Launch command separately and as close to it as possible since
// it cannot guarantee that the ports will remain free.
func ConflictFreePortMap(ports []node.Port) (PortMap, error) {
portMap := make(PortMap)
for _, port := range ports {
mappedPort, listenCloser, err := freeport.AllocateTCPPort()
if err != nil {
return portMap, fmt.Errorf("failed to get free host port: %w", err)
}
// Defer closing of the listening port until the function is done and all ports are
// allocated
defer listenCloser.Close()
portMap[port] = mappedPort
}
return portMap, nil
}
// NewSocketPair creates a new socket pair. By connecting both ends to different
// instances you can connect them with a virtual "network cable". The ends can be
// passed into the ConnectToSocket option.
func NewSocketPair() (*os.File, *os.File, error) {
fds, err := unix.Socketpair(unix.AF_UNIX, syscall.SOCK_STREAM, 0)
if err != nil {
return nil, nil, fmt.Errorf("failed to call socketpair: %w", err)
}
fd1 := os.NewFile(uintptr(fds[0]), "network0")
fd2 := os.NewFile(uintptr(fds[1]), "network1")
return fd1, fd2, nil
}
// HostInterfaceMAC is the MAC address the host SLIRP network interface has if it
// is not disabled (see DisableHostNetworkInterface in MicroVMOptions)
var HostInterfaceMAC = net.HardwareAddr{0x02, 0x72, 0x82, 0xbf, 0xc3, 0x56}
// MicroVMOptions contains all options to start a MicroVM
type MicroVMOptions struct {
// Path to the ELF kernel binary
KernelPath string
// Path to the Initramfs
InitramfsPath string
// Cmdline contains additional kernel commandline options
Cmdline string
// SerialPort is a File(descriptor) over which you can communicate with the serial
// port of the machine It can be set to an existing file descriptor (like
// os.Stdout/os.Stderr) or you can use NewSocketPair() to get one end to talk to
// from Go.
SerialPort *os.File
// ExtraChardevs can be used similar to SerialPort, but can contain an arbitrary
// number of additional serial ports
ExtraChardevs []*os.File
// ExtraNetworkInterfaces can contain an arbitrary number of file descriptors which
// are mapped into the VM as virtio network interfaces. The first interface is
// always a SLIRP-backed interface for communicating with the host.
ExtraNetworkInterfaces []*os.File
// PortMap contains ports that are mapped to the host through the built-in SLIRP
// network interface.
PortMap PortMap
// DisableHostNetworkInterface disables the SLIRP-backed host network interface
// that is normally the first network interface. If this is set PortMap is ignored.
// Mostly useful for speeding up QEMU's startup time for tests.
DisableHostNetworkInterface bool
}
// RunMicroVM launches a tiny VM mostly intended for testing. Very quick to boot
// (<40ms).
func RunMicroVM(ctx context.Context, opts *MicroVMOptions) error {
// Generate options for all the file descriptors we'll be passing as virtio "serial
// ports"
var extraArgs []string
	for idx := range opts.ExtraChardevs {
idxStr := strconv.Itoa(idx)
id := "extra" + idxStr
// That this works is pretty much a hack, but upstream QEMU doesn't have a
// bidirectional chardev backend not based around files/sockets on the disk which
// are a giant pain to work with. We're using QEMU's fdset functionality to make
// FDs available as pseudo-files and then "ab"using the pipe backend's fallback
// functionality to get a single bidirectional chardev backend backed by a passed-
// down RDWR fd. Ref https://lists.gnu.org/archive/html/qemu-devel/2015-
// 12/msg01256.html
addFdConf := QemuValue{
"set": {idxStr},
"fd": {strconv.Itoa(idx + 3)},
}
chardevConf := QemuValue{
"id": {id},
"path": {"/dev/fdset/" + idxStr},
}
deviceConf := QemuValue{
"chardev": {id},
}
extraArgs = append(extraArgs, "-add-fd", addFdConf.ToOption(""),
"-chardev", chardevConf.ToOption("pipe"), "-device", deviceConf.ToOption("virtserialport"))
}
	for idx := range opts.ExtraNetworkInterfaces {
id := fmt.Sprintf("net%v", idx)
netdevConf := QemuValue{
"id": {id},
"fd": {strconv.Itoa(idx + 3 + len(opts.ExtraChardevs))},
}
extraArgs = append(extraArgs, "-netdev", netdevConf.ToOption("socket"), "-device", "virtio-net-device,netdev="+id)
}
// This sets up a minimum viable environment for our Linux kernel. It clears all
// standard QEMU configuration and sets up a MicroVM machine
// (https://github.com/qemu/qemu/blob/master/docs/microvm.rst) with all legacy
// emulation turned off. This means the only "hardware" the Linux kernel inside can
// communicate with is a single virtio-mmio region. Over that MMIO interface we run
// a paravirtualized RNG (since the kernel in there has nothing to gather that from
// and it delays booting), a single paravirtualized console and an arbitrary number
// of extra serial ports for talking to various things that might run inside. The
// kernel, initramfs and command line are mapped into VM memory at boot time and
// not loaded from any sort of disk. Booting and shutting off one of these VMs
// takes <100ms.
baseArgs := []string{"-nodefaults", "-no-user-config", "-nographic", "-no-reboot",
"-accel", "kvm", "-cpu", "host",
// Needed until QEMU updates their bundled qboot version (needs
// https://github.com/bonzini/qboot/pull/28)
"-bios", "external/com_github_bonzini_qboot/bios.bin",
"-M", "microvm,x-option-roms=off,pic=off,pit=off,rtc=off,isa-serial=off",
"-kernel", opts.KernelPath,
// We force using a triple-fault reboot strategy since otherwise the kernel first
// tries others (like ACPI) which are not available in this very restricted
// environment. Similarly we need to override the boot console since there's
// nothing on the ISA bus that the kernel could talk to. We also force quiet for
// performance reasons.
"-append", "reboot=t console=hvc0 quiet " + opts.Cmdline,
"-initrd", opts.InitramfsPath,
"-device", "virtio-rng-device,max-bytes=1024,period=1000",
"-device", "virtio-serial-device,max_ports=16",
"-chardev", "stdio,id=con0", "-device", "virtconsole,chardev=con0",
}
if !opts.DisableHostNetworkInterface {
qemuNetType := "user"
qemuNetConfig := QemuValue{
"id": {"usernet0"},
"net": {"10.42.0.0/24"},
"dhcpstart": {"10.42.0.10"},
}
if opts.PortMap != nil {
qemuNetConfig["hostfwd"] = opts.PortMap.ToQemuForwards()
}
baseArgs = append(baseArgs, "-netdev", qemuNetConfig.ToOption(qemuNetType),
"-device", "virtio-net-device,netdev=usernet0,mac="+HostInterfaceMAC.String())
}
var stdErrBuf bytes.Buffer
cmd := exec.CommandContext(ctx, "qemu-system-x86_64", append(baseArgs, extraArgs...)...)
cmd.Stdout = opts.SerialPort
cmd.Stderr = &stdErrBuf
cmd.ExtraFiles = append(cmd.ExtraFiles, opts.ExtraChardevs...)
cmd.ExtraFiles = append(cmd.ExtraFiles, opts.ExtraNetworkInterfaces...)
err := cmd.Run()
// If it's a context error, just quit. There's no way to tell a
// killed-due-to-context vs killed-due-to-external-reason error returned by Run,
// so we approximate by looking at the context's status.
if err != nil && ctx.Err() != nil {
return ctx.Err()
}
var exerr *exec.ExitError
if err != nil && errors.As(err, &exerr) {
exerr.Stderr = stdErrBuf.Bytes()
newErr := QEMUError(*exerr)
return &newErr
}
return err
}
// QEMUError is a special type of ExitError used when QEMU fails. In addition to
// normal ExitError features it prints stderr for debugging.
type QEMUError exec.ExitError
func (e *QEMUError) Error() string {
return fmt.Sprintf("%v: %v", e.String(), string(e.Stderr))
}
| []
| []
| []
| [] | [] | go | null | null | null |
dbus_digitalinputs.py | #!/usr/bin/python3 -u
import sys, os
import signal
from threading import Thread
from select import select, epoll, EPOLLPRI
from functools import partial
from collections import namedtuple
from argparse import ArgumentParser
import traceback
sys.path.insert(1, os.path.join(os.path.dirname(__file__), 'ext', 'velib_python'))
from dbus.mainloop.glib import DBusGMainLoop
import dbus
from gi.repository import GLib
from vedbus import VeDbusService
from settingsdevice import SettingsDevice
VERSION = '0.15'
MAXCOUNT = 2**31-1
SAVEINTERVAL = 60000
INPUT_FUNCTION_COUNTER = 1
INPUT_FUNCTION_INPUT = 2
Translation = namedtuple('Translation', ['no', 'yes'])
# Only append at the end
INPUTTYPES = [
'Disabled',
'Pulse meter',
'Door',
'Bilge pump',
'Bilge alarm',
'Burglar alarm',
'Smoke alarm',
'Fire alarm',
'CO2 alarm',
'Generator'
]
# Translations. The text will be used only for GetText, it will be translated
# in the gui.
TRANSLATIONS = [
Translation('low', 'high'),
Translation('off', 'on'),
Translation('no', 'yes'),
Translation('open', 'closed'),
Translation('ok', 'alarm'),
Translation('running', 'stopped')
]
class SystemBus(dbus.bus.BusConnection):
def __new__(cls):
return dbus.bus.BusConnection.__new__(cls, dbus.bus.BusConnection.TYPE_SYSTEM)
class SessionBus(dbus.bus.BusConnection):
def __new__(cls):
return dbus.bus.BusConnection.__new__(cls, dbus.bus.BusConnection.TYPE_SESSION)
class BasePulseCounter(object):
pass
class DebugPulseCounter(BasePulseCounter):
def __init__(self):
self.gpiomap = {}
def register(self, path, gpio):
self.gpiomap[gpio] = None
return 0
def unregister(self, gpio):
del self.gpiomap[gpio]
def registered(self, gpio):
return gpio in self.gpiomap
def __call__(self):
from itertools import cycle
from time import sleep
for level in cycle([0, 1]):
for gpio in list(self.gpiomap.keys()):
yield gpio, level
sleep(0.25/len(self.gpiomap))
class EpollPulseCounter(BasePulseCounter):
def __init__(self):
self.gpiomap = {}
self.states = {}
self.ob = epoll()
def register(self, path, gpio):
path = os.path.realpath(path)
# Set up gpio for rising edge interrupts
with open(os.path.join(path, 'edge'), 'ab') as fp:
fp.write(b'both')
fp = open(os.path.join(path, 'value'), 'rb')
level = int(fp.read()) # flush it in case it's high at startup
self.gpiomap[gpio] = fp
self.states[gpio] = level
self.ob.register(fp, EPOLLPRI)
return level
def unregister(self, gpio):
fp = self.gpiomap[gpio]
self.ob.unregister(fp)
del self.gpiomap[gpio]
del self.states[gpio]
fp.close()
def registered(self, gpio):
return gpio in self.gpiomap
def __call__(self):
while True:
# We have a timeout of 1 second on the poll, because poll() only
# looks at files in the epoll object at the time poll() was called.
# The timeout means we let other files (added via calls to
# register/unregister) into the loop at least that often.
self.ob.poll(1)
# When coming out of the epoll call, we read all the gpios to make
# sure we didn't miss any edges. This is a safety fallback that
# ensures everything is up to date once a second, but
# edge-triggered results are handled immediately.
# NOTE: There has not been a report of a missed interrupt yet.
# Belts and suspenders.
for gpio, fp in list(self.gpiomap.items()):
os.lseek(fp.fileno(), 0, os.SEEK_SET)
v = int(os.read(fp.fileno(), 1))
if v != self.states[gpio]:
self.states[gpio] = v
yield gpio, v
class PollingPulseCounter(BasePulseCounter):
def __init__(self):
self.gpiomap = {}
def register(self, path, gpio):
path = os.path.realpath(path)
fp = open(os.path.join(path, 'value'), 'rb')
level = int(fp.read())
self.gpiomap[gpio] = [fp, level]
return level
def unregister(self, gpio):
del self.gpiomap[gpio]
def registered(self, gpio):
return gpio in self.gpiomap
def __call__(self):
from itertools import cycle
from time import sleep
while True:
for gpio, (fp, level) in list(self.gpiomap.items()):
fp.seek(0, os.SEEK_SET)
v = int(fp.read())
if v != level:
self.gpiomap[gpio][1] = v
yield gpio, v
sleep(1)
class HandlerMaker(type):
""" Meta-class for keeping track of all extended classes. """
def __init__(cls, name, bases, attrs):
if not hasattr(cls, 'handlers'):
cls.handlers = {}
else:
cls.handlers[cls.type_id] = cls
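# Illustrative note (an added sketch, not original code): thanks to the metaclass above,
# every PinHandler subclass defined below registers itself in PinHandler.handlers keyed
# by its type_id, so createHandler can dispatch on the configured input type without an
# explicit if/elif chain, roughly:
#
#   handler = PinHandler.createHandler(2, bus, base, path, gpio, settings)
#   # -> a DoorSensor instance, because DoorSensor.type_id == 2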
class PinHandler(object, metaclass=HandlerMaker):
product_id = 0xFFFF
_product_name = 'Generic GPIO'
dbus_name = "digital"
def __init__(self, bus, base, path, gpio, settings):
self.gpio = gpio
self.path = path
self.bus = bus
self.settings = settings
self._level = 0 # Remember last state
self.service = VeDbusService(
"{}.{}.input{:02d}".format(base, self.dbus_name, gpio), bus=bus)
# Add objects required by ve-api
self.service.add_path('/Mgmt/ProcessName', __file__)
self.service.add_path('/Mgmt/ProcessVersion', VERSION)
self.service.add_path('/Mgmt/Connection', path)
self.service.add_path('/DeviceInstance', gpio)
self.service.add_path('/ProductId', self.product_id)
self.service.add_path('/ProductName', self.product_name)
self.service.add_path('/Connected', 1)
# Custom name setting
def _change_name(p, v):
# This should fire a change event that will update product_name
# below.
settings['name'] = v
return True
self.service.add_path('/CustomName', settings['name'], writeable=True,
onchangecallback=_change_name)
# We'll count the pulses for all types of services
self.service.add_path('/Count', value=settings['count'])
@property
def product_name(self):
return self.settings['name'] or self._product_name
@product_name.setter
def product_name(self, v):
# Some pin types don't have an associated service (Disabled pins for
# example)
if self.service is not None:
self.service['/ProductName'] = v or self._product_name
def deactivate(self):
self.save_count()
self.service.__del__()
del self.service
self.service = None
@property
def level(self):
return self._level
@level.setter
def level(self, l):
self._level = int(bool(l))
def toggle(self, level):
# Only increment Count on rising edge.
if level and level != self._level:
self.service['/Count'] = (self.service['/Count']+1) % MAXCOUNT
self._level = level
def refresh(self):
""" Toggle state to last remembered state. This is called if settings
are changed so the Service can recalculate paths. """
self.toggle(self._level)
def save_count(self):
if self.service is not None:
self.settings['count'] = self.count
@property
def active(self):
return self.service is not None
@property
def count(self):
return self.service['/Count']
@count.setter
def count(self, v):
self.service['/Count'] = v
@classmethod
def createHandler(cls, _type, *args, **kwargs):
if _type in cls.handlers:
return cls.handlers[_type](*args, **kwargs)
return None
class DisabledPin(PinHandler):
""" Place holder for a disabled pin. """
_product_name = 'Disabled'
type_id = 0
def __init__(self, bus, base, path, gpio, settings):
self.service = None
self.bus = bus
self.settings = settings
self._level = 0 # Remember last state
def deactivate(self):
pass
def toggle(self, level):
self._level = level
def save_count(self):
# Do nothing
pass
@property
def count(self):
return self.settings['count']
@count.setter
def count(self, v):
pass
def refresh(self):
pass
class VolumeCounter(PinHandler):
product_id = 0xA165
_product_name = "Generic pulse meter"
dbus_name = "pulsemeter"
type_id = 1
def __init__(self, bus, base, path, gpio, settings):
super(VolumeCounter, self).__init__(bus, base, path, gpio, settings)
self.service.add_path('/Aggregate', value=self.count*self.rate,
gettextcallback=lambda p, v: (str(v) + ' cubic meter'))
@property
def rate(self):
return self.settings['rate']
def toggle(self, level):
super(VolumeCounter, self).toggle(level)
self.service['/Aggregate'] = self.count * self.rate
class PinAlarm(PinHandler):
product_id = 0xA166
_product_name = "Generic digital input"
dbus_name = "digitalinput"
type_id = 0xFF
translation = 0 # low, high
def __init__(self, bus, base, path, gpio, settings):
super(PinAlarm, self).__init__(bus, base, path, gpio, settings)
self.service.add_path('/InputState', value=0)
self.service.add_path('/State', value=self.get_state(0),
gettextcallback=lambda p, v: TRANSLATIONS[v//2][v%2])
self.service.add_path('/Alarm', value=self.get_alarm_state(0))
# Also expose the type
self.service.add_path('/Type', value=self.type_id,
gettextcallback=lambda p, v: INPUTTYPES[v])
def toggle(self, level):
super(PinAlarm, self).toggle(level)
self.service['/InputState'] = bool(level)*1
self.service['/State'] = self.get_state(level)
# Ensure that the alarm flag resets if the /AlarmSetting config option
# disappears.
self.service['/Alarm'] = self.get_alarm_state(level)
def get_state(self, level):
state = level ^ self.settings['invert']
return 2 * self.translation + state
def get_alarm_state(self, level):
return 2 * bool(
(level ^ self.settings['invertalarm']) and self.settings['alarm'])
# Various types of things we might want to monitor
class DoorSensor(PinAlarm):
_product_name = "Door alarm"
type_id = 2
translation = 3 # open, closed
class BilgePump(PinAlarm):
_product_name = "Bilge pump"
type_id = 3
translation = 1 # off, on
class BilgeAlarm(PinAlarm):
_product_name = "Bilge alarm"
type_id = 4
translation = 4 # ok, alarm
class BurglarAlarm(PinAlarm):
_product_name = "Burglar alarm"
type_id = 5
translation = 4 # ok, alarm
class SmokeAlarm(PinAlarm):
_product_name = "Smoke alarm"
type_id = 6
translation = 4 # ok, alarm
class FireAlarm(PinAlarm):
_product_name = "Fire alarm"
type_id = 7
translation = 4 # ok, alarm
class CO2Alarm(PinAlarm):
_product_name = "CO2 alarm"
type_id = 8
translation = 4 # ok, alarm
class Generator(PinAlarm):
_product_name = "Generator"
type_id = 9
translation = 5 # running, stopped
def dbusconnection():
return SessionBus() if 'DBUS_SESSION_BUS_ADDRESS' in os.environ else SystemBus()
def main():
parser = ArgumentParser(description=sys.argv[0])
parser.add_argument('--servicebase',
help='Base service name on dbus, default is com.victronenergy',
default='com.victronenergy')
parser.add_argument('--poll',
help='Use a different kind of polling. Options are epoll, dumb and debug',
default='epoll')
parser.add_argument('inputs', nargs='+', help='Path to digital input')
args = parser.parse_args()
PulseCounter = {
'debug': DebugPulseCounter,
'poll': PollingPulseCounter,
}.get(args.poll, EpollPulseCounter)
DBusGMainLoop(set_as_default=True)
# Keep track of enabled services
services = {}
inputs = dict(enumerate(args.inputs, 1))
pulses = PulseCounter() # callable that iterates over pulses
def register_gpio(path, gpio, bus, settings):
_type = settings['inputtype']
print ("Registering GPIO {} for type {}".format(gpio, _type))
handler = PinHandler.createHandler(_type,
bus, args.servicebase, path, gpio, settings)
services[gpio] = handler
# Only monitor if enabled
if _type > 0:
handler.level = pulses.register(path, gpio)
handler.refresh()
def unregister_gpio(gpio):
print ("unRegistering GPIO {}".format(gpio))
pulses.unregister(gpio)
services[gpio].deactivate()
def handle_setting_change(inp, setting, old, new):
if setting == 'inputtype':
if new:
# Get current bus and settings objects, to be reused
service = services[inp]
bus, settings = service.bus, service.settings
# Input enabled. If already enabled, unregister the old one first.
if pulses.registered(inp):
unregister_gpio(inp)
# Before registering the new input, reset its settings to defaults
settings['count'] = 0
settings['invert'] = 0
settings['invertalarm'] = 0
settings['alarm'] = 0
# Register it
register_gpio(inputs[inp], inp, bus, settings)
elif old:
# Input disabled
unregister_gpio(inp)
elif setting in ('rate', 'invert', 'alarm', 'invertalarm'):
services[inp].refresh()
elif setting == 'name':
services[inp].product_name = new
elif setting == 'count':
# Don't want this triggered on a period save, so only execute
# if it has changed.
v = int(new)
s = services[inp]
if s.count != v:
s.count = v
s.refresh()
for inp, pth in inputs.items():
supported_settings = {
'inputtype': ['/Settings/DigitalInput/{}/Type'.format(inp), 0, 0, len(INPUTTYPES)],
'rate': ['/Settings/DigitalInput/{}/Multiplier'.format(inp), 0.001, 0, 1.0],
'count': ['/Settings/DigitalInput/{}/Count'.format(inp), 0, 0, MAXCOUNT, 1],
'invert': ['/Settings/DigitalInput/{}/InvertTranslation'.format(inp), 0, 0, 1],
'invertalarm': ['/Settings/DigitalInput/{}/InvertAlarm'.format(inp), 0, 0, 1],
'alarm': ['/Settings/DigitalInput/{}/AlarmSetting'.format(inp), 0, 0, 1],
'name': ['/Settings/DigitalInput/{}/CustomName'.format(inp), '', '', ''],
}
bus = dbusconnection()
sd = SettingsDevice(bus, supported_settings, partial(handle_setting_change, inp), timeout=10)
register_gpio(pth, inp, bus, sd)
def poll(mainloop):
from time import time
idx = 0
try:
for inp, level in pulses():
# epoll object only resyncs once a second. We may receive
# a pulse for something that's been deregistered.
try:
services[inp].toggle(level)
except KeyError:
continue
except:
traceback.print_exc()
mainloop.quit()
# Need to run the gpio polling in separate thread. Pass in the mainloop so
# the thread can kill us if there is an exception.
mainloop = GLib.MainLoop()
poller = Thread(target=lambda: poll(mainloop))
poller.daemon = True
poller.start()
# Periodically save the counter
def save_counters():
for inp in inputs:
services[inp].save_count()
return True
GLib.timeout_add(SAVEINTERVAL, save_counters)
# Save counter on shutdown
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
mainloop.run()
except KeyboardInterrupt:
pass
finally:
save_counters()
if __name__ == "__main__":
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
db_lib.py | import os
import time
import six
import uuid
import amostra.client.commands as acc
import conftrak.client.commands as ccc
from analysisstore.client.commands import AnalysisClient
import conftrak.exceptions
import logging
logger = logging.getLogger(__name__)
#12/19 - Skinner inherited this from Hugo, who inherited it from Matt. Arman wrote the underlying DB and left BNL in 2018.
# TODO: get the beamline_id from parameter
BEAMLINE_ID = '17ID1'
sample_ref = None
container_ref = None
request_ref = None
configuration_ref = None
mds_ref = None
analysis_ref = None
main_server = os.environ['MONGODB_HOST']
services_config = {
'amostra': {'host': main_server, 'port': '7770'},
'conftrak': {'host': main_server, 'port': '7771'},
'metadataservice': {'host': main_server, 'port': '7772'},
'analysisstore': {'host': main_server, 'port': '7773'}
}
def db_connect(params=services_config):
global sample_ref,container_ref,request_ref,configuration_ref,analysis_ref
"""
recommended idiom:
"""
sample_ref = acc.SampleReference(**params['amostra'])
container_ref = acc.ContainerReference(**params['amostra'])
request_ref = acc.RequestReference(**params['amostra'])
configuration_ref = ccc.ConfigurationReference(**services_config['conftrak'])
analysis_ref = AnalysisClient(services_config['analysisstore'])
logger.info(analysis_ref)
# should be in config :(
primaryDewarName = 'primaryDewarJohn'
#connect on import
db_connect()
def setCurrentUser(beamline,userName): #for now username, although these should be unique
setBeamlineConfigParam(beamline,"user",userName)
def getCurrentUser(beamline): #for now username, although these should be unique
return getBeamlineConfigParam(beamline,"user")
def setPrimaryDewarName(dewarName):
global primaryDewarName
primaryDewarName = dewarName
def searchBeamline(**kwargs):
try:
return list(configuration_ref.find(key="beamline", **kwargs))
except StopIteration:
return None
def getBeamlineByNumber(num):
"""eg. 17id1, 17id2, 16id1"""
try:
return list(configuration_ref.find(key="beamline", number=num))
except StopIteration:
return None
def createContainer(name, capacity, owner, kind, **kwargs): #16_pin_puck, automounterDewar, shippingDewar
"""
container_name: string, name for the new container, required
kwargs: passed to constructor
"""
if capacity is not None:
kwargs['content'] = [""]*capacity
uid = container_ref.create(name=name, owner=owner, kind=kind, **kwargs)
return uid
def updateContainer(cont_info): #really updating the contents
cont = cont_info['uid']
q = {'uid': cont_info.pop('uid', '')}
cont_info.pop('time', '')
container_ref.update(q, {'content':cont_info['content']})
return cont
def createSample(sample_name, owner, kind, proposalID=None, **kwargs):
"""
sample_name: string, name for the new sample, required
kwargs: passed to constructor
"""
# initialize request count to zero
if 'request_count' not in kwargs:
kwargs['request_count'] = 0
uid = sample_ref.create(name=sample_name, owner=owner,kind=kind,proposalID=proposalID,**kwargs)
return uid
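# A hedged example (added for illustration; the literal values are assumptions, not taken
# from the original module) of chaining the sample helper above with the request helper
# defined further below:
#
#   sample_uid = createSample("mySample", owner="someUser", kind="pin", proposalID="123456")
#   addRequesttoSample(sample_uid, "standard", "someUser", request_obj={"exposure": 0.02})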
def incrementSampleRequestCount(sample_id):
"""
increment the 'request_count' attribute of the specified sample by 1
"""
# potential for race here?
#skinner - I don't understand this line sample_ref.update(query={'uid': sample_id}, update={'$inc': {'request_count': 1}})
reqCount = getSampleRequestCount(sample_id)+1
sample_ref.update({'uid': sample_id},{'request_count':reqCount})
return getSampleRequestCount(sample_id)
def getSampleRequestCount(sample_id):
"""
get the 'request_count' attribute of the specified sample
"""
s = getSampleByID(sample_id)
return s['request_count']
def getRequestsBySampleID(sample_id, active_only=True):
"""
return a list of request dictionaries for the given sample_id
"""
params = {'sample': sample_id}
if active_only:
params['state'] = "active"
reqs = list(request_ref.find(**params))
return reqs
def getSampleByID(sample_id):
"""
sample_id: required, integer
"""
s = list(sample_ref.find(uid=sample_id))
if (s):
return s[0]
else:
return {}
def getSampleNamebyID(sample_id):
"""
sample_id: required, integer
"""
s = getSampleByID(sample_id)
if (s==None):
return ''
else:
return s['name']
def getSamplesbyOwner(owner): #skinner
s = sample_ref.find(owner=owner)
return [samp['uid'] for samp in s]
def getSampleIDbyName(sampleName,owner):
"""
sample_id: required, integer
"""
samples = list(sample_ref.find(owner=owner,name=sampleName))
if (samples != []):
return samples[0]["uid"]
else:
return ""
def getContainerIDbyName(container_name,owner):
containers = list(container_ref.find(owner=owner,name=container_name))
if (containers != []):
return containers[0]["uid"]
else:
return ""
def getContainerNameByID(container_id):
"""
container_id: required, integer
"""
c = list(container_ref.find(uid=container_id))
return c[0]['name']
def createResult(result_type, owner,request_id=None, sample_id=None, result_obj=None, proposalID=None,
**kwargs):
"""
result_type: string
request_id: int
sample_id: int
result_obj: dict to attach
"""
header = analysis_ref.insert_analysis_header(result_type=result_type,owner=owner, uid=str(uuid.uuid4()),
sample=sample_id, request=request_id,
provenance={'lsdc':1}, result_obj=result_obj,proposalID=proposalID,time=time.time(),**kwargs)
logger.info("uuid of result inserted into analysisstore: %s" % header)
return header
def getResult(result_id):
"""
result_id: required, int
"""
header = list(analysis_ref.find_analysis_header(uid=result_id))
return header[0]
def getResultsforRequest(request_id):
"""
Takes an integer request_id and returns a list of matching results or [].
"""
resultGen = analysis_ref.find_analysis_header(request=request_id)
if (resultGen != None):
headers = list(resultGen)
return headers
else:
return []
def getResultsforSample(sample_id):
"""
Takes a sample_id and returns it's resultsList or [].
"""
headers = list(analysis_ref.find_analysis_header(sample=sample_id))
return headers
def getRequestByID(request_id, active_only=True):
"""
return a list of request dictionaries for the given request_id
"""
params = {'uid': request_id}
if active_only:
params['state'] = "active"
req = list(request_ref.find(**params))[0]
return req
def addResultforRequest(result_type, request_id, owner,result_obj=None, **kwargs):
"""
like createResult, but also adds it to the resultList of result['sample_id']
"""
sample = getRequestByID(request_id)['sample']
r = createResult(owner=owner,result_type=result_type, request_id=request_id, sample_id=sample, result_obj=result_obj, **kwargs)
return r
def addResulttoSample(result_type, sample_id, owner,result_obj=None, as_mongo_obj=False, proposalID=None,**kwargs):
"""
like addResulttoRequest, but without a request
"""
r = createResult(owner=owner,result_type=result_type, request_id=None, sample_id=sample_id, result_obj=result_obj, proposalID=proposalID,**kwargs)
return r
def addResulttoBL(result_type, beamline_id, owner,result_obj=None, proposalID=None,**kwargs):
"""
add result to beamline
beamline_id: the integer, 'beamline_id' field of the beamline entry
other fields are as for createRequest
"""
r = createResult(owner=owner,result_type=result_type, request_id=None, sample_id=None, result_obj=result_obj, beamline_id=beamline_id, proposalID=proposalID,**kwargs)
return r
def getResultsforBL(id=None, name=None, number=None):
"""
Retrieve results using either BL id, name, or number (tried in that order)
Returns a generator of results
"""
if id is None:
if name is None:
key = 'number'
val = number
else:
key = 'name'
val = name
query = {key: val}
b = searchBeamline(**query)
if b is None:
yield None
            return  # PEP 479: raising StopIteration inside a generator becomes a RuntimeError
id = b['uid']
if id is None:
yield None
        return  # PEP 479: raising StopIteration inside a generator becomes a RuntimeError
results = list(analysis_ref.find_analysis_header(beamline_id=id))
for r in results:
yield r
def addFile(data=None, filename=None):
"""
Put the file data into the GenericFile collection,
return the _id for use as an id or ReferenceField.
If a filename kwarg is given, read data from the file.
If a data kwarg is given or data is the 1st arg, store the data.
If both or neither is given, raise an error.
"""
#TODO: Decide what to do with this method
    raise NotImplementedError
'''
if filename is not None:
if data is not None:
raise ValueError('both filename and data kwargs given. can only use one.')
else:
with open(filename, 'r') as file: # do we need 'b' for binary?
data = file.read() # is this blocking? might not always get everything at once?!
elif data is None:
raise ValueError('neither filename or data kwargs given. need one.')
f = GenericFile(data=data)
f.save()
f.reload() # to fetch generated id
return f.to_dbref()
'''
def getFile(_id):
"""
Retrieve the data from the GenericFile collection
for the given _id or db_ref
Returns the data in Binary. If you know it's a txt file and want a string,
convert with str()
Maybe this will be automatically deref'd most of the time?
Only if they're mongoengine ReferenceFields...
"""
#TODO: Decide what to do with this method
    raise NotImplementedError
'''
try:
_id = _id.id
except AttributeError:
pass
f = GenericFile.objects(__raw__={'_id': _id}) # yes it's '_id' here but just 'id' below, gofigure
return _try0_dict_key(f, 'file', 'id', _id, None,
dict_key='data')
'''
def createRequest(request_type, owner, request_obj=None, as_mongo_obj=False, proposalID=None, **kwargs):
"""
request_type: required, name (string) of request type, dbref to it's db entry, or a Type object
request_obj: optional, stored as is, could be a dict of collection parameters, or whatever
priority: optional, integer priority level
anything else (priority, sample_id) can either be embedded in the
request_object or passed in as keyword args to get saved at the
top level.
"""
kwargs['request_type'] = request_type
kwargs['request_obj'] = request_obj
kwargs['owner'] = owner
kwargs['proposalID']=proposalID
uid = request_ref.create(**kwargs)
return uid
def addRequesttoSample(sample_id, request_type, owner,request_obj=None, as_mongo_obj=False, proposalID=None,**kwargs):
"""
sample_id: required, integer sample id
    request_type: required, name (string) of request type, dbref to its db entry, or a Type object
request_obj: optional, stored as is, could be a dict of collection parameters, or whatever
anything else (priority, sample_id) can either be embedded in the
request_object or passed in as keyword args to get saved at the
top level.
"""
kwargs['sample'] = sample_id
s = time.time()
r = createRequest(request_type, owner, request_obj=request_obj, as_mongo_obj=True, proposalID=proposalID,**kwargs)
t = time.time()-s
logger.info("add req = " + str(t))
return r
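# A minimal illustrative sketch of queuing a request against a sample; per the
# docstrings above, extra keyword args such as priority are stored at the top
# level of the request. The protocol "standard" and owner "demo_user" are
# hypothetical placeholders.
def _example_queue_standard_collection(sample_id):
    req_obj = {"protocol": "standard", "exposure_time": 0.02}
    return addRequesttoSample(sample_id, "standard", "demo_user",
                              request_obj=req_obj, priority=5)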
def insertIntoContainer(container_name, owner, position, itemID):
c = getContainerByName(container_name,owner)
if c is not None:
cnt = c['content']
cnt[position - 1] = itemID # most people don't zero index things
c['content'] = cnt
updateContainer(c)
return True
else:
logger.error("bad container name %s" % container_name)
return False
def emptyContainer(uid):
c = getContainerByID(uid)
if c is not None:
cnt = c['content']
for i in range (len(cnt)):
cnt[i] = ''
c['content'] = cnt
updateContainer(c)
return True
else:
logger.error("container not found")
return False
def getContainers(filters=None):
"""get *all* containers"""
if filters is not None:
c = list(container_ref.find(**filters)) #skinner - seems to break on compound filter
else:
c = list(container_ref.find())
return c
def getContainersByType(type_name, owner):
#TODO: group_name was not being used kept for compatibility
return getContainers(filters={"kind": type_name,"owner":owner})
def getAllPucks(owner): #shouldn't this be for owner?
    # find all the types descended from 'puck'?
# and then we could do this?
return getContainersByType("16_pin_puck", owner)
def getPrimaryDewar(beamline):
"""
returns the mongo object for a container with a name matching
the global variable 'primaryDewarName'
"""
return getContainerByName(primaryDewarName,beamline)
def getContainerByName(container_name,owner):
c = getContainers(filters={'name': container_name,'owner':owner})[0] #skinner, this should return only one, not a list
return c
def getContainerByID(container_id):
c = getContainers(filters={'uid': container_id})[0]
return c
def getQueue(beamlineName):
"""
returns a list of request dicts for all the samples in the container
named by the global variable 'primaryDewarName'
"""
    # seems like this would be a lot simpler if it weren't for the Nones?
ret_list = []
# try to only retrieve what we need...
# Use .first() instead of [0] here because when the query returns nothing,
# .first() returns None while [0] generates an IndexError
    # Nah... [0] is faster; just catch the exception instead...
DewarItems = []
try:
DewarItems = getPrimaryDewar(beamlineName)['content']
    except (IndexError, AttributeError):
raise ValueError('could not find container: "{0}"!'.format(primaryDewarName))
items = []
for item in DewarItems:
if (item != ""):
items.append(item)
sample_list = []
contents = [getContainerByID(uid)['content'] for uid in items]
for samp in contents:
if (samp != ""):
sample_list += samp
for s in sample_list:
reqs = getRequestsBySampleID(s, active_only=True)
for request in reqs:
yield request
def getQueueUnorderedObsolete(beamlineName):
"""
returns a list of request dicts for all the samples in the container
named by the global variable 'primaryDewarName'
"""
    # seems like this would be a lot simpler if it weren't for the Nones?
ret_list = []
# try to only retrieve what we need...
# Use .first() instead of [0] here because when the query returns nothing,
# .first() returns None while [0] generates an IndexError
    # Nah... [0] is faster; just catch the exception instead...
try:
items = getPrimaryDewar(beamlineName)['content']
    except (IndexError, AttributeError):
raise ValueError('could not find container: "{0}"!'.format(primaryDewarName))
items = set(items)
items.discard("") # skip empty positions
sample_list = []
contents = [getContainerByID(uid)['content'] for uid in items]
for samp in contents:
sil = set(samp)
sil.discard("")
sample_list += sil
for s in sample_list:
reqs = getRequestsBySampleID(s, active_only=True)
for request in reqs:
yield request
def queueDone(beamlineName):
ql = list(getQueue(beamlineName))
for i in range (0,len(ql)):
if (ql[i]['priority'] > 0):
return 0
return 1
def getCoordsfromSampleID(beamline,sample_id):
"""
returns the container position within the dewar and position in
that container for a sample with the given id in one of the
containers in the container named by the global variable
'primaryDewarName'
"""
try:
primary_dewar_item_list = getPrimaryDewar(beamline)['content']
    except (IndexError, AttributeError):
raise ValueError('could not find container: "{0}"!'.format(primaryDewarName))
#john try:
# eliminate empty item_list slots
pdil_set = set(primary_dewar_item_list)
    pdil_set.discard("")  # discard() mutates the set in place and returns None
# find container in the primary_dewar_item_list (pdil) which has the sample
filters = {'$and': [{'uid': {'$in':list(pdil_set)}}, {'content': {'$in':[sample_id]}}]}
c = getContainers(filters=filters)
# get the index of the found container in the primary dewar
i = primary_dewar_item_list.index(c[0]['uid'])
# get the index of the sample in the found container item_list
j = c[0]['content'].index(sample_id)
# get the container_id of the found container
puck_id = c[0]['uid']
return (i, j, puck_id)
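# A minimal illustrative sketch of consuming getCoordsfromSampleID from a mount
# routine; the beamline name "fmx" is a hypothetical placeholder.
def _example_lookup_mount_position(sample_id):
    dewar_pos, puck_pos, puck_id = getCoordsfromSampleID("fmx", sample_id)
    logger.info("sample %s is in puck %s (dewar slot %d, puck slot %d)"
                % (sample_id, puck_id, dewar_pos, puck_pos))
    return dewar_pos, puck_pos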
def popNextRequest(beamlineName):
"""
this just gives you the next one, it doesn't
actually pop it off the stack
"""
orderedRequests = getOrderedRequestList(beamlineName)
try:
if (orderedRequests[0]["priority"] != 99999):
if orderedRequests[0]["priority"] > 0:
return orderedRequests[0]
else: #99999 priority means it's running, try next
if orderedRequests[1]["priority"] > 0:
return orderedRequests[1]
except IndexError:
pass
return {}
def getRequestObsolete(reqID): # need to get this from searching the dewar I guess
#skinner - no idea reqID = int(reqID)
"""
request_id: required, integer id
"""
r = getRequestByID(reqID)
return r
def updateRequest(request_dict):
"""
This is not recommended once results are recorded for a request!
Using a new request instead would keep the apparent history
complete and intuitive. Although it won't hurt anything if you've
also recorded the request params used inside the results and query
against that, making requests basically ephemerally useful objects.
"""
if 'uid' in request_dict:
r_uid = request_dict.pop('uid', '')
s_time = request_dict.pop('time', '')
r = request_ref.update({'uid':r_uid},request_dict)
request_dict["uid"] = r_uid
request_dict["time"] = s_time
def deleteRequest(r_id):
"""
    r_id: required, the 'uid' of the request to mark inactive
"""
r = getRequestByID(r_id)
r['state'] = "inactive"
updateRequest(r)
def updateSample(sampleObj):
if 'uid' in sampleObj:
s_uid = sampleObj.pop('uid','')
s_time = sampleObj.pop('time','')
s = sample_ref.update({'uid': s_uid}, sampleObj)
def deleteSample(sample_uid):
s = getSampleByID(sample_uid)
    s['state'] = "inactive"  # mark the sample inactive (mirrors deleteRequest)
updateSample(s)
def removePuckFromDewar(beamline,dewarPos):
dewar = getPrimaryDewar(beamline)
dewar['content'][dewarPos] = ''
updateContainer(dewar)
def updatePriority(request_id, priority):
r = getRequestByID(request_id)
r['priority'] = priority
updateRequest(r)
def getPriorityMap(beamlineName):
"""
returns a dictionary with priorities as keys and lists of requests
having those priorities as values
"""
priority_map = {}
for request in getQueue(beamlineName):
try:
priority_map[request['priority']].append(request)
except KeyError:
priority_map[request['priority']] = [request]
return priority_map
def getOrderedRequestList(beamlineName):
"""
returns a list of requests sorted by priority
"""
orderedRequestsList = []
priority_map = getPriorityMap(beamlineName)
for priority in sorted(six.iterkeys(priority_map), reverse=True):
orderedRequestsList += priority_map[priority]
#for request in priority_map[priority]:
# yield request
# or if we want this to be a generator could it be more efficient
# with itertools.chain?
# foo=['abc','def','ghi']
# [a for a in itertools.chain(*foo)]
# ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
# or [a for a in itertools.chain.from_iterable(foo)]
return orderedRequestsList
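# A minimal illustrative sketch of walking the priority-ordered list built above
# to find the next runnable request, mirroring the 99999 "currently running"
# convention used in popNextRequest; the default beamline name is hypothetical.
def _example_next_runnable_request(beamlineName="fmx"):
    for req in getOrderedRequestList(beamlineName):
        if req["priority"] == 99999:
            continue  # already running, look further down the list
        if req["priority"] > 0:
            return req
    return None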
def createBeamline(bl_name, bl_num): #createBeamline("fmx", "17id1")
data = {"key": "beamline", "name": bl_name, "number": bl_num}
uid = configuration_ref.create(beamline_id=bl_num, **data)
return uid
def beamlineInfo(beamline_id, info_name, info_dict=None):
"""
to write info: beamlineInfo('x25', 'det', info_dict={'vendor':'adsc','model':'q315r'})
to fetch info: info = beamlineInfo('x25', 'det')
"""
# if it exists it's a query or update
try:
bli = list(configuration_ref.find(key='beamline_info', beamline_id=beamline_id, info_name=info_name))[0] #hugo put the [0]
if info_dict is None: # this is a query
return bli['info']
# else it's an update
bli_uid = bli.pop('uid', '')
configuration_ref.update({'uid': bli_uid},{'info':info_dict})
# else it's a create
except conftrak.exceptions.ConfTrakNotFoundException:
# edge case for 1st create in fresh database
        # in which case this is actually a query
if info_dict is None:
return {}
# normal create
data = {'key': 'beamline_info', 'info_name':info_name, 'info': info_dict}
uid = configuration_ref.create(beamline_id,**data)
def setBeamlineConfigParams(paramDict, searchParams):
# get current config
beamlineConfig = beamlineInfo(**searchParams)
# update with given param dict and last_modified
paramDict['last_modified'] = time.time()
beamlineConfig.update(paramDict)
# save
beamlineInfo(info_dict=beamlineConfig, **searchParams)
def setBeamlineConfigParam(beamline_id, paramName, paramVal):
beamlineInfo(beamline_id,paramName,{"val":paramVal})
def getBeamlineConfigParam(beamline_id, paramName):
return beamlineInfo(beamline_id,paramName)["val"]
def getAllBeamlineConfigParams(beamline_id):
g = configuration_ref.find(key='beamline_info', beamline_id=beamline_id)
configList = list(g)
return configList
def printAllBeamlineConfigParams(beamline_id):
configList = getAllBeamlineConfigParams(beamline_id)
for i in range (0,len(configList)):
try:
logger.info(configList[i]['info_name'] + " " + str(configList[i]['info']['val']))
except KeyError:
pass
def deleteCompletedRequestsforSample(sid):
return #short circuit, not what they wanted
logger.info("delete request " + sid)
requestList=getRequestsBySampleID(sid)
for i in range (0,len(requestList)):
if (requestList[i]["priority"] == -1): #good to clean up completed requests after unmount
if (requestList[i]["protocol"] == "raster" or requestList[i]["protocol"] == "vector"):
deleteRequest(requestList[i]['uid'])
| []
| []
| [
"MONGODB_HOST"
]
| [] | ["MONGODB_HOST"] | python | 1 | 0 | |
tools/python/runfiles/runfiles.py | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runfiles lookup library for Bazel-built Python binaries and tests.
USAGE:
1. Depend on this runfiles library from your build rule:
py_binary(
name = "my_binary",
...
deps = ["@rules_python//python/runfiles"],
)
2. Import the runfiles library.
from rules_python.python.runfiles import runfiles
3. Create a Runfiles object and use rlocation to look up runfile paths:
r = runfiles.Create()
...
with open(r.Rlocation("my_workspace/path/to/my/data.txt"), "r") as f:
contents = f.readlines()
...
The code above creates a manifest- or directory-based implementation based
on the environment variables in os.environ. See `Create()` for more info.
If you want to explicitly create a manifest- or directory-based
implementation, you can do so as follows:
r1 = runfiles.CreateManifestBased("path/to/foo.runfiles_manifest")
r2 = runfiles.CreateDirectoryBased("path/to/foo.runfiles/")
If you want to start subprocesses that also need runfiles, you need to set
the right environment variables for them:
import subprocess
from rules_python.python.runfiles import runfiles
r = runfiles.Create()
env = {}
...
env.update(r.EnvVars())
p = subprocess.Popen([r.Rlocation("path/to/binary")], env, ...)
"""
import os
import posixpath
def CreateManifestBased(manifest_path):
return _Runfiles(_ManifestBased(manifest_path))
def CreateDirectoryBased(runfiles_dir_path):
return _Runfiles(_DirectoryBased(runfiles_dir_path))
def Create(env=None):
"""Returns a new `Runfiles` instance.
The returned object is either:
- manifest-based, meaning it looks up runfile paths from a manifest file, or
- directory-based, meaning it looks up runfile paths under a given directory
path
If `env` contains "RUNFILES_MANIFEST_FILE" with non-empty value, this method
returns a manifest-based implementation. The object eagerly reads and caches
the whole manifest file upon instantiation; this may be relevant for
performance consideration.
Otherwise, if `env` contains "RUNFILES_DIR" with non-empty value (checked in
this priority order), this method returns a directory-based implementation.
  If neither case applies, this method returns None.
Args:
env: {string: string}; optional; the map of environment variables. If None,
this function uses the environment variable map of this process.
Raises:
IOError: if some IO error occurs.
"""
env_map = os.environ if env is None else env
manifest = env_map.get("RUNFILES_MANIFEST_FILE")
if manifest:
return CreateManifestBased(manifest)
directory = env_map.get("RUNFILES_DIR")
if directory:
return CreateDirectoryBased(directory)
return None
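# A minimal illustrative sketch (not part of the library API) showing how
# Create() picks a strategy from an explicitly supplied environment mapping;
# the runfiles directory and workspace paths are hypothetical.
def _example_create_from_explicit_env():
  r = Create({"RUNFILES_DIR": "/tmp/my_binary.runfiles"})
  return None if r is None else r.Rlocation("my_workspace/data/file.txt")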
class _Runfiles(object):
"""Returns the runtime location of runfiles.
Runfiles are data-dependencies of Bazel-built binaries and tests.
"""
def __init__(self, strategy):
self._strategy = strategy
def Rlocation(self, path):
"""Returns the runtime path of a runfile.
Runfiles are data-dependencies of Bazel-built binaries and tests.
The returned path may not be valid. The caller should check the path's
validity and that the path exists.
The function may return None. In that case the caller can be sure that the
rule does not know about this data-dependency.
Args:
path: string; runfiles-root-relative path of the runfile
Returns:
the path to the runfile, which the caller should check for existence, or
None if the method doesn't know about this runfile
Raises:
TypeError: if `path` is not a string
ValueError: if `path` is None or empty, or it's absolute or not normalized
"""
if not path:
raise ValueError()
if not isinstance(path, str):
raise TypeError()
if (path.startswith("../") or "/.." in path or path.startswith("./") or
"/./" in path or path.endswith("/.") or "//" in path):
raise ValueError("path is not normalized: \"%s\"" % path)
if path[0] == "\\":
raise ValueError("path is absolute without a drive letter: \"%s\"" % path)
if os.path.isabs(path):
return path
return self._strategy.RlocationChecked(path)
def EnvVars(self):
"""Returns environment variables for subprocesses.
The caller should set the returned key-value pairs in the environment of
subprocesses in case those subprocesses are also Bazel-built binaries that
need to use runfiles.
Returns:
{string: string}; a dict; keys are environment variable names, values are
the values for these environment variables
"""
return self._strategy.EnvVars()
class _ManifestBased(object):
"""`Runfiles` strategy that parses a runfiles-manifest to look up runfiles."""
def __init__(self, path):
if not path:
raise ValueError()
if not isinstance(path, str):
raise TypeError()
self._path = path
self._runfiles = _ManifestBased._LoadRunfiles(path)
def RlocationChecked(self, path):
"""Returns the runtime path of a runfile."""
exact_match = self._runfiles.get(path)
if exact_match:
return exact_match
# If path references a runfile that lies under a directory that itself is a
# runfile, then only the directory is listed in the manifest. Look up all
# prefixes of path in the manifest and append the relative path from the
# prefix to the looked up path.
prefix_end = len(path)
while True:
prefix_end = path.rfind("/", 0, prefix_end - 1)
if prefix_end == -1:
return None
prefix_match = self._runfiles.get(path[0:prefix_end])
if prefix_match:
return prefix_match + "/" + path[prefix_end + 1:]
@staticmethod
def _LoadRunfiles(path):
"""Loads the runfiles manifest."""
result = {}
with open(path, "r") as f:
for line in f:
line = line.strip()
if line:
tokens = line.split(" ", 1)
if len(tokens) == 1:
result[line] = line
else:
result[tokens[0]] = tokens[1]
return result
def _GetRunfilesDir(self):
if self._path.endswith("/MANIFEST") or self._path.endswith("\\MANIFEST"):
return self._path[:-len("/MANIFEST")]
elif self._path.endswith(".runfiles_manifest"):
return self._path[:-len("_manifest")]
else:
return ""
def EnvVars(self):
directory = self._GetRunfilesDir()
return {
"RUNFILES_MANIFEST_FILE": self._path,
"RUNFILES_DIR": directory,
# TODO(laszlocsomor): remove JAVA_RUNFILES once the Java launcher can
# pick up RUNFILES_DIR.
"JAVA_RUNFILES": directory,
}
class _DirectoryBased(object):
"""`Runfiles` strategy that appends runfiles paths to the runfiles root."""
def __init__(self, path):
if not path:
raise ValueError()
if not isinstance(path, str):
raise TypeError()
self._runfiles_root = path
def RlocationChecked(self, path):
# Use posixpath instead of os.path, because Bazel only creates a runfiles
# tree on Unix platforms, so `Create()` will only create a directory-based
# runfiles strategy on those platforms.
return posixpath.join(self._runfiles_root, path)
def EnvVars(self):
return {
"RUNFILES_DIR": self._runfiles_root,
# TODO(laszlocsomor): remove JAVA_RUNFILES once the Java launcher can
# pick up RUNFILES_DIR.
"JAVA_RUNFILES": self._runfiles_root,
}
def _PathsFrom(argv0, runfiles_mf, runfiles_dir, is_runfiles_manifest,
is_runfiles_directory):
"""Discover runfiles manifest and runfiles directory paths.
Args:
argv0: string; the value of sys.argv[0]
runfiles_mf: string; the value of the RUNFILES_MANIFEST_FILE environment
variable
runfiles_dir: string; the value of the RUNFILES_DIR environment variable
is_runfiles_manifest: lambda(string):bool; returns true if the argument is
the path of a runfiles manifest file
is_runfiles_directory: lambda(string):bool; returns true if the argument is
the path of a runfiles directory
Returns:
(string, string) pair, first element is the path to the runfiles manifest,
second element is the path to the runfiles directory. If the first element
is non-empty, then is_runfiles_manifest returns true for it. Same goes for
the second element and is_runfiles_directory respectively. If both elements
are empty, then this function could not find a manifest or directory for
which is_runfiles_manifest or is_runfiles_directory returns true.
"""
mf_alid = is_runfiles_manifest(runfiles_mf)
dir_valid = is_runfiles_directory(runfiles_dir)
if not mf_alid and not dir_valid:
runfiles_mf = argv0 + ".runfiles/MANIFEST"
runfiles_dir = argv0 + ".runfiles"
mf_alid = is_runfiles_manifest(runfiles_mf)
dir_valid = is_runfiles_directory(runfiles_dir)
if not mf_alid:
runfiles_mf = argv0 + ".runfiles_manifest"
mf_alid = is_runfiles_manifest(runfiles_mf)
if not mf_alid and not dir_valid:
return ("", "")
if not mf_alid:
runfiles_mf = runfiles_dir + "/MANIFEST"
mf_alid = is_runfiles_manifest(runfiles_mf)
if not mf_alid:
runfiles_mf = runfiles_dir + "_manifest"
mf_alid = is_runfiles_manifest(runfiles_mf)
if not dir_valid:
runfiles_dir = runfiles_mf[:-9] # "_manifest" or "/MANIFEST"
dir_valid = is_runfiles_directory(runfiles_dir)
return (runfiles_mf if mf_alid else "", runfiles_dir if dir_valid else "")
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/validate-krew-manifest/main.go | // Copyright 2019 The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// validate-krew-manifest makes sure a manifest file is valid.
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
"sigs.k8s.io/krew/internal/environment"
"sigs.k8s.io/krew/internal/index/indexscanner"
"sigs.k8s.io/krew/internal/index/validation"
"sigs.k8s.io/krew/internal/installation"
"sigs.k8s.io/krew/pkg/constants"
"sigs.k8s.io/krew/pkg/index"
)
var flManifest string
func init() {
flag.StringVar(&flManifest, "manifest", "", "path to plugin manifest file")
}
func main() {
// TODO(ahmetb) iterate over glog flags and hide them (not sure if possible without using pflag)
klog.InitFlags(nil)
if err := flag.Set("logtostderr", "true"); err != nil {
fmt.Printf("can't set log to stderr %+v", err)
os.Exit(1)
}
flag.Parse()
defer klog.Flush()
if flManifest == "" {
klog.Fatal("-manifest must be specified")
}
if err := validateManifestFile(flManifest); err != nil {
klog.Fatalf("%v", err) // with stack trace
}
}
func validateManifestFile(path string) error {
klog.Infof("reading file %q", path)
p, err := indexscanner.ReadPluginFromFile(path)
if err != nil {
return errors.Wrap(err, "failed to read plugin file")
}
filename := filepath.Base(path)
manifestExtension := filepath.Ext(filename)
if manifestExtension != constants.ManifestExtension {
return errors.Errorf("expected manifest extension %q but found %q", constants.ManifestExtension, manifestExtension)
}
pluginNameFromFileName := strings.TrimSuffix(filename, manifestExtension)
klog.V(4).Infof("inferred plugin name as %s", pluginNameFromFileName)
// validate plugin manifest
if err := validation.ValidatePlugin(pluginNameFromFileName, p); err != nil {
return errors.Wrap(err, "plugin validation error")
}
klog.Infof("structural validation OK")
// make sure each platform matches a supported platform
for i, p := range p.Spec.Platforms {
if env := findAnyMatchingPlatform(p.Selector); env.OS == "" || env.Arch == "" {
return errors.Errorf("spec.platform[%d]'s selector (%v) doesn't match any supported platforms", i, p.Selector)
}
}
klog.Infof("all spec.platform[] items are used")
// validate no supported <os,arch> is matching multiple platform specs
if err := isOverlappingPlatformSelectors(p.Spec.Platforms); err != nil {
return errors.Wrap(err, "overlapping platform selectors found")
}
klog.Infof("no overlapping spec.platform[].selector")
// exercise "install" for all platforms
for i, p := range p.Spec.Platforms {
klog.Infof("installing spec.platform[%d]", i)
if err := installPlatformSpec(path, p); err != nil {
return errors.Wrapf(err, "spec.platforms[%d] failed to install", i)
}
klog.Infof("installed spec.platforms[%d]", i)
}
log.Printf("all %d spec.platforms installed fine", len(p.Spec.Platforms))
return nil
}
// isOverlappingPlatformSelectors validates that no two platforms have selectors
// matching the same supported <os,arch> pair.
func isOverlappingPlatformSelectors(platforms []index.Platform) error {
for _, env := range allPlatforms() {
var matchIndex []int
for i, p := range platforms {
if selectorMatchesOSArch(p.Selector, env) {
matchIndex = append(matchIndex, i)
}
}
if len(matchIndex) > 1 {
return errors.Errorf("multiple spec.platforms (at indexes %v) have overlapping selectors that select %s", matchIndex, env)
}
}
return nil
}
// installPlatformSpec installs platform spec p to a temporary location on disk
// and verifies it by shelling out to an external "kubectl krew install" command.
func installPlatformSpec(manifestFile string, p index.Platform) error {
env := findAnyMatchingPlatform(p.Selector)
if env.OS == "" || env.Arch == "" {
return errors.Errorf("no supported platform matched platform selector: %+v", p.Selector)
}
tmpDir, err := ioutil.TempDir(os.TempDir(), "krew-test")
if err != nil {
return errors.Wrap(err, "failed to create temp dir for plugin install")
}
defer func() {
if err := os.RemoveAll(tmpDir); err != nil {
klog.Warningf("failed to remove temp dir: %s", tmpDir)
}
}()
cmd := exec.Command("kubectl", "krew", "install", "--manifest", manifestFile, "-v=4")
cmd.Stdin = nil
cmd.Env = []string{
"KREW_ROOT=" + tmpDir,
"KREW_OS=" + env.OS,
"KREW_ARCH=" + env.Arch,
}
klog.V(2).Infof("installing plugin with: %+v", cmd.Env)
cmd.Env = append(cmd.Env, "PATH="+os.Getenv("PATH"))
b, err := cmd.CombinedOutput()
if err != nil {
output := strings.ReplaceAll(string(b), "\n", "\n\t")
return errors.Wrapf(err, "plugin install command failed: %s", output)
}
err = validateLicenseFileExists(tmpDir)
return errors.Wrap(err, "LICENSE (or alike) file is not extracted from the archive as part of installation")
}
var licenseFiles = map[string]struct{}{
"license": {},
"license.txt": {},
"license.md": {},
"licenses": {},
"licenses.txt": {},
"licenses.md": {},
"copying": {},
"copying.txt": {},
}
func validateLicenseFileExists(krewRoot string) error {
dir := environment.NewPaths(krewRoot).InstallPath()
var files []string
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode().IsRegular() {
files = append(files, info.Name())
}
return nil
})
if err != nil {
return errors.Wrap(err, "failed to walk installation directory")
}
for _, f := range files {
klog.V(8).Infof("found installed file: %s", f)
if _, ok := licenseFiles[strings.ToLower(filepath.Base(f))]; ok {
klog.V(8).Infof("found license file %q", f)
return nil
}
}
return errors.Errorf("could not find license file among [%s]", strings.Join(files, ", "))
}
// findAnyMatchingPlatform returns an <os,arch> pair that matches the given selector.
func findAnyMatchingPlatform(selector *metav1.LabelSelector) installation.OSArchPair {
for _, p := range allPlatforms() {
if selectorMatchesOSArch(selector, p) {
klog.V(4).Infof("%s MATCHED <%s>", selector, p)
return p
}
klog.V(4).Infof("%s didn't match <%s>", selector, p)
}
return installation.OSArchPair{}
}
func selectorMatchesOSArch(selector *metav1.LabelSelector, env installation.OSArchPair) bool {
sel, err := metav1.LabelSelectorAsSelector(selector)
if err != nil {
// this should've been caught by validation.ValidatePlatform() earlier
klog.Warningf("Failed to convert label selector: %+v", selector)
return false
}
return sel.Matches(labels.Set{
"os": env.OS,
"arch": env.Arch,
})
}
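// matchingPlatformsForSelector is an illustrative sketch, not part of the
// original command: it combines selectorMatchesOSArch with allPlatforms() to
// enumerate the supported <os,arch> pairs a given selector matches. The
// function name is an assumption made for this example.
func matchingPlatformsForSelector(selector *metav1.LabelSelector) []installation.OSArchPair {
	var matched []installation.OSArchPair
	for _, p := range allPlatforms() {
		if selectorMatchesOSArch(selector, p) {
			matched = append(matched, p)
		}
	}
	return matched
}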
// allPlatforms returns all <os,arch> pairs krew is supported on.
func allPlatforms() []installation.OSArchPair {
// TODO(ahmetb) find a more authoritative source for this list
return []installation.OSArchPair{
{OS: "windows", Arch: "386"},
{OS: "windows", Arch: "amd64"},
{OS: "linux", Arch: "386"},
{OS: "linux", Arch: "amd64"},
{OS: "linux", Arch: "arm"},
{OS: "linux", Arch: "arm64"},
{OS: "darwin", Arch: "386"},
{OS: "darwin", Arch: "amd64"},
{OS: "darwin", Arch: "arm64"},
}
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
django_event_project/wsgi.py | """
WSGI config for django_event_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_event_project.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tools/vendor/github.com/minio/console/restapi/admin_tenants.go | // This file is part of MinIO Kubernetes Cloud
// Copyright (c) 2021 MinIO, Inc.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package restapi
import (
"bytes"
"context"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"net"
"net/http"
"os"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/console/pkg/auth/utils"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
corev1 "k8s.io/api/core/v1"
"github.com/minio/console/cluster"
"github.com/minio/madmin-go"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/swag"
"github.com/minio/console/models"
"github.com/minio/console/restapi/operations"
"github.com/minio/console/restapi/operations/admin_api"
miniov2 "github.com/minio/operator/pkg/apis/minio.min.io/v2"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sJson "k8s.io/apimachinery/pkg/runtime/serializer/json"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
type imageRegistry struct {
Auths map[string]imageRegistryCredentials `json:"auths"`
}
type imageRegistryCredentials struct {
Username string `json:"username"`
Password string `json:"password"`
Auth string `json:"auth"`
}
func registerTenantHandlers(api *operations.ConsoleAPI) {
// Add Tenant
api.AdminAPICreateTenantHandler = admin_api.CreateTenantHandlerFunc(func(params admin_api.CreateTenantParams, session *models.Principal) middleware.Responder {
resp, err := getTenantCreatedResponse(session, params)
if err != nil {
return admin_api.NewCreateTenantDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewCreateTenantOK().WithPayload(resp)
})
// List All Tenants of all namespaces
api.AdminAPIListAllTenantsHandler = admin_api.ListAllTenantsHandlerFunc(func(params admin_api.ListAllTenantsParams, session *models.Principal) middleware.Responder {
resp, err := getListAllTenantsResponse(session, params)
if err != nil {
return admin_api.NewListTenantsDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewListTenantsOK().WithPayload(resp)
})
// List Tenants by namespace
api.AdminAPIListTenantsHandler = admin_api.ListTenantsHandlerFunc(func(params admin_api.ListTenantsParams, session *models.Principal) middleware.Responder {
resp, err := getListTenantsResponse(session, params)
if err != nil {
return admin_api.NewListTenantsDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewListTenantsOK().WithPayload(resp)
})
// Detail Tenant
api.AdminAPITenantDetailsHandler = admin_api.TenantDetailsHandlerFunc(func(params admin_api.TenantDetailsParams, session *models.Principal) middleware.Responder {
resp, err := getTenantDetailsResponse(session, params)
if err != nil {
return admin_api.NewTenantDetailsDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewTenantDetailsOK().WithPayload(resp)
})
// Tenant Security details
api.AdminAPITenantSecurityHandler = admin_api.TenantSecurityHandlerFunc(func(params admin_api.TenantSecurityParams, session *models.Principal) middleware.Responder {
resp, err := getTenantSecurityResponse(session, params)
if err != nil {
return admin_api.NewTenantSecurityDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewTenantSecurityOK().WithPayload(resp)
})
// Update Tenant Security configuration
api.AdminAPIUpdateTenantSecurityHandler = admin_api.UpdateTenantSecurityHandlerFunc(func(params admin_api.UpdateTenantSecurityParams, session *models.Principal) middleware.Responder {
err := getUpdateTenantSecurityResponse(session, params)
if err != nil {
return admin_api.NewUpdateTenantSecurityDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewUpdateTenantSecurityNoContent()
})
// Delete Tenant
api.AdminAPIDeleteTenantHandler = admin_api.DeleteTenantHandlerFunc(func(params admin_api.DeleteTenantParams, session *models.Principal) middleware.Responder {
err := getDeleteTenantResponse(session, params)
if err != nil {
return admin_api.NewTenantInfoDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewTenantInfoOK()
})
// Update Tenant
api.AdminAPIUpdateTenantHandler = admin_api.UpdateTenantHandlerFunc(func(params admin_api.UpdateTenantParams, session *models.Principal) middleware.Responder {
err := getUpdateTenantResponse(session, params)
if err != nil {
return admin_api.NewUpdateTenantDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewUpdateTenantCreated()
})
// Add Tenant Pools
api.AdminAPITenantAddPoolHandler = admin_api.TenantAddPoolHandlerFunc(func(params admin_api.TenantAddPoolParams, session *models.Principal) middleware.Responder {
err := getTenantAddPoolResponse(session, params)
if err != nil {
return admin_api.NewTenantAddPoolDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewTenantAddPoolCreated()
})
// Get Tenant Usage
api.AdminAPIGetTenantUsageHandler = admin_api.GetTenantUsageHandlerFunc(func(params admin_api.GetTenantUsageParams, session *models.Principal) middleware.Responder {
payload, err := getTenantUsageResponse(session, params)
if err != nil {
return admin_api.NewGetTenantUsageDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewGetTenantUsageOK().WithPayload(payload)
})
api.AdminAPIGetTenantPodsHandler = admin_api.GetTenantPodsHandlerFunc(func(params admin_api.GetTenantPodsParams, session *models.Principal) middleware.Responder {
payload, err := getTenantPodsResponse(session, params)
if err != nil {
return admin_api.NewGetTenantPodsDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewGetTenantPodsOK().WithPayload(payload)
})
api.AdminAPIGetPodLogsHandler = admin_api.GetPodLogsHandlerFunc(func(params admin_api.GetPodLogsParams, session *models.Principal) middleware.Responder {
payload, err := getPodLogsResponse(session, params)
if err != nil {
return admin_api.NewGetPodLogsDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewGetPodLogsOK().WithPayload(payload)
})
api.AdminAPIGetPodEventsHandler = admin_api.GetPodEventsHandlerFunc(func(params admin_api.GetPodEventsParams, session *models.Principal) middleware.Responder {
payload, err := getPodEventsResponse(session, params)
if err != nil {
return admin_api.NewGetPodEventsDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewGetPodEventsOK().WithPayload(payload)
})
// Update Tenant Pools
api.AdminAPITenantUpdatePoolsHandler = admin_api.TenantUpdatePoolsHandlerFunc(func(params admin_api.TenantUpdatePoolsParams, session *models.Principal) middleware.Responder {
resp, err := getTenantUpdatePoolResponse(session, params)
if err != nil {
return admin_api.NewTenantUpdatePoolsDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewTenantUpdatePoolsOK().WithPayload(resp)
})
// Update Tenant Certificates
api.AdminAPITenantUpdateCertificateHandler = admin_api.TenantUpdateCertificateHandlerFunc(func(params admin_api.TenantUpdateCertificateParams, session *models.Principal) middleware.Responder {
err := getTenantUpdateCertificatesResponse(session, params)
if err != nil {
return admin_api.NewTenantUpdateCertificateDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewTenantUpdateCertificateCreated()
})
// Update Tenant Encryption Configuration
api.AdminAPITenantUpdateEncryptionHandler = admin_api.TenantUpdateEncryptionHandlerFunc(func(params admin_api.TenantUpdateEncryptionParams, session *models.Principal) middleware.Responder {
err := getTenantUpdateEncryptionResponse(session, params)
if err != nil {
return admin_api.NewTenantUpdateEncryptionDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewTenantUpdateEncryptionCreated()
})
// Get Tenant YAML
api.AdminAPIGetTenantYAMLHandler = admin_api.GetTenantYAMLHandlerFunc(func(params admin_api.GetTenantYAMLParams, principal *models.Principal) middleware.Responder {
payload, err := getTenantYAML(principal, params)
if err != nil {
return admin_api.NewGetTenantYAMLDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewGetTenantYAMLOK().WithPayload(payload)
})
// Update Tenant YAML
api.AdminAPIPutTenantYAMLHandler = admin_api.PutTenantYAMLHandlerFunc(func(params admin_api.PutTenantYAMLParams, principal *models.Principal) middleware.Responder {
err := getUpdateTenantYAML(principal, params)
if err != nil {
return admin_api.NewPutTenantYAMLDefault(int(err.Code)).WithPayload(err)
}
return admin_api.NewPutTenantYAMLCreated()
})
}
// getDeleteTenantResponse gets the output of deleting a minio instance
func getDeleteTenantResponse(session *models.Principal, params admin_api.DeleteTenantParams) *models.Error {
opClientClientSet, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return prepareError(err)
}
// get Kubernetes Client
clientset, err := cluster.K8sClient(session.STSSessionToken)
if err != nil {
return prepareError(err)
}
opClient := &operatorClient{
client: opClientClientSet,
}
deleteTenantPVCs := false
if params.Body != nil {
deleteTenantPVCs = params.Body.DeletePvcs
}
if err = deleteTenantAction(context.Background(), opClient, clientset.CoreV1(), params.Namespace, params.Tenant, deleteTenantPVCs); err != nil {
return prepareError(err)
}
return nil
}
// deleteTenantAction performs the actions of deleting a tenant
//
// It also deletes the tenant's underlying PVCs if deletePvcs is set.
func deleteTenantAction(
ctx context.Context,
operatorClient OperatorClientI,
clientset v1.CoreV1Interface,
namespace, tenantName string,
deletePvcs bool) error {
err := operatorClient.TenantDelete(ctx, namespace, tenantName, metav1.DeleteOptions{})
if err != nil {
// try to delete pvc even if the tenant doesn't exist anymore but only if deletePvcs is set to true,
// else, we return the error
if (deletePvcs && !k8sErrors.IsNotFound(err)) || !deletePvcs {
return err
}
}
if deletePvcs {
opts := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", miniov2.TenantLabel, tenantName),
}
err = clientset.PersistentVolumeClaims(namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, opts)
if err != nil {
return err
}
// delete all tenant's secrets only if deletePvcs = true
return clientset.Secrets(namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, opts)
}
return nil
}
// GetTenantServiceURL gets tenant's service url with the proper scheme and port
func GetTenantServiceURL(mi *miniov2.Tenant) (svcURL string) {
scheme := "http"
port := miniov2.MinIOPortLoadBalancerSVC
if mi.AutoCert() || mi.ExternalCert() {
scheme = "https"
port = miniov2.MinIOTLSPortLoadBalancerSVC
}
return fmt.Sprintf("%s://%s", scheme, net.JoinHostPort(mi.MinIOFQDNServiceName(), strconv.Itoa(port)))
}
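// exampleTenantServiceURLs is an illustrative sketch, not part of the original
// file: it shows the two URL shapes GetTenantServiceURL produces depending on
// whether auto-cert TLS is requested. The tenant name and namespace are
// hypothetical placeholders.
func exampleTenantServiceURLs() (plain string, withTLS string) {
	t := &miniov2.Tenant{}
	t.Name = "example-tenant"
	t.Namespace = "default"
	autoCertOff := false
	t.Spec.RequestAutoCert = &autoCertOff
	plain = GetTenantServiceURL(t) // http scheme on MinIOPortLoadBalancerSVC
	autoCertOn := true
	t.Spec.RequestAutoCert = &autoCertOn
	withTLS = GetTenantServiceURL(t) // https scheme on MinIOTLSPortLoadBalancerSVC
	return plain, withTLS
}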
func getTenantAdminClient(ctx context.Context, client K8sClientI, tenant *miniov2.Tenant, svcURL string) (*madmin.AdminClient, error) {
tenantCreds, err := getTenantCreds(ctx, client, tenant)
if err != nil {
return nil, err
}
sessionToken := ""
mAdmin, pErr := NewAdminClientWithInsecure(svcURL, tenantCreds.accessKey, tenantCreds.secretKey, sessionToken, false)
if pErr != nil {
return nil, pErr.Cause
}
return mAdmin, nil
}
type tenantKeys struct {
accessKey string
secretKey string
}
func getTenantCreds(ctx context.Context, client K8sClientI, tenant *miniov2.Tenant) (*tenantKeys, error) {
if tenant == nil || tenant.Spec.CredsSecret == nil {
return nil, errors.New("invalid arguments")
}
// get admin credentials from secret
creds, err := client.getSecret(ctx, tenant.Namespace, tenant.Spec.CredsSecret.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
tenantAccessKey, ok := creds.Data["accesskey"]
if !ok {
LogError("tenant's secret doesn't contain accesskey")
return nil, errorGeneric
}
tenantSecretKey, ok := creds.Data["secretkey"]
if !ok {
LogError("tenant's secret doesn't contain secretkey")
return nil, errorGeneric
}
// TODO:
	// We need to avoid using MinIO root credentials to talk to tenants and instead use different user credentials.
	// When that is implemented we also need to check here whether the tenant has LDAP enabled, so we authenticate against AD first.
return &tenantKeys{accessKey: string(tenantAccessKey), secretKey: string(tenantSecretKey)}, nil
}
func getTenant(ctx context.Context, operatorClient OperatorClientI, namespace, tenantName string) (*miniov2.Tenant, error) {
minInst, err := operatorClient.TenantGet(ctx, namespace, tenantName, metav1.GetOptions{})
if err != nil {
return nil, err
}
return minInst, nil
}
func isPrometheusEnabled(annotations map[string]string) bool {
if annotations == nil {
return false
}
// if one of the following prometheus annotations are not present
// we consider the tenant as not integrated with prometheus
if _, ok := annotations[prometheusPath]; !ok {
return false
}
if _, ok := annotations[prometheusPort]; !ok {
return false
}
if _, ok := annotations[prometheusScrape]; !ok {
return false
}
return true
}
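// examplePrometheusAnnotations is an illustrative sketch, not part of the
// original file: it shows the minimal annotation set isPrometheusEnabled
// expects before a tenant is considered Prometheus-integrated; the values are
// hypothetical.
func examplePrometheusAnnotations() bool {
	annotations := map[string]string{
		prometheusPath:   "/minio/v2/metrics/cluster",
		prometheusPort:   "9000",
		prometheusScrape: "true",
	}
	return isPrometheusEnabled(annotations) // true: all three keys are present
}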
func getTenantInfo(tenant *miniov2.Tenant) *models.Tenant {
var pools []*models.Pool
consoleImage := ""
var totalSize int64
for _, p := range tenant.Spec.Pools {
pools = append(pools, parseTenantPool(&p))
poolSize := int64(p.Servers) * int64(p.VolumesPerServer) * p.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value()
totalSize = totalSize + poolSize
}
var deletion string
if tenant.ObjectMeta.DeletionTimestamp != nil {
deletion = tenant.ObjectMeta.DeletionTimestamp.Format(time.RFC3339)
}
if tenant.HasConsoleEnabled() {
consoleImage = tenant.Spec.Console.Image
}
return &models.Tenant{
CreationDate: tenant.ObjectMeta.CreationTimestamp.Format(time.RFC3339),
DeletionDate: deletion,
Name: tenant.Name,
TotalSize: totalSize,
CurrentState: tenant.Status.CurrentState,
Pools: pools,
Namespace: tenant.ObjectMeta.Namespace,
Image: tenant.Spec.Image,
ConsoleImage: consoleImage,
EnablePrometheus: isPrometheusEnabled(tenant.Annotations),
}
}
func getTenantDetailsResponse(session *models.Principal, params admin_api.TenantDetailsParams) (*models.Tenant, *models.Error) {
// 5 seconds timeout
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
opClientClientSet, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err)
}
opClient := &operatorClient{
client: opClientClientSet,
}
minTenant, err := getTenant(ctx, opClient, params.Namespace, params.Tenant)
if err != nil {
return nil, prepareError(err)
}
info := getTenantInfo(minTenant)
// detect if AD is enabled
adEnabled := false
for _, env := range minTenant.Spec.Env {
if env.Name == "MINIO_IDENTITY_LDAP_SERVER_ADDR" && env.Value != "" {
adEnabled = true
}
}
// get Kubernetes Client
clientSet, err := cluster.K8sClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err)
}
k8sClient := k8sClient{
client: clientSet,
}
// detect if OpenID is enabled
oicEnabled := false
consoleSelector := fmt.Sprintf("%s-console", minTenant.Name)
consoleSecretName := fmt.Sprintf("%s-secret", consoleSelector)
consoleSecret, err := clientSet.CoreV1().Secrets(minTenant.Namespace).Get(ctx, consoleSecretName, metav1.GetOptions{})
// we can tolerate not getting this secret
if err != nil {
LogError("unable to fetch existing secrets for %s: %v", minTenant.Name, err)
}
if consoleSecret != nil {
if _, ok := consoleSecret.Data["CONSOLE_IDP_URL"]; ok {
oicEnabled = true
}
}
if minTenant.HasConsoleEnabled() {
for _, env := range minTenant.Spec.Console.Env {
if env.Name == "CONSOLE_IDP_URL" {
oicEnabled = true
}
}
}
info.LogEnabled = minTenant.HasLogEnabled()
info.MonitoringEnabled = minTenant.HasPrometheusEnabled()
info.EncryptionEnabled = minTenant.HasKESEnabled()
info.IdpAdEnabled = adEnabled
info.IdpOicEnabled = oicEnabled
info.MinioTLS = minTenant.TLS()
info.ConsoleTLS = minTenant.AutoCert() || minTenant.ConsoleExternalCert()
info.ConsoleEnabled = minTenant.HasConsoleEnabled()
if minTenant.Spec.Console != nil {
// obtain current subnet license for tenant (if exists)
license, _ := getSubscriptionLicense(context.Background(), &k8sClient, params.Namespace, minTenant.Spec.Console.ConsoleSecret.Name)
if license != "" {
client := &cluster.HTTPClient{
Client: GetConsoleSTSClient(),
}
licenseInfo, _, _ := subscriptionValidate(client, license, "", "")
// if licenseInfo is present attach it to the tenantInfo response
if licenseInfo != nil {
info.SubnetLicense = licenseInfo
}
}
}
// attach status information
info.Status = &models.TenantStatus{
HealthStatus: string(minTenant.Status.HealthStatus),
DrivesHealing: minTenant.Status.DrivesHealing,
DrivesOffline: minTenant.Status.DrivesOffline,
DrivesOnline: minTenant.Status.DrivesOnline,
WriteQuorum: minTenant.Status.WriteQuorum,
}
// get tenant service
minTenant.EnsureDefaults()
//minio service
minSvc, err := k8sClient.getService(ctx, minTenant.Namespace, minTenant.MinIOCIServiceName(), metav1.GetOptions{})
if err != nil {
// we can tolerate this error
LogError("Unable to get MinIO service name: %v, continuing", err)
}
//console service
conSvc, err := k8sClient.getService(ctx, minTenant.Namespace, minTenant.ConsoleCIServiceName(), metav1.GetOptions{})
if err != nil {
// we can tolerate this error
LogError("Unable to get MinIO console service name: %v, continuing", err)
}
schema := "http"
consoleSchema := "http"
consolePort := ":9090"
if minTenant.TLS() {
schema = "https"
}
if minTenant.AutoCert() || minTenant.ConsoleExternalCert() {
consoleSchema = "https"
consolePort = ":9443"
}
var minioEndpoint string
var consoleEndpoint string
if minSvc != nil && len(minSvc.Status.LoadBalancer.Ingress) > 0 {
if minSvc.Status.LoadBalancer.Ingress[0].IP != "" {
minioEndpoint = fmt.Sprintf("%s://%s", schema, minSvc.Status.LoadBalancer.Ingress[0].IP)
}
if minSvc.Status.LoadBalancer.Ingress[0].Hostname != "" {
minioEndpoint = fmt.Sprintf("%s://%s", schema, minSvc.Status.LoadBalancer.Ingress[0].Hostname)
}
}
if conSvc != nil && len(conSvc.Status.LoadBalancer.Ingress) > 0 {
if conSvc.Status.LoadBalancer.Ingress[0].IP != "" {
consoleEndpoint = fmt.Sprintf("%s://%s%s", consoleSchema, conSvc.Status.LoadBalancer.Ingress[0].IP, consolePort)
}
if conSvc.Status.LoadBalancer.Ingress[0].Hostname != "" {
consoleEndpoint = fmt.Sprintf("%s://%s%s", consoleSchema, conSvc.Status.LoadBalancer.Ingress[0].Hostname, consolePort)
}
}
if minioEndpoint != "" || consoleEndpoint != "" {
info.Endpoints = &models.TenantEndpoints{
Console: consoleEndpoint,
Minio: minioEndpoint,
}
}
return info, nil
}
// parseTenantCertificates convert public key pem certificates stored in k8s secrets for a given Tenant into x509 certificates
func parseTenantCertificates(ctx context.Context, clientSet K8sClientI, namespace string, secrets []*miniov2.LocalCertificateReference) ([]*models.CertificateInfo, error) {
var certificates []*models.CertificateInfo
publicKey := "public.crt"
// Iterate over TLS secrets and build array of CertificateInfo structure
// that will be used to display information about certs in the UI
for _, secret := range secrets {
keyPair, err := clientSet.getSecret(ctx, namespace, secret.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if secret.Type == "kubernetes.io/tls" || secret.Type == "cert-manager.io/v1alpha2" {
publicKey = "tls.crt"
}
// Extract public key from certificate TLS secret
if rawCert, ok := keyPair.Data[publicKey]; ok {
block, _ := pem.Decode(rawCert)
if block == nil {
// If certificate failed to decode skip
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certificates = append(certificates, &models.CertificateInfo{
SerialNumber: cert.SerialNumber.String(),
Name: secret.Name,
Domains: cert.DNSNames,
Expiry: cert.NotAfter.String(),
})
}
}
return certificates, nil
}
func getTenantSecurity(ctx context.Context, clientSet K8sClientI, tenant *miniov2.Tenant) (response *models.TenantSecurityResponse, err error) {
var minioExternalCertificates []*models.CertificateInfo
var minioExternalCaCertificates []*models.CertificateInfo
var consoleExternalCertificates []*models.CertificateInfo
var consoleExternalCaCertificates []*models.CertificateInfo
// Certificates used by MinIO server
if minioExternalCertificates, err = parseTenantCertificates(ctx, clientSet, tenant.Namespace, tenant.Spec.ExternalCertSecret); err != nil {
return nil, err
}
// CA Certificates used by MinIO server
if minioExternalCaCertificates, err = parseTenantCertificates(ctx, clientSet, tenant.Namespace, tenant.Spec.ExternalCaCertSecret); err != nil {
return nil, err
}
if tenant.HasConsoleEnabled() {
// Certificate used by Console server
if tenant.Spec.Console.ExternalCertSecret != nil {
if consoleExternalCertificates, err = parseTenantCertificates(ctx, clientSet, tenant.Namespace, []*miniov2.LocalCertificateReference{tenant.Spec.Console.ExternalCertSecret}); err != nil {
return nil, err
}
}
// CA Certificates used by Console server
if consoleExternalCaCertificates, err = parseTenantCertificates(ctx, clientSet, tenant.Namespace, tenant.Spec.Console.ExternalCaCertSecret); err != nil {
return nil, err
}
}
return &models.TenantSecurityResponse{
AutoCert: tenant.AutoCert(),
CustomCertificates: &models.TenantSecurityResponseCustomCertificates{
Minio: minioExternalCertificates,
MinioCAs: minioExternalCaCertificates,
Console: consoleExternalCertificates,
ConsoleCAs: consoleExternalCaCertificates,
},
}, nil
}
func getTenantSecurityResponse(session *models.Principal, params admin_api.TenantSecurityParams) (*models.TenantSecurityResponse, *models.Error) {
	// no timeout here; the previous 5 second timeout is left commented out below
ctx := context.Background()
//ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//defer cancel()
opClientClientSet, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err)
}
opClient := &operatorClient{
client: opClientClientSet,
}
minTenant, err := getTenant(ctx, opClient, params.Namespace, params.Tenant)
if err != nil {
return nil, prepareError(err)
}
// get Kubernetes Client
clientSet, err := cluster.K8sClient(session.STSSessionToken)
k8sClient := k8sClient{
client: clientSet,
}
if err != nil {
return nil, prepareError(err)
}
info, err := getTenantSecurity(ctx, &k8sClient, minTenant)
if err != nil {
return nil, prepareError(err)
}
return info, nil
}
func getUpdateTenantSecurityResponse(session *models.Principal, params admin_api.UpdateTenantSecurityParams) *models.Error {
// 5 seconds timeout
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
opClientClientSet, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return prepareError(err)
}
// get Kubernetes Client
clientSet, err := cluster.K8sClient(session.STSSessionToken)
if err != nil {
return prepareError(err)
}
k8sClient := k8sClient{
client: clientSet,
}
opClient := &operatorClient{
client: opClientClientSet,
}
if err := updateTenantSecurity(ctx, opClient, &k8sClient, params.Namespace, params); err != nil {
return prepareError(err, errors.New("unable to update tenant"))
}
return nil
}
// updateTenantSecurity
func updateTenantSecurity(ctx context.Context, operatorClient OperatorClientI, client K8sClientI, namespace string, params admin_api.UpdateTenantSecurityParams) error {
minInst, err := operatorClient.TenantGet(ctx, namespace, params.Tenant, metav1.GetOptions{})
if err != nil {
return err
}
// Update AutoCert
minInst.Spec.RequestAutoCert = ¶ms.Body.AutoCert
var newMinIOExternalCertSecret []*miniov2.LocalCertificateReference
var newMinIOExternalCaCertSecret []*miniov2.LocalCertificateReference
var newConsoleExternalCertSecret *miniov2.LocalCertificateReference
var newConsoleExternalCaCertSecret []*miniov2.LocalCertificateReference
// Remove Certificate Secrets from MinIO (Tenant.Spec.ExternalCertSecret)
for _, certificate := range minInst.Spec.ExternalCertSecret {
skip := false
for _, certificateToBeDeleted := range params.Body.CustomCertificates.SecretsToBeDeleted {
if certificate.Name == certificateToBeDeleted {
skip = true
break
}
}
if skip {
continue
}
newMinIOExternalCertSecret = append(newMinIOExternalCertSecret, certificate)
}
// Remove Certificate Secrets from MinIO CAs (Tenant.Spec.ExternalCaCertSecret)
for _, certificate := range minInst.Spec.ExternalCaCertSecret {
skip := false
for _, certificateToBeDeleted := range params.Body.CustomCertificates.SecretsToBeDeleted {
if certificate.Name == certificateToBeDeleted {
skip = true
break
}
}
if skip {
continue
}
newMinIOExternalCaCertSecret = append(newMinIOExternalCaCertSecret, certificate)
}
if minInst.HasConsoleEnabled() {
// Remove Certificate Secrets from Console (Tenant.Spec.Console.ExternalCertSecret)
if minInst.ConsoleExternalCert() {
newConsoleExternalCertSecret = minInst.Spec.Console.ExternalCertSecret
for _, certificateToBeDeleted := range params.Body.CustomCertificates.SecretsToBeDeleted {
if newConsoleExternalCertSecret.Name == certificateToBeDeleted {
newConsoleExternalCertSecret = nil
break
}
}
}
// Remove Certificate Secrets from Console CAs (Tenant.Spec.Console.ExternalCaCertSecret)
for _, certificate := range minInst.Spec.Console.ExternalCaCertSecret {
skip := false
for _, certificateToBeDeleted := range params.Body.CustomCertificates.SecretsToBeDeleted {
if certificate.Name == certificateToBeDeleted {
skip = true
break
}
}
if skip {
continue
}
newConsoleExternalCaCertSecret = append(newConsoleExternalCaCertSecret, certificate)
}
}
//Create new Certificate Secrets for MinIO
secretName := fmt.Sprintf("%s-%s", minInst.Name, strings.ToLower(utils.RandomCharString(5)))
externalCertSecretName := fmt.Sprintf("%s-external-certificates", secretName)
externalCertSecrets, err := createOrReplaceExternalCertSecrets(ctx, client, minInst.Namespace, params.Body.CustomCertificates.Minio, externalCertSecretName, minInst.Name)
if err != nil {
return err
}
newMinIOExternalCertSecret = append(newMinIOExternalCertSecret, externalCertSecrets...)
// Create new CAs Certificate Secrets for MinIO
var caCertificates []tenantSecret
for i, caCertificate := range params.Body.CustomCertificates.MinioCAs {
certificateContent, err := base64.StdEncoding.DecodeString(caCertificate)
if err != nil {
return err
}
caCertificates = append(caCertificates, tenantSecret{
Name: fmt.Sprintf("%s-ca-certificate-%d", secretName, i),
Content: map[string][]byte{
"public.crt": certificateContent,
},
})
}
if len(caCertificates) > 0 {
certificateSecrets, err := createOrReplaceSecrets(ctx, client, minInst.Namespace, caCertificates, minInst.Name)
if err != nil {
return err
}
newMinIOExternalCaCertSecret = append(newMinIOExternalCaCertSecret, certificateSecrets...)
}
// Create new Certificate Secrets for Console
consoleExternalCertSecretName := fmt.Sprintf("%s-console-external-certificates", secretName)
consoleExternalCertSecrets, err := createOrReplaceExternalCertSecrets(ctx, client, minInst.Namespace, params.Body.CustomCertificates.Console, consoleExternalCertSecretName, minInst.Name)
if err != nil {
return err
}
if len(consoleExternalCertSecrets) > 0 {
newConsoleExternalCertSecret = consoleExternalCertSecrets[0]
}
// Create new CAs Certificate Secrets for Console
var consoleCaCertificates []tenantSecret
for i, caCertificate := range params.Body.CustomCertificates.ConsoleCAs {
certificateContent, err := base64.StdEncoding.DecodeString(caCertificate)
if err != nil {
return err
}
consoleCaCertificates = append(consoleCaCertificates, tenantSecret{
Name: fmt.Sprintf("%s-console-ca-certificate-%d", secretName, i),
Content: map[string][]byte{
"public.crt": certificateContent,
},
})
}
if len(consoleCaCertificates) > 0 {
certificateSecrets, err := createOrReplaceSecrets(ctx, client, minInst.Namespace, consoleCaCertificates, minInst.Name)
if err != nil {
return err
}
newConsoleExternalCaCertSecret = append(newConsoleExternalCaCertSecret, certificateSecrets...)
}
// Update External Certificates
minInst.Spec.ExternalCertSecret = newMinIOExternalCertSecret
minInst.Spec.ExternalCaCertSecret = newMinIOExternalCaCertSecret
if minInst.HasConsoleEnabled() {
minInst.Spec.Console.ExternalCertSecret = newConsoleExternalCertSecret
minInst.Spec.Console.ExternalCaCertSecret = newConsoleExternalCaCertSecret
}
_, err = operatorClient.TenantUpdate(ctx, minInst, metav1.UpdateOptions{})
if err != nil {
return err
}
// Remove Certificate Secrets from Tenant namespace
for _, secretName := range params.Body.CustomCertificates.SecretsToBeDeleted {
err = client.deleteSecret(ctx, minInst.Namespace, secretName, metav1.DeleteOptions{})
if err != nil {
LogError("error deleting secret: %v", err)
}
}
return nil
}
func listTenants(ctx context.Context, operatorClient OperatorClientI, namespace string, limit *int32) (*models.ListTenantsResponse, error) {
listOpts := metav1.ListOptions{
Limit: 10,
}
if limit != nil {
listOpts.Limit = int64(*limit)
}
minTenants, err := operatorClient.TenantList(ctx, namespace, listOpts)
if err != nil {
return nil, err
}
var tenants []*models.TenantList
for _, tenant := range minTenants.Items {
var totalSize int64
var instanceCount int64
var volumeCount int64
for _, pool := range tenant.Spec.Pools {
instanceCount = instanceCount + int64(pool.Servers)
volumeCount = volumeCount + int64(pool.Servers*pool.VolumesPerServer)
if pool.VolumeClaimTemplate != nil {
poolSize := int64(pool.VolumesPerServer) * int64(pool.Servers) * pool.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value()
totalSize = totalSize + poolSize
}
}
var deletion string
if tenant.ObjectMeta.DeletionTimestamp != nil {
deletion = tenant.ObjectMeta.DeletionTimestamp.Format(time.RFC3339)
}
tenants = append(tenants, &models.TenantList{
CreationDate: tenant.ObjectMeta.CreationTimestamp.Format(time.RFC3339),
DeletionDate: deletion,
Name: tenant.ObjectMeta.Name,
PoolCount: int64(len(tenant.Spec.Pools)),
InstanceCount: instanceCount,
VolumeCount: volumeCount,
CurrentState: tenant.Status.CurrentState,
Namespace: tenant.ObjectMeta.Namespace,
TotalSize: totalSize,
HealthStatus: string(tenant.Status.HealthStatus),
})
}
return &models.ListTenantsResponse{
Tenants: tenants,
Total: int64(len(tenants)),
}, nil
}
func getListAllTenantsResponse(session *models.Principal, params admin_api.ListAllTenantsParams) (*models.ListTenantsResponse, *models.Error) {
ctx := context.Background()
opClientClientSet, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err)
}
opClient := &operatorClient{
client: opClientClientSet,
}
listT, err := listTenants(ctx, opClient, "", params.Limit)
if err != nil {
return nil, prepareError(err)
}
return listT, nil
}
// getListTenantsResponse list tenants by namespace
func getListTenantsResponse(session *models.Principal, params admin_api.ListTenantsParams) (*models.ListTenantsResponse, *models.Error) {
ctx := context.Background()
opClientClientSet, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err)
}
opClient := &operatorClient{
client: opClientClientSet,
}
listT, err := listTenants(ctx, opClient, params.Namespace, params.Limit)
if err != nil {
return nil, prepareError(err)
}
return listT, nil
}
func getTenantCreatedResponse(session *models.Principal, params admin_api.CreateTenantParams) (response *models.CreateTenantResponse, mError *models.Error) {
tenantReq := params.Body
minioImage := tenantReq.Image
ctx := context.Background()
consoleHasTLS := false
if minioImage == "" {
minImg, err := cluster.GetMinioImage()
// we can live without figuring out the latest version of MinIO, Operator will use a hardcoded value
if err == nil {
minioImage = *minImg
}
}
// get Kubernetes Client
clientSet, err := cluster.K8sClient(session.STSSessionToken)
k8sClient := k8sClient{
client: clientSet,
}
if err != nil {
return nil, prepareError(err)
}
ns := *tenantReq.Namespace
// if access/secret are provided, use them, else create a random pair
accessKey := RandomCharString(16)
secretKey := RandomCharString(32)
if tenantReq.AccessKey != "" {
accessKey = tenantReq.AccessKey
}
if tenantReq.SecretKey != "" {
secretKey = tenantReq.SecretKey
}
tenantName := *tenantReq.Name
imm := true
var instanceSecret corev1.Secret
var users []*corev1.LocalObjectReference
// Create the secret for the root credentials
secretName := fmt.Sprintf("%s-secret", tenantName)
instanceSecret = corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Labels: map[string]string{
miniov2.TenantLabel: tenantName,
},
},
Immutable: &imm,
Data: map[string][]byte{
"accesskey": []byte(accessKey),
"secretkey": []byte(secretKey),
},
}
_, err = clientSet.CoreV1().Secrets(ns).Create(ctx, &instanceSecret, metav1.CreateOptions{})
if err != nil {
return nil, prepareError(err)
}
// delete secrets created if an error occurred during tenant creation,
defer func() {
if mError != nil {
LogError("deleting secrets created for failed tenant: %s if any: %v", tenantName, mError)
opts := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", miniov2.TenantLabel, tenantName),
}
err = clientSet.CoreV1().Secrets(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, opts)
if err != nil {
LogError("error deleting tenant's secrets: %v", err)
}
}
}()
var environmentVariables []corev1.EnvVar
// Check the Erasure Coding Parity for validity and pass it to Tenant
if tenantReq.ErasureCodingParity > 0 {
if tenantReq.ErasureCodingParity < 2 || tenantReq.ErasureCodingParity > 8 {
return nil, prepareError(errorInvalidErasureCodingValue)
}
environmentVariables = append(environmentVariables, corev1.EnvVar{
Name: "MINIO_STORAGE_CLASS_STANDARD",
Value: fmt.Sprintf("EC:%d", tenantReq.ErasureCodingParity),
})
}
// Construct a MinIO Tenant with everything we get from the request parameters
minInst := miniov2.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: tenantName,
Labels: tenantReq.Labels,
},
Spec: miniov2.TenantSpec{
Image: minioImage,
Mountpath: "/export",
CredsSecret: &corev1.LocalObjectReference{
Name: secretName,
},
Env: environmentVariables,
},
}
idpEnabled := false
// Enable IDP (Active Directory) for MinIO
if tenantReq.Idp != nil && tenantReq.Idp.ActiveDirectory != nil {
url := *tenantReq.Idp.ActiveDirectory.URL
userNameFormat := *tenantReq.Idp.ActiveDirectory.UsernameFormat
userSearchFilter := *tenantReq.Idp.ActiveDirectory.UserSearchFilter
tlsSkipVerify := tenantReq.Idp.ActiveDirectory.SkipTLSVerification
serverInsecure := tenantReq.Idp.ActiveDirectory.ServerInsecure
groupSearchDN := tenantReq.Idp.ActiveDirectory.GroupSearchBaseDn
groupSearchFilter := tenantReq.Idp.ActiveDirectory.GroupSearchFilter
groupNameAttribute := tenantReq.Idp.ActiveDirectory.GroupNameAttribute
if url != "" && userNameFormat != "" && userSearchFilter != "" {
// CONSOLE_LDAP_ENABLED
idpEnabled = true
minInst.Spec.Env = append(minInst.Spec.Env, corev1.EnvVar{
Name: "MINIO_IDENTITY_LDAP_SERVER_ADDR",
Value: url,
}, corev1.EnvVar{
Name: "MINIO_IDENTITY_LDAP_USERNAME_FORMAT",
Value: userNameFormat,
}, corev1.EnvVar{
Name: "MINIO_IDENTITY_LDAP_USERNAME_SEARCH_FILTER",
Value: userSearchFilter,
}, corev1.EnvVar{
Name: "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN",
Value: groupSearchDN,
}, corev1.EnvVar{
Name: "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER",
Value: groupSearchFilter,
}, corev1.EnvVar{
Name: "MINIO_IDENTITY_LDAP_GROUP_NAME_ATTRIBUTE",
Value: groupNameAttribute,
})
if tlsSkipVerify {
minInst.Spec.Env = append(minInst.Spec.Env, corev1.EnvVar{
Name: "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY",
Value: "on",
})
}
if serverInsecure {
minInst.Spec.Env = append(minInst.Spec.Env, corev1.EnvVar{
Name: "MINIO_IDENTITY_LDAP_SERVER_INSECURE",
Value: "on",
})
}
}
}
// Create a secret for any built-in user passed if no external IDP was configured
if tenantReq.Idp != nil && len(tenantReq.Idp.Keys) > 0 && tenantReq.Idp.ActiveDirectory == nil && tenantReq.Idp.Oidc == nil {
for i := 0; i < len(tenantReq.Idp.Keys); i++ {
userSecretName := fmt.Sprintf("%s-user-%d", tenantName, i)
users = append(users, &corev1.LocalObjectReference{Name: userSecretName})
userSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: userSecretName,
Labels: map[string]string{
miniov2.TenantLabel: tenantName,
},
},
Immutable: &imm,
Data: map[string][]byte{
"CONSOLE_ACCESS_KEY": []byte(*tenantReq.Idp.Keys[i].AccessKey),
"CONSOLE_SECRET_KEY": []byte(*tenantReq.Idp.Keys[i].SecretKey),
},
}
_, err := clientSet.CoreV1().Secrets(ns).Create(ctx, &userSecret, metav1.CreateOptions{})
if err != nil {
return nil, prepareError(err)
}
}
// attach the users to the tenant
minInst.Spec.Users = users
}
isEncryptionEnabled := false
if tenantReq.EnableTLS != nil {
// if enableTLS is defined in the create tenant request we assign the value
// to the RequestAutoCert attribute in the tenant spec
minInst.Spec.RequestAutoCert = tenantReq.EnableTLS
if *tenantReq.EnableTLS {
// requestAutoCert is enabled, MinIO will be deployed with TLS enabled and encryption can be enabled
isEncryptionEnabled = true
consoleHasTLS = true
}
}
// External TLS certificates for MinIO
if tenantReq.TLS != nil && len(tenantReq.TLS.Minio) > 0 {
isEncryptionEnabled = true
// Certificates used by the MinIO instance
externalCertSecretName := fmt.Sprintf("%s-instance-external-certificates", secretName)
externalCertSecret, err := createOrReplaceExternalCertSecrets(ctx, &k8sClient, ns, tenantReq.TLS.Minio, externalCertSecretName, tenantName)
if err != nil {
return nil, prepareError(err)
}
minInst.Spec.ExternalCertSecret = externalCertSecret
}
// If encryption configuration is present and TLS will be enabled (using AutoCert or External certificates)
if tenantReq.Encryption != nil && isEncryptionEnabled {
// KES client mTLSCertificates used by MinIO instance
if tenantReq.Encryption.Client != nil {
tenantExternalClientCertSecretName := fmt.Sprintf("%s-tenant-external-client-cert", secretName)
certificates := []*models.KeyPairConfiguration{tenantReq.Encryption.Client}
certificateSecrets, err := createOrReplaceExternalCertSecrets(ctx, &k8sClient, ns, certificates, tenantExternalClientCertSecretName, tenantName)
if err != nil {
return nil, prepareError(errorGeneric)
}
if len(certificateSecrets) > 0 {
minInst.Spec.ExternalClientCertSecret = certificateSecrets[0]
}
}
// KES configuration for Tenant instance
minInst.Spec.KES, err = getKESConfiguration(ctx, &k8sClient, ns, tenantReq.Encryption, secretName, tenantName)
if err != nil {
return nil, prepareError(errorGeneric)
}
// Set Labels, Annotations and Node Selector for KES
minInst.Spec.KES.Labels = tenantReq.Encryption.Labels
minInst.Spec.KES.Annotations = tenantReq.Encryption.Annotations
minInst.Spec.KES.NodeSelector = tenantReq.Encryption.NodeSelector
}
// External TLS CA certificates for MinIO
if tenantReq.TLS != nil && len(tenantReq.TLS.CaCertificates) > 0 {
var caCertificates []tenantSecret
for i, caCertificate := range tenantReq.TLS.CaCertificates {
certificateContent, err := base64.StdEncoding.DecodeString(caCertificate)
if err != nil {
return nil, prepareError(errorGeneric, nil, err)
}
caCertificates = append(caCertificates, tenantSecret{
Name: fmt.Sprintf("ca-certificate-%d", i),
Content: map[string][]byte{
"public.crt": certificateContent,
},
})
}
if len(caCertificates) > 0 {
certificateSecrets, err := createOrReplaceSecrets(ctx, &k8sClient, ns, caCertificates, tenantName)
if err != nil {
return nil, prepareError(errorGeneric, nil, err)
}
minInst.Spec.ExternalCaCertSecret = certificateSecrets
}
}
// optionals are set below
var tenantUserAccessKey string
var tenantUserSecretKey string
keyElementEmpty := tenantReq.Idp != nil && len(tenantReq.Idp.Keys) == 1 && *tenantReq.Idp.Keys[0].AccessKey == "" && *tenantReq.Idp.Keys[0].SecretKey == ""
enableConsole := true
if tenantReq.EnableConsole != nil {
enableConsole = *tenantReq.EnableConsole
}
if enableConsole {
consoleSelector := fmt.Sprintf("%s-console", tenantName)
consoleSecretName := fmt.Sprintf("%s-secret", consoleSelector)
consoleSecretData := map[string][]byte{
"CONSOLE_PBKDF_PASSPHRASE": []byte(RandomCharString(16)),
"CONSOLE_PBKDF_SALT": []byte(RandomCharString(8)),
}
// If Subnet License is present in k8s secrets, copy that to the CONSOLE_SUBNET_LICENSE env variable
// of the console tenant
license, _ := getSubscriptionLicense(ctx, &k8sClient, cluster.Namespace, OperatorSubnetLicenseSecretName)
if license != "" {
consoleSecretData[ConsoleSubnetLicense] = []byte(license)
}
imm := true
instanceSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: consoleSecretName,
Labels: map[string]string{
miniov2.TenantLabel: tenantName,
},
},
Immutable: &imm,
Data: consoleSecretData,
}
minInst.Spec.Console = &miniov2.ConsoleConfiguration{
Replicas: 1,
Image: getConsoleImage(),
ConsoleSecret: &corev1.LocalObjectReference{Name: consoleSecretName},
Resources: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{
"memory": resource.MustParse("64Mi"),
},
},
}
if tenantReq.TLS != nil && tenantReq.TLS.Console != nil {
consoleHasTLS = true
// Certificates used by the console instance
externalCertSecretName := fmt.Sprintf("%s-console-external-certificates", secretName)
certificates := []*models.KeyPairConfiguration{tenantReq.TLS.Console}
externalCertSecret, err := createOrReplaceExternalCertSecrets(ctx, &k8sClient, ns, certificates, externalCertSecretName, tenantName)
if err != nil {
return nil, prepareError(errorGeneric)
}
if len(externalCertSecret) > 0 {
minInst.Spec.Console.ExternalCertSecret = externalCertSecret[0]
}
}
// If IDP is not already enabled via LDAP (Active Directory) and OIDC configuration is present then
// enable oidc for console
if !idpEnabled && tenantReq.Idp != nil && tenantReq.Idp.Oidc != nil {
url := *tenantReq.Idp.Oidc.URL
clientID := *tenantReq.Idp.Oidc.ClientID
secretID := *tenantReq.Idp.Oidc.SecretID
if url != "" && clientID != "" && secretID != "" {
instanceSecret.Data["CONSOLE_IDP_URL"] = []byte(url)
instanceSecret.Data["CONSOLE_IDP_CLIENT_ID"] = []byte(clientID)
instanceSecret.Data["CONSOLE_IDP_SECRET"] = []byte(secretID)
consoleScheme := "http"
consolePort := 9090
// If Console will be deployed with TLS enabled (using AutoCert or External certificates)
if consoleHasTLS {
consoleScheme = "https"
consolePort = 9443
}
// https://[HOSTNAME]:9443 will be replaced by javascript in the browser to use the actual hostname
// assigned to Console, eg: https://localhost:9443
instanceSecret.Data["CONSOLE_IDP_CALLBACK"] = []byte(fmt.Sprintf("%s://[HOSTNAME]:%d/oauth_callback", consoleScheme, consolePort))
}
}
_, err = clientSet.CoreV1().Secrets(ns).Create(ctx, &instanceSecret, metav1.CreateOptions{})
if err != nil {
return nil, prepareError(errorGeneric)
}
// Set Labels, Annotations and Node Selector for Console
if tenantReq.Console != nil {
minInst.Spec.Console.Annotations = tenantReq.Console.Annotations
minInst.Spec.Console.Labels = tenantReq.Console.Labels
minInst.Spec.Console.NodeSelector = tenantReq.Console.NodeSelector
}
// External TLS CA certificates for Console
if tenantReq.TLS != nil && len(tenantReq.TLS.ConsoleCaCertificates) > 0 {
var caCertificates []tenantSecret
for i, caCertificate := range tenantReq.TLS.ConsoleCaCertificates {
certificateContent, err := base64.StdEncoding.DecodeString(caCertificate)
if err != nil {
return nil, prepareError(errorGeneric, nil, err)
}
caCertificates = append(caCertificates, tenantSecret{
Name: fmt.Sprintf("console-ca-certificate-%d", i),
Content: map[string][]byte{
"public.crt": certificateContent,
},
})
}
if len(caCertificates) > 0 {
certificateSecrets, err := createOrReplaceSecrets(ctx, &k8sClient, ns, caCertificates, tenantName)
if err != nil {
return nil, prepareError(errorGeneric, nil, err)
}
minInst.Spec.Console.ExternalCaCertSecret = certificateSecrets
}
}
}
// add annotations
var annotations map[string]string
if len(tenantReq.Annotations) > 0 {
annotations = tenantReq.Annotations
minInst.Annotations = annotations
}
// set the pools if they are provided
for _, pool := range tenantReq.Pools {
pool, err := parseTenantPoolRequest(pool)
if err != nil {
LogError("parseTenantPoolRequest failed: %v", err)
return nil, prepareError(err)
}
minInst.Spec.Pools = append(minInst.Spec.Pools, *pool)
}
// Set Mount Path if provided
if tenantReq.MounthPath != "" {
minInst.Spec.Mountpath = tenantReq.MounthPath
}
// We accept either `image_pull_secret` or the individual details of the `image_registry` but not both
var imagePullSecret string
if tenantReq.ImagePullSecret != "" {
imagePullSecret = tenantReq.ImagePullSecret
} else if imagePullSecret, err = setImageRegistry(ctx, tenantReq.ImageRegistry, clientSet.CoreV1(), ns, tenantName); err != nil {
return nil, prepareError(err)
}
// pass the image pull secret to the Tenant
if imagePullSecret != "" {
minInst.Spec.ImagePullSecret = corev1.LocalObjectReference{
Name: imagePullSecret,
}
}
// prometheus annotations support
if tenantReq.EnablePrometheus != nil && *tenantReq.EnablePrometheus && minInst.Annotations != nil {
minInst.Annotations[prometheusPath] = "/minio/prometheus/metrics"
minInst.Annotations[prometheusPort] = fmt.Sprint(miniov2.MinIOPort)
minInst.Annotations[prometheusScrape] = "true"
}
// set console image if provided (Spec.Console is only populated when the console is enabled)
if tenantReq.ConsoleImage != "" && minInst.Spec.Console != nil {
minInst.Spec.Console.Image = tenantReq.ConsoleImage
}
// Defaults for the Log Search configuration
diskSpaceFromAPI := int64(5) // Default is 5
logSearchStorageClass := "" // Default is ""
logSearchImage := ""
logSearchPgImage := ""
if tenantReq.LogSearchConfiguration != nil {
if tenantReq.LogSearchConfiguration.StorageSize != nil {
diskSpaceFromAPI = int64(*tenantReq.LogSearchConfiguration.StorageSize)
}
if tenantReq.LogSearchConfiguration.StorageClass != "" {
logSearchStorageClass = tenantReq.LogSearchConfiguration.StorageClass
}
if tenantReq.LogSearchConfiguration.StorageClass == "" && len(tenantReq.Pools) > 0 {
logSearchStorageClass = tenantReq.Pools[0].VolumeConfiguration.StorageClassName
}
if tenantReq.LogSearchConfiguration.Image != "" {
logSearchImage = tenantReq.LogSearchConfiguration.Image
}
if tenantReq.LogSearchConfiguration.PostgresImage != "" {
logSearchPgImage = tenantReq.LogSearchConfiguration.PostgresImage
}
}
logSearchDiskSpace := resource.NewQuantity(diskSpaceFromAPI, resource.DecimalExponent)
// activate log search and prometheus by default
minInst.Spec.Log = &miniov2.LogConfig{
Audit: &miniov2.AuditConfig{DiskCapacityGB: swag.Int(10)},
Db: &miniov2.LogDbConfig{
VolumeClaimTemplate: &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: tenantName + "-log",
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: *logSearchDiskSpace,
},
},
StorageClassName: &logSearchStorageClass,
},
},
},
}
if logSearchImage != "" {
minInst.Spec.Log.Image = logSearchImage
}
if logSearchPgImage != "" {
minInst.Spec.Log.Db.Image = logSearchPgImage
}
prometheusDiskSpace := 5 // Default is 5 by API
prometheusStorageClass := "" // Default is ""
prometheusImage := "" // Default is ""
if tenantReq.PrometheusConfiguration != nil {
if tenantReq.PrometheusConfiguration.StorageSize != nil {
prometheusDiskSpace = int(*tenantReq.PrometheusConfiguration.StorageSize)
}
if tenantReq.PrometheusConfiguration.StorageClass != "" {
prometheusStorageClass = tenantReq.PrometheusConfiguration.StorageClass
}
// Default class name for prometheus
if tenantReq.PrometheusConfiguration.StorageClass == "" && len(tenantReq.Pools) > 0 {
prometheusStorageClass = tenantReq.Pools[0].VolumeConfiguration.StorageClassName
}
if tenantReq.PrometheusConfiguration.Image != "" {
prometheusImage = tenantReq.PrometheusConfiguration.Image
}
}
minInst.Spec.Prometheus = &miniov2.PrometheusConfig{
DiskCapacityDB: swag.Int(prometheusDiskSpace),
StorageClassName: &prometheusStorageClass,
}
if prometheusImage != "" {
minInst.Spec.Prometheus.Image = prometheusImage
}
// expose services
if tenantReq.ExposeMinio || tenantReq.ExposeConsole {
minInst.Spec.ExposeServices = &miniov2.ExposeServices{
MinIO: tenantReq.ExposeMinio,
Console: tenantReq.ExposeConsole,
}
}
opClient, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err)
}
_, err = opClient.MinioV2().Tenants(ns).Create(context.Background(), &minInst, metav1.CreateOptions{})
if err != nil {
LogError("Creating new tenant failed with: %v", err)
return nil, prepareError(err)
}
// Integrations
if os.Getenv("GKE_INTEGRATION") != "" {
err := gkeIntegration(clientSet, tenantName, ns, session.STSSessionToken)
if err != nil {
return nil, prepareError(err)
}
}
response = &models.CreateTenantResponse{}
// Attach Console Credentials
if enableConsole {
var itemsToReturn []*models.TenantResponseItem
if tenantReq.Idp == nil || len(tenantReq.Idp.Keys) == 0 || keyElementEmpty {
itemsToReturn = append(itemsToReturn, &models.TenantResponseItem{AccessKey: tenantUserAccessKey, SecretKey: tenantUserSecretKey})
} else { // IDP Keys
for _, item := range tenantReq.Idp.Keys {
itemsToReturn = append(itemsToReturn, &models.TenantResponseItem{AccessKey: *item.AccessKey, SecretKey: *item.SecretKey})
}
}
response.Console = itemsToReturn
}
return response, nil
}
// setImageRegistry creates a secret to store the private registry credentials; if one already exists, it updates it
// returns the name of the secret created/updated
func setImageRegistry(ctx context.Context, req *models.ImageRegistry, clientset v1.CoreV1Interface, namespace, tenantName string) (string, error) {
if req == nil || req.Registry == nil || req.Username == nil || req.Password == nil {
return "", nil
}
credentials := make(map[string]imageRegistryCredentials)
// username:password encoded
authData := []byte(fmt.Sprintf("%s:%s", *req.Username, *req.Password))
authStr := base64.StdEncoding.EncodeToString(authData)
credentials[*req.Registry] = imageRegistryCredentials{
Username: *req.Username,
Password: *req.Password,
Auth: authStr,
}
imRegistry := imageRegistry{
Auths: credentials,
}
imRegistryJSON, err := json.Marshal(imRegistry)
if err != nil {
return "", err
}
pullSecretName := fmt.Sprintf("%s-regcred", tenantName)
secretCredentials := map[string][]byte{
corev1.DockerConfigJsonKey: []byte(string(imRegistryJSON)),
}
// Get or Create secret if it doesn't exist
currentSecret, err := clientset.Secrets(namespace).Get(ctx, pullSecretName, metav1.GetOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
instanceSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: pullSecretName,
Labels: map[string]string{
miniov2.TenantLabel: tenantName,
},
},
Data: secretCredentials,
Type: corev1.SecretTypeDockerConfigJson,
}
_, err = clientset.Secrets(namespace).Create(ctx, &instanceSecret, metav1.CreateOptions{})
if err != nil {
return "", err
}
return pullSecretName, nil
}
return "", err
}
currentSecret.Data = secretCredentials
_, err = clientset.Secrets(namespace).Update(ctx, currentSecret, metav1.UpdateOptions{})
if err != nil {
return "", err
}
return pullSecretName, nil
}
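// Editor's note (illustrative sketch, not part of the original handler): for a request
// like {Registry: "registry.example.com", Username: "user", Password: "pass"}, the
// .dockerconfigjson payload built above serializes roughly as
//
//	{"auths":{"registry.example.com":{"username":"user","password":"pass","auth":"dXNlcjpwYXNz"}}}
//
// where "auth" is base64("user:pass"). The registry host and credentials here are
// hypothetical values used only to show the shape of the secret.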
// updateTenantAction does an update on the minioTenant by patching the desired changes
func updateTenantAction(ctx context.Context, operatorClient OperatorClientI, clientset v1.CoreV1Interface, httpCl cluster.HTTPClientI, namespace string, params admin_api.UpdateTenantParams) error {
imageToUpdate := params.Body.Image
imageRegistryReq := params.Body.ImageRegistry
minInst, err := operatorClient.TenantGet(ctx, namespace, params.Tenant, metav1.GetOptions{})
if err != nil {
return err
}
// we accept either the `image_pull_secret` or the `image_registry` details, but not both
if params.Body.ImagePullSecret != "" {
minInst.Spec.ImagePullSecret.Name = params.Body.ImagePullSecret
} else {
// update the image pull secret content
if _, err := setImageRegistry(ctx, imageRegistryReq, clientset, namespace, params.Tenant); err != nil {
LogError("error setting image registry secret: %v", err)
return err
}
}
// update the console image
if strings.TrimSpace(params.Body.ConsoleImage) != "" && minInst.Spec.Console != nil {
minInst.Spec.Console.Image = params.Body.ConsoleImage
}
// if image to update is empty we'll use the latest image by default
if strings.TrimSpace(imageToUpdate) != "" {
minInst.Spec.Image = imageToUpdate
} else {
im, err := cluster.GetLatestMinioImage(httpCl)
// if we can't determine the latest MinIO image, we won't auto-update it unless it's explicitly set by name
if err == nil {
minInst.Spec.Image = *im
}
}
// Prometheus Annotations
currentAnnotations := minInst.Annotations
prometheusAnnotations := map[string]string{
prometheusPath: "/minio/prometheus/metrics",
prometheusPort: fmt.Sprint(miniov2.MinIOPort),
prometheusScrape: "true",
}
if params.Body.EnablePrometheus && currentAnnotations != nil {
// add prometheus annotations to the tenant
minInst.Annotations = addAnnotations(currentAnnotations, prometheusAnnotations)
// add prometheus annotations to each pool
if minInst.Spec.Pools != nil {
for _, pool := range minInst.Spec.Pools {
poolAnnotations := pool.VolumeClaimTemplate.GetObjectMeta().GetAnnotations()
pool.VolumeClaimTemplate.GetObjectMeta().SetAnnotations(addAnnotations(poolAnnotations, prometheusAnnotations))
}
}
} else {
// remove prometheus annotations from the tenant
minInst.Annotations = removeAnnotations(currentAnnotations, prometheusAnnotations)
// remove prometheus annotations from each pool
if minInst.Spec.Pools != nil {
for _, pool := range minInst.Spec.Pools {
poolAnnotations := pool.VolumeClaimTemplate.GetObjectMeta().GetAnnotations()
pool.VolumeClaimTemplate.GetObjectMeta().SetAnnotations(removeAnnotations(poolAnnotations, prometheusAnnotations))
}
}
}
payloadBytes, err := json.Marshal(minInst)
if err != nil {
return err
}
_, err = operatorClient.TenantPatch(ctx, namespace, minInst.Name, types.MergePatchType, payloadBytes, metav1.PatchOptions{})
if err != nil {
return err
}
return nil
}
// addAnnotations will merge two annotation maps
func addAnnotations(annotationsOne, annotationsTwo map[string]string) map[string]string {
if annotationsOne == nil {
annotationsOne = map[string]string{}
}
for key, value := range annotationsTwo {
annotationsOne[key] = value
}
return annotationsOne
}
// removeAnnotations will remove keys from the first annotations map based on the second one
func removeAnnotations(annotationsOne, annotationsTwo map[string]string) map[string]string {
if annotationsOne == nil {
annotationsOne = map[string]string{}
}
for key := range annotationsTwo {
delete(annotationsOne, key)
}
return annotationsOne
}
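// exampleAnnotationHelpers is an editor-added sketch (not part of the original handlers)
// showing how the two helpers above compose: addAnnotations merges the second map into
// the first, and removeAnnotations deletes that map's keys again. The "team" label is a
// hypothetical value used only for illustration.
func exampleAnnotationHelpers() map[string]string {
base := map[string]string{"team": "storage"}
prom := map[string]string{prometheusPath: "/minio/prometheus/metrics"}
merged := addAnnotations(base, prom) // base now also carries the prometheus scrape path
return removeAnnotations(merged, prom) // back to just {"team": "storage"}
}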
func getUpdateTenantResponse(session *models.Principal, params admin_api.UpdateTenantParams) *models.Error {
ctx := context.Background()
opClientClientSet, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return prepareError(err)
}
// get Kubernetes Client
clientSet, err := cluster.K8sClient(session.STSSessionToken)
if err != nil {
return prepareError(err)
}
opClient := &operatorClient{
client: opClientClientSet,
}
httpC := &cluster.HTTPClient{
Client: &http.Client{
Timeout: 4 * time.Second,
},
}
if err := updateTenantAction(ctx, opClient, clientSet.CoreV1(), httpC, params.Namespace, params); err != nil {
return prepareError(err, errors.New("unable to update tenant"))
}
return nil
}
// addTenantPool adds a pool to an existing tenant
func addTenantPool(ctx context.Context, operatorClient OperatorClientI, params admin_api.TenantAddPoolParams) error {
tenant, err := operatorClient.TenantGet(ctx, params.Namespace, params.Tenant, metav1.GetOptions{})
if err != nil {
return err
}
poolParams := params.Body
pool, err := parseTenantPoolRequest(poolParams)
if err != nil {
return err
}
tenant.Spec.Pools = append(tenant.Spec.Pools, *pool)
payloadBytes, err := json.Marshal(tenant)
if err != nil {
return err
}
_, err = operatorClient.TenantPatch(ctx, params.Namespace, tenant.Name, types.MergePatchType, payloadBytes, metav1.PatchOptions{})
if err != nil {
return err
}
return nil
}
func getTenantAddPoolResponse(session *models.Principal, params admin_api.TenantAddPoolParams) *models.Error {
ctx := context.Background()
opClientClientSet, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return prepareError(err)
}
opClient := &operatorClient{
client: opClientClientSet,
}
if err := addTenantPool(ctx, opClient, params); err != nil {
return prepareError(err, errors.New("unable to add pool"))
}
return nil
}
// getTenantUsageResponse returns the usage of a tenant
func getTenantUsageResponse(session *models.Principal, params admin_api.GetTenantUsageParams) (*models.TenantUsage, *models.Error) {
// 5 seconds timeout
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
opClientClientSet, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err, errorUnableToGetTenantUsage)
}
clientSet, err := cluster.K8sClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err, errorUnableToGetTenantUsage)
}
opClient := &operatorClient{
client: opClientClientSet,
}
k8sClient := &k8sClient{
client: clientSet,
}
minTenant, err := getTenant(ctx, opClient, params.Namespace, params.Tenant)
if err != nil {
return nil, prepareError(err, errorUnableToGetTenantUsage)
}
minTenant.EnsureDefaults()
svcURL := GetTenantServiceURL(minTenant)
// getTenantAdminClient will use all certificates under ~/.console/certs/CAs to trust the TLS connections with MinIO tenants
mAdmin, err := getTenantAdminClient(
ctx,
k8sClient,
minTenant,
svcURL,
)
if err != nil {
return nil, prepareError(err, errorUnableToGetTenantUsage)
}
// create a minioClient interface implementation
// defining the client to be used
adminClient := adminClient{client: mAdmin}
// serialize output
adminInfo, err := getAdminInfo(ctx, adminClient)
if err != nil {
return nil, prepareError(err, errorUnableToGetTenantUsage)
}
info := &models.TenantUsage{Used: adminInfo.Usage, DiskUsed: adminInfo.DisksUsage}
return info, nil
}
func getTenantPodsResponse(session *models.Principal, params admin_api.GetTenantPodsParams) ([]*models.TenantPod, *models.Error) {
ctx := context.Background()
clientset, err := cluster.K8sClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err)
}
listOpts := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", miniov2.TenantLabel, params.Tenant),
}
pods, err := clientset.CoreV1().Pods(params.Namespace).List(ctx, listOpts)
if err != nil {
return nil, prepareError(err)
}
retval := []*models.TenantPod{}
for _, pod := range pods.Items {
var restarts int64
if len(pod.Status.ContainerStatuses) > 0 {
restarts = int64(pod.Status.ContainerStatuses[0].RestartCount)
}
retval = append(retval, &models.TenantPod{
Name: swag.String(pod.Name),
Status: string(pod.Status.Phase),
TimeCreated: pod.CreationTimestamp.Unix(),
PodIP: pod.Status.PodIP,
Restarts: restarts,
Node: pod.Spec.NodeName})
}
return retval, nil
}
func getPodLogsResponse(session *models.Principal, params admin_api.GetPodLogsParams) (string, *models.Error) {
ctx := context.Background()
clientset, err := cluster.K8sClient(session.STSSessionToken)
if err != nil {
return "", prepareError(err)
}
listOpts := &corev1.PodLogOptions{}
logs := clientset.CoreV1().Pods(params.Namespace).GetLogs(params.PodName, listOpts)
buff, err := logs.DoRaw(ctx)
if err != nil {
return "", prepareError(err)
}
return string(buff), nil
}
func getPodEventsResponse(session *models.Principal, params admin_api.GetPodEventsParams) (models.EventListWrapper, *models.Error) {
ctx := context.Background()
clientset, err := cluster.K8sClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err)
}
pod, err := clientset.CoreV1().Pods(params.Namespace).Get(ctx, params.PodName, metav1.GetOptions{})
if err != nil {
return nil, prepareError(err)
}
events, err := clientset.CoreV1().Events(params.Namespace).List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.uid=%s", pod.UID)})
if err != nil {
return nil, prepareError(err)
}
retval := models.EventListWrapper{}
for i := 0; i < len(events.Items); i++ {
retval = append(retval, &models.EventListElement{
Namespace: events.Items[i].Namespace,
LastSeen: events.Items[i].LastTimestamp.Unix(),
Message: events.Items[i].Message,
EventType: events.Items[i].Type,
Reason: events.Items[i].Reason,
})
}
sort.SliceStable(retval, func(i int, j int) bool {
return retval[i].LastSeen < retval[j].LastSeen
})
return retval, nil
}
// parseTenantPoolRequest parses a pool request and returns the equivalent
// miniov2.Pool object
func parseTenantPoolRequest(poolParams *models.Pool) (*miniov2.Pool, error) {
if poolParams.VolumeConfiguration == nil {
return nil, errors.New("a volume configuration must be specified")
}
if poolParams.VolumeConfiguration.Size == nil || *poolParams.VolumeConfiguration.Size <= int64(0) {
return nil, errors.New("volume size must be greater than 0")
}
if poolParams.Servers == nil || *poolParams.Servers <= 0 {
return nil, errors.New("number of servers must be greater than 0")
}
if poolParams.VolumesPerServer == nil || *poolParams.VolumesPerServer <= 0 {
return nil, errors.New("number of volumes per server must be greater than 0")
}
volumeSize := resource.NewQuantity(*poolParams.VolumeConfiguration.Size, resource.DecimalExponent)
volTemp := corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: *volumeSize,
},
},
}
if poolParams.VolumeConfiguration.StorageClassName != "" {
volTemp.StorageClassName = &poolParams.VolumeConfiguration.StorageClassName
}
// parse resources' requests
resourcesRequests := make(corev1.ResourceList)
resourcesLimits := make(corev1.ResourceList)
if poolParams.Resources != nil {
for key, val := range poolParams.Resources.Requests {
resourcesRequests[corev1.ResourceName(key)] = *resource.NewQuantity(val, resource.BinarySI)
}
for key, val := range poolParams.Resources.Limits {
resourcesLimits[corev1.ResourceName(key)] = *resource.NewQuantity(val, resource.BinarySI)
}
}
// parse Node Affinity
nodeSelectorTerms := []corev1.NodeSelectorTerm{}
preferredSchedulingTerm := []corev1.PreferredSchedulingTerm{}
if poolParams.Affinity != nil && poolParams.Affinity.NodeAffinity != nil {
if poolParams.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
for _, elem := range poolParams.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
term := parseModelsNodeSelectorTerm(elem)
nodeSelectorTerms = append(nodeSelectorTerms, term)
}
}
for _, elem := range poolParams.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
pst := corev1.PreferredSchedulingTerm{
Weight: *elem.Weight,
Preference: parseModelsNodeSelectorTerm(elem.Preference),
}
preferredSchedulingTerm = append(preferredSchedulingTerm, pst)
}
}
var nodeAffinity *corev1.NodeAffinity
if len(nodeSelectorTerms) > 0 || len(preferredSchedulingTerm) > 0 {
nodeAffinity = &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: nodeSelectorTerms,
},
PreferredDuringSchedulingIgnoredDuringExecution: preferredSchedulingTerm,
}
}
// parse Pod Affinity
podAffinityTerms := []corev1.PodAffinityTerm{}
weightedPodAffinityTerms := []corev1.WeightedPodAffinityTerm{}
if poolParams.Affinity != nil && poolParams.Affinity.PodAffinity != nil {
for _, elem := range poolParams.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
podAffinityTerms = append(podAffinityTerms, parseModelPodAffinityTerm(elem))
}
for _, elem := range poolParams.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
wAffinityTerm := corev1.WeightedPodAffinityTerm{
Weight: *elem.Weight,
PodAffinityTerm: parseModelPodAffinityTerm(elem.PodAffinityTerm),
}
weightedPodAffinityTerms = append(weightedPodAffinityTerms, wAffinityTerm)
}
}
var podAffinity *corev1.PodAffinity
if len(podAffinityTerms) > 0 || len(weightedPodAffinityTerms) > 0 {
podAffinity = &corev1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: podAffinityTerms,
PreferredDuringSchedulingIgnoredDuringExecution: weightedPodAffinityTerms,
}
}
// parse Pod Anti Affinity
podAntiAffinityTerms := []corev1.PodAffinityTerm{}
weightedPodAntiAffinityTerms := []corev1.WeightedPodAffinityTerm{}
if poolParams.Affinity != nil && poolParams.Affinity.PodAntiAffinity != nil {
for _, elem := range poolParams.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
podAntiAffinityTerms = append(podAntiAffinityTerms, parseModelPodAffinityTerm(elem))
}
for _, elem := range poolParams.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
wAffinityTerm := corev1.WeightedPodAffinityTerm{
Weight: *elem.Weight,
PodAffinityTerm: parseModelPodAffinityTerm(elem.PodAffinityTerm),
}
weightedPodAntiAffinityTerms = append(weightedPodAntiAffinityTerms, wAffinityTerm)
}
}
var podAntiAffinity *corev1.PodAntiAffinity
if len(podAntiAffinityTerms) > 0 || len(weightedPodAntiAffinityTerms) > 0 {
podAntiAffinity = &corev1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: podAntiAffinityTerms,
PreferredDuringSchedulingIgnoredDuringExecution: weightedPodAntiAffinityTerms,
}
}
var affinity *corev1.Affinity
if nodeAffinity != nil || podAffinity != nil || podAntiAffinity != nil {
affinity = &corev1.Affinity{
NodeAffinity: nodeAffinity,
PodAffinity: podAffinity,
PodAntiAffinity: podAntiAffinity,
}
}
// parse tolerations
tolerations := []corev1.Toleration{}
for _, elem := range poolParams.Tolerations {
var tolerationSeconds *int64
if elem.TolerationSeconds != nil {
// elem.TolerationSeconds.Seconds is allowed to be nil
tolerationSeconds = elem.TolerationSeconds.Seconds
}
toleration := corev1.Toleration{
Key: elem.Key,
Operator: corev1.TolerationOperator(elem.Operator),
Value: elem.Value,
Effect: corev1.TaintEffect(elem.Effect),
TolerationSeconds: tolerationSeconds,
}
tolerations = append(tolerations, toleration)
}
// Pass annotations to the volume
vct := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "data",
Labels: poolParams.VolumeConfiguration.Labels,
Annotations: poolParams.VolumeConfiguration.Annotations,
},
Spec: volTemp,
}
pool := &miniov2.Pool{
Name: poolParams.Name,
Servers: int32(*poolParams.Servers),
VolumesPerServer: *poolParams.VolumesPerServer,
VolumeClaimTemplate: vct,
Resources: corev1.ResourceRequirements{
Requests: resourcesRequests,
Limits: resourcesLimits,
},
NodeSelector: poolParams.NodeSelector,
Affinity: affinity,
Tolerations: tolerations,
}
return pool, nil
}
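// Editor's sketch (hypothetical values, not part of the original file): the smallest
// request parseTenantPoolRequest accepts needs servers, volumes per server and a
// volume size, e.g.
//
//	pool, err := parseTenantPoolRequest(&models.Pool{
//		Name:             "pool-0",
//		Servers:          swag.Int64(4),
//		VolumesPerServer: swag.Int32(4),
//		VolumeConfiguration: &models.PoolVolumeConfiguration{
//			Size: swag.Int64(1073741824), // 1Gi expressed in bytes
//		},
//	})
//
// Everything else (affinity, tolerations, resources, node selector) is optional and defaults to empty.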
func parseModelPodAffinityTerm(term *models.PodAffinityTerm) corev1.PodAffinityTerm {
labelMatchExpressions := []metav1.LabelSelectorRequirement{}
for _, exp := range term.LabelSelector.MatchExpressions {
labelSelectorReq := metav1.LabelSelectorRequirement{
Key: *exp.Key,
Operator: metav1.LabelSelectorOperator(*exp.Operator),
Values: exp.Values,
}
labelMatchExpressions = append(labelMatchExpressions, labelSelectorReq)
}
podAffinityTerm := corev1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: labelMatchExpressions,
MatchLabels: term.LabelSelector.MatchLabels,
},
Namespaces: term.Namespaces,
TopologyKey: *term.TopologyKey,
}
return podAffinityTerm
}
func parseModelsNodeSelectorTerm(elem *models.NodeSelectorTerm) corev1.NodeSelectorTerm {
var term corev1.NodeSelectorTerm
for _, matchExpression := range elem.MatchExpressions {
matchExp := corev1.NodeSelectorRequirement{
Key: *matchExpression.Key,
Operator: corev1.NodeSelectorOperator(*matchExpression.Operator),
Values: matchExpression.Values,
}
term.MatchExpressions = append(term.MatchExpressions, matchExp)
}
for _, matchField := range elem.MatchFields {
matchF := corev1.NodeSelectorRequirement{
Key: *matchField.Key,
Operator: corev1.NodeSelectorOperator(*matchField.Operator),
Values: matchField.Values,
}
term.MatchFields = append(term.MatchFields, matchF)
}
return term
}
// parseTenantPool parses a miniov2 Pool object and returns the equivalent
// models.Pool object
func parseTenantPool(pool *miniov2.Pool) *models.Pool {
var size *int64
var storageClassName string
if pool.VolumeClaimTemplate != nil {
size = swag.Int64(pool.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value())
if pool.VolumeClaimTemplate.Spec.StorageClassName != nil {
storageClassName = *pool.VolumeClaimTemplate.Spec.StorageClassName
}
}
// parse resources' requests
var resources *models.PoolResources
resourcesRequests := make(map[string]int64)
resourcesLimits := make(map[string]int64)
for key, val := range pool.Resources.Requests {
resourcesRequests[key.String()] = val.Value()
}
for key, val := range pool.Resources.Limits {
resourcesLimits[key.String()] = val.Value()
}
if len(resourcesRequests) > 0 || len(resourcesLimits) > 0 {
resources = &models.PoolResources{
Limits: resourcesLimits,
Requests: resourcesRequests,
}
}
// parse Node Affinity
nodeSelectorTerms := []*models.NodeSelectorTerm{}
preferredSchedulingTerm := []*models.PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{}
if pool.Affinity != nil && pool.Affinity.NodeAffinity != nil {
if pool.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
for _, elem := range pool.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
term := parseNodeSelectorTerm(&elem)
nodeSelectorTerms = append(nodeSelectorTerms, term)
}
}
for _, elem := range pool.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
pst := &models.PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{
Weight: swag.Int32(elem.Weight),
Preference: parseNodeSelectorTerm(&elem.Preference),
}
preferredSchedulingTerm = append(preferredSchedulingTerm, pst)
}
}
var nodeAffinity *models.PoolAffinityNodeAffinity
if len(nodeSelectorTerms) > 0 || len(preferredSchedulingTerm) > 0 {
nodeAffinity = &models.PoolAffinityNodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &models.PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution{
NodeSelectorTerms: nodeSelectorTerms,
},
PreferredDuringSchedulingIgnoredDuringExecution: preferredSchedulingTerm,
}
}
// parse Pod Affinity
podAffinityTerms := []*models.PodAffinityTerm{}
weightedPodAffinityTerms := []*models.PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{}
if pool.Affinity != nil && pool.Affinity.PodAffinity != nil {
for _, elem := range pool.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
podAffinityTerms = append(podAffinityTerms, parsePodAffinityTerm(&elem))
}
for _, elem := range pool.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
wAffinityTerm := &models.PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{
Weight: swag.Int32(elem.Weight),
PodAffinityTerm: parsePodAffinityTerm(&elem.PodAffinityTerm),
}
weightedPodAffinityTerms = append(weightedPodAffinityTerms, wAffinityTerm)
}
}
var podAffinity *models.PoolAffinityPodAffinity
if len(podAffinityTerms) > 0 || len(weightedPodAffinityTerms) > 0 {
podAffinity = &models.PoolAffinityPodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: podAffinityTerms,
PreferredDuringSchedulingIgnoredDuringExecution: weightedPodAffinityTerms,
}
}
// parse Pod Anti Affinity
podAntiAffinityTerms := []*models.PodAffinityTerm{}
weightedPodAntiAffinityTerms := []*models.PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{}
if pool.Affinity != nil && pool.Affinity.PodAntiAffinity != nil {
for _, elem := range pool.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
podAntiAffinityTerms = append(podAntiAffinityTerms, parsePodAffinityTerm(&elem))
}
for _, elem := range pool.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
wAffinityTerm := &models.PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{
Weight: swag.Int32(elem.Weight),
PodAffinityTerm: parsePodAffinityTerm(&elem.PodAffinityTerm),
}
weightedPodAntiAffinityTerms = append(weightedPodAntiAffinityTerms, wAffinityTerm)
}
}
var podAntiAffinity *models.PoolAffinityPodAntiAffinity
if len(podAntiAffinityTerms) > 0 || len(weightedPodAntiAffinityTerms) > 0 {
podAntiAffinity = &models.PoolAffinityPodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: podAntiAffinityTerms,
PreferredDuringSchedulingIgnoredDuringExecution: weightedPodAntiAffinityTerms,
}
}
// build affinity object
var affinity *models.PoolAffinity
if nodeAffinity != nil || podAffinity != nil || podAntiAffinity != nil {
affinity = &models.PoolAffinity{
NodeAffinity: nodeAffinity,
PodAffinity: podAffinity,
PodAntiAffinity: podAntiAffinity,
}
}
// parse tolerations
var tolerations models.PoolTolerations
for _, elem := range pool.Tolerations {
var tolerationSecs *models.PoolTolerationSeconds
if elem.TolerationSeconds != nil {
tolerationSecs = &models.PoolTolerationSeconds{
Seconds: elem.TolerationSeconds,
}
}
toleration := &models.PoolTolerationsItems0{
Key: elem.Key,
Operator: string(elem.Operator),
Value: elem.Value,
Effect: string(elem.Effect),
TolerationSeconds: tolerationSecs,
}
tolerations = append(tolerations, toleration)
}
poolModel := &models.Pool{
Name: pool.Name,
Servers: swag.Int64(int64(pool.Servers)),
VolumesPerServer: swag.Int32(pool.VolumesPerServer),
VolumeConfiguration: &models.PoolVolumeConfiguration{
Size: size,
StorageClassName: storageClassName,
},
NodeSelector: pool.NodeSelector,
Resources: resources,
Affinity: affinity,
Tolerations: tolerations,
}
return poolModel
}
func parsePodAffinityTerm(term *corev1.PodAffinityTerm) *models.PodAffinityTerm {
labelMatchExpressions := []*models.PodAffinityTermLabelSelectorMatchExpressionsItems0{}
for _, exp := range term.LabelSelector.MatchExpressions {
labelSelectorReq := &models.PodAffinityTermLabelSelectorMatchExpressionsItems0{
Key: swag.String(exp.Key),
Operator: swag.String(string(exp.Operator)),
Values: exp.Values,
}
labelMatchExpressions = append(labelMatchExpressions, labelSelectorReq)
}
podAffinityTerm := &models.PodAffinityTerm{
LabelSelector: &models.PodAffinityTermLabelSelector{
MatchExpressions: labelMatchExpressions,
MatchLabels: term.LabelSelector.MatchLabels,
},
Namespaces: term.Namespaces,
TopologyKey: swag.String(term.TopologyKey),
}
return podAffinityTerm
}
func parseNodeSelectorTerm(term *corev1.NodeSelectorTerm) *models.NodeSelectorTerm {
var t models.NodeSelectorTerm
for _, matchExpression := range term.MatchExpressions {
matchExp := &models.NodeSelectorTermMatchExpressionsItems0{
Key: swag.String(matchExpression.Key),
Operator: swag.String(string(matchExpression.Operator)),
Values: matchExpression.Values,
}
t.MatchExpressions = append(t.MatchExpressions, matchExp)
}
for _, matchField := range term.MatchFields {
matchF := &models.NodeSelectorTermMatchFieldsItems0{
Key: swag.String(matchField.Key),
Operator: swag.String(string(matchField.Operator)),
Values: matchField.Values,
}
t.MatchFields = append(t.MatchFields, matchF)
}
return &t
}
func getTenantUpdatePoolResponse(session *models.Principal, params admin_api.TenantUpdatePoolsParams) (*models.Tenant, *models.Error) {
ctx := context.Background()
opClientClientSet, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err)
}
opClient := &operatorClient{
client: opClientClientSet,
}
t, err := updateTenantPools(ctx, opClient, params.Namespace, params.Tenant, params.Body.Pools)
if err != nil {
LogError("error updating Tenant's pools: %v", err)
return nil, prepareError(err)
}
// parse it to models.Tenant
tenant := getTenantInfo(t)
return tenant, nil
}
// updateTenantPools sets the Tenant's pools to the ones provided by the request
//
// It does the equivalent of a PUT request on the Tenant's pools
func updateTenantPools(
ctx context.Context,
operatorClient OperatorClientI,
namespace string,
tenantName string,
poolsReq []*models.Pool) (*miniov2.Tenant, error) {
minInst, err := operatorClient.TenantGet(ctx, namespace, tenantName, metav1.GetOptions{})
if err != nil {
return nil, err
}
// set the pools if they are provided
var newPoolArray []miniov2.Pool
for _, pool := range poolsReq {
pool, err := parseTenantPoolRequest(pool)
if err != nil {
return nil, err
}
newPoolArray = append(newPoolArray, *pool)
}
// replace pools array
minInst.Spec.Pools = newPoolArray
minInst = minInst.DeepCopy()
minInst.EnsureDefaults()
payloadBytes, err := json.Marshal(minInst)
if err != nil {
return nil, err
}
tenantUpdated, err := operatorClient.TenantPatch(ctx, namespace, minInst.Name, types.MergePatchType, payloadBytes, metav1.PatchOptions{})
if err != nil {
return nil, err
}
return tenantUpdated, nil
}
func getTenantYAML(session *models.Principal, params admin_api.GetTenantYAMLParams) (*models.TenantYAML, *models.Error) {
// get Kubernetes Client
opClient, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return nil, prepareError(err)
}
tenant, err := opClient.MinioV2().Tenants(params.Namespace).Get(params.HTTPRequest.Context(), params.Tenant, metav1.GetOptions{})
if err != nil {
return nil, prepareError(err)
}
// remove managed fields
tenant.ManagedFields = []metav1.ManagedFieldsEntry{}
serializer := k8sJson.NewSerializerWithOptions(
k8sJson.DefaultMetaFactory, nil, nil,
k8sJson.SerializerOptions{
Yaml: true,
Pretty: true,
Strict: true,
},
)
buf := new(bytes.Buffer)
err = serializer.Encode(tenant, buf)
if err != nil {
return nil, prepareError(err)
}
yb := buf.String()
return &models.TenantYAML{Yaml: yb}, nil
}
func getUpdateTenantYAML(session *models.Principal, params admin_api.PutTenantYAMLParams) *models.Error {
// https://godoc.org/k8s.io/apimachinery/pkg/runtime#Scheme
scheme := runtime.NewScheme()
// https://godoc.org/k8s.io/apimachinery/pkg/runtime/serializer#CodecFactory
codecFactory := serializer.NewCodecFactory(scheme)
// https://godoc.org/k8s.io/apimachinery/pkg/runtime#Decoder
deserializer := codecFactory.UniversalDeserializer()
tenantObject, _, err := deserializer.Decode([]byte(params.Body.Yaml), nil, &miniov2.Tenant{})
if err != nil {
return &models.Error{Code: 400, Message: swag.String(err.Error())}
}
inTenant := tenantObject.(*miniov2.Tenant)
// get Kubernetes Client
opClient, err := cluster.OperatorClient(session.STSSessionToken)
if err != nil {
return prepareError(err)
}
tenant, err := opClient.MinioV2().Tenants(params.Namespace).Get(params.HTTPRequest.Context(), params.Tenant, metav1.GetOptions{})
if err != nil {
return prepareError(err)
}
upTenant := tenant.DeepCopy()
// only update safe fields: spec, metadata.finalizers, metadata.labels and metadata.annotations
upTenant.Labels = inTenant.Labels
upTenant.Annotations = inTenant.Annotations
upTenant.Finalizers = inTenant.Finalizers
upTenant.Spec = inTenant.Spec
_, err = opClient.MinioV2().Tenants(params.Namespace).Update(params.HTTPRequest.Context(), upTenant, metav1.UpdateOptions{})
if err != nil {
return &models.Error{Code: 400, Message: swag.String(err.Error())}
}
return nil
}
| [
"\"GKE_INTEGRATION\""
]
| []
| [
"GKE_INTEGRATION"
]
| [] | ["GKE_INTEGRATION"] | go | 1 | 0 | |
main.go | package picofeed
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"regexp"
"sort"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/mmcdole/gofeed"
"github.com/pkg/errors"
"gocloud.dev/blob"
"gocloud.dev/blob/s3blob"
)
const VERSION = "1.1"
const FETCH_TIMEOUT = 10 * time.Second
// setupAWS creates a connection to Amazon Simple Storage Service (S3).
func setupAWS(ctx context.Context, bucket string) (b *blob.Bucket, err error) {
sess := session.New()
profile := os.Getenv("AWS_PROFILE")
if profile == "" {
profile = "mine"
}
creds := credentials.NewChainCredentials(
[]credentials.Provider{
// If you want to set AWS_ACCESS_KEY_ID & AWS_SECRET_ACCESS_KEY envs
&credentials.EnvProvider{},
// For when I use cmd/
&credentials.SharedCredentialsProvider{Filename: "", Profile: profile},
// IIUC, this is how IAM role is assumed in the Lambda env
&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess)},
})
cfg := &aws.Config{
Region: aws.String("ap-southeast-1"),
Credentials: creds,
CredentialsChainVerboseErrors: aws.Bool(true),
}
sess, err = session.NewSession(cfg)
if err != nil {
return nil, err
}
b, err = s3blob.OpenBucket(ctx, sess, bucket, nil)
return
}
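// Editor's note (assumption, not in the original source): the pipeline above relies on
// two environment variables — AWS_PROFILE selects the shared-credentials profile
// (falling back to "mine"), and BUCKET names the S3 bucket that Refresh writes
// feeds/index.html into. A local run might therefore look like:
//
//	AWS_PROFILE=mine BUCKET=my-picofeed-bucket go run ./cmd/...
//
// "my-picofeed-bucket" and the cmd path are placeholders.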
// Refresh fetches the feeds in feeds.txt and places it on $BUCKET/feeds/index.html
func Refresh(ctx context.Context) error {
feedsList := []string{"feeds.txt"}
feeds := []*url.URL{}
for _, f := range feedsList {
newFeeds, err := parseFeedArg(f)
if err != nil {
return err
}
feeds = append(feeds, newFeeds...)
}
posts := fetchAll(ctx, feeds)
output := &bytes.Buffer{}
renderHtml(output, posts, "Jan 2006")
// https://github.com/google/go-cloud/tree/master/samples/tutorial
b, err := setupAWS(ctx, os.Getenv("BUCKET"))
if err != nil {
log.Fatalf("Failed to setup bucket: %s", err)
}
err = b.WriteAll(ctx, "feeds/index.html", output.Bytes(), nil)
if err != nil {
log.Fatalf("Failed to write: %s", err)
}
return nil
}
func render(posts []*Post, dateFormat string) {
grouped := groupByDate(posts, dateFormat)
for _, group := range grouped {
for i, p := range group {
if i == 0 {
fmt.Printf("%s\n", p.Timestamp.Format(dateFormat))
}
if len(p.Title) > 70 {
fmt.Printf(" %v\n", p.Title)
fmt.Printf(" %70v %s\n", "", p.Link)
} else {
fmt.Printf(" %-70v %s\n", p.Title, p.Link)
}
}
}
}
func renderHtml(f io.Writer, posts []*Post, dateFormat string) {
fmt.Fprintf(f, `<!DOCTYPE html>
<head>
<style>
body {
margin: 0 auto;
padding: 2em 0px;
max-width: 800px;
color: #888;
font-family: -apple-system,system-ui,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif;
font-size: 14px;
line-height: 1.4em;
}
h4 {color: #000;}
a {color: #000;}
a:visited {color: #888;}
</style>
</head>
<body>
<h4 style="padding-bottom: 2em">Picofeed</h4>
`)
grouped := groupByDate(posts, dateFormat)
for _, group := range grouped {
for i, p := range group {
if i == 0 {
fmt.Fprintf(f, "<h4>%s</h4>\n", p.Timestamp.Format(dateFormat))
}
fmt.Fprintf(f, "<div><a href=\"%s\">%s</a> (%s)</div>\n", p.Link, p.Title, p.shortFeedLink())
}
}
fmt.Fprintf(f, `</body>
</html>
`)
}
type Post struct {
Title string
Link string
Timestamp *time.Time
FeedLink string
FeedTitle string
}
func (p *Post) shortFeedLink() string {
u, err := url.Parse(p.FeedLink)
if err != nil {
return ""
}
return u.Host
}
type Posts []*Post
func (posts Posts) Len() int { return len(posts) }
func (posts Posts) Swap(i, j int) { posts[i], posts[j] = posts[j], posts[i] }
type ByTimestamp struct{ Posts }
func (posts ByTimestamp) Less(i, j int) bool {
return posts.Posts[i].Timestamp.After(*posts.Posts[j].Timestamp)
}
// Return list of lists of posts, where each given list has the same date
// E.g. [Dec 2018 -> []*Post, Nov 2018 -> []*Post, ...]
// Mutates posts (sorts) before running
func groupByDate(posts []*Post, dateFormat string) [][]*Post {
sort.Sort(ByTimestamp{posts})
// Groups are opened lazily: the first post always starts a new group
grouped := [][]*Post{}
lastDate := ""
for _, p := range posts {
date := p.Timestamp.Format(dateFormat)
if date != lastDate {
// New date, make new list
grouped = append(grouped, []*Post{})
lastDate = date
}
current := len(grouped) - 1
grouped[current] = append(grouped[current], p)
}
return grouped
}
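// Editor's sketch: with dateFormat "Jan 2006", posts published 2018-12-03, 2018-12-01
// and 2018-11-20 sort newest-first and come back as two groups — all December posts,
// then all November posts — which is exactly the order render/renderHtml print them in.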
// Fetch list of feeds in parallel, aggregate results
func fetchAll(ctx context.Context, feeds []*url.URL) []*Post {
ctxTimeout, timeoutCancel := context.WithTimeout(ctx, FETCH_TIMEOUT)
defer timeoutCancel()
var wg sync.WaitGroup
postChan := make(chan *Post, 10000)
for _, f := range feeds {
wg.Add(1)
go func(feed *url.URL) {
defer wg.Done()
feedData, err := fetchFeed(ctxTimeout, feed, 0)
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
return
}
posts, err := parseFeed(feed, feedData)
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: failed reading feed data %q: %v\n", feed, err)
}
for _, p := range posts {
postChan <- p
}
}(f)
}
wg.Wait()
close(postChan)
posts := []*Post{}
for p := range postChan {
posts = append(posts, p)
}
return posts
}
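// Editor's note: every fetch above shares one 10-second context (FETCH_TIMEOUT), so a
// hung feed is cancelled after at most 10s; its error goes to stderr and the posts from
// the remaining feeds are still aggregated.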
// Fetch a single feed into a list of posts
func fetchFeed(ctx context.Context, feedUrl *url.URL, depth int) (*gofeed.Feed, error) {
feedParser := gofeed.NewParser()
client := &http.Client{}
req, err := http.NewRequest("GET", feedUrl.String(), nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", fmt.Sprintf("picofeed/%s", VERSION))
req = req.WithContext(ctx)
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return nil, fmt.Errorf("%d: %s", resp.StatusCode, resp.Status)
}
contents, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, errors.Wrapf(err, "Failed reading response body")
}
feed, err := feedParser.ParseString(string(contents))
if err == gofeed.ErrFeedTypeNotDetected && depth == 0 {
// User possibly tried to pass in a non-feed page, try to look for link to feed in header
// If found, recurse
newFeed := extractFeedLink(feedUrl, string(contents))
if newFeed == nil {
return nil, errors.New("Feed type not recognized, could not extract feed from <head>")
}
fmt.Fprintf(os.Stderr, "Autodiscovering feed %q for %q\n", newFeed, feedUrl)
return fetchFeed(ctx, newFeed, 1)
}
return feed, err
}
func extractFeedLink(baseUrl *url.URL, contents string) *url.URL {
regexes := []string{
`\s*<link.*type="application/rss\+xml.*href="([^"]*)"`,
`\s*<link.*type="application/atom\+xml.*href="([^"]*)"`,
}
for _, r := range regexes {
re := regexp.MustCompile(r)
matches := re.FindStringSubmatch(contents)
if len(matches) > 1 {
if strings.HasPrefix(matches[1], "/") {
// relative path
newUrl := *baseUrl
newUrl.Path = matches[1]
return &newUrl
}
u, err := url.Parse(matches[1])
if err != nil {
fmt.Fprintf(os.Stderr, "Autodetected %q for %q but could not parse url", matches[1], baseUrl)
continue
}
return u
}
}
return nil
}
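// Illustrative sketch only (not part of the original source): the kind of
// <head> markup the regexes above are meant to match. The blog URL and feed
// path are hypothetical.
func exampleExtractFeedLink() {
base, _ := url.Parse("https://blog.example.com/")
page := `<html><head><link rel="alternate" type="application/rss+xml" href="/index.xml"></head></html>`
if u := extractFeedLink(base, page); u != nil {
fmt.Println(u) // https://blog.example.com/index.xml
}
}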
func parseFeed(feedUrl *url.URL, feed *gofeed.Feed) ([]*Post, error) {
posts := []*Post{}
for _, i := range feed.Items {
t := i.PublishedParsed
if i.PublishedParsed == nil {
if i.UpdatedParsed != nil {
t = i.UpdatedParsed
} else {
fmt.Fprintf(os.Stderr, "Invalid time (%q): %v", i.Title, i.PublishedParsed)
continue
}
}
posts = append(posts, &Post{
Title: i.Title,
Link: i.Link,
Timestamp: t,
FeedTitle: feed.Title,
FeedLink: feedUrl.String(),
})
}
fmt.Fprintf(os.Stderr, "Fetched %q: %d posts\n", feedUrl, len(feed.Items))
return posts, nil
}
// If feed is a path to a file, attempt to read it as a newline separated list of urls
// Otherwise try parsing as a url itself
func parseFeedArg(feed string) ([]*url.URL, error) {
f, err := os.Stat(feed)
if os.IsNotExist(err) || (err == nil && !f.Mode().IsRegular()) {
// feed is not a file, treat as url
u, err := url.Parse(feed)
if err != nil {
return nil, errors.Wrapf(err, "%q is not a file, url.Parse() failed", feed)
}
return []*url.URL{u}, nil
}
// feed is a file, read as newline separated urls
contents, err := ioutil.ReadFile(feed)
if err != nil {
return nil, errors.Wrapf(err, "ReadFile(%q)", feed)
}
lines := strings.Split(string(contents), "\n")
urls := []*url.URL{}
for _, l := range lines {
if l == "" {
continue
}
u, err := url.Parse(l)
if err != nil {
return nil, errors.Wrapf(err, "url.Parse(%q)", l)
}
urls = append(urls, u)
}
return urls, nil
}
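// Illustrative note (not part of the original source): a feeds file passed to
// parseFeedArg is just newline-separated URLs, one per line, e.g.
//
//	https://blog.golang.org/feed.atom
//	https://example.com/rss.xml
//
// Blank lines are skipped and anything that fails url.Parse aborts with an error.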
| [
"\"AWS_PROFILE\"",
"\"BUCKET\""
]
| []
| [
"AWS_PROFILE",
"BUCKET"
]
| [] | ["AWS_PROFILE", "BUCKET"] | go | 2 | 0 | |
.mvn/wrapper/MavenWrapperDownloader.java | /*
* Copyright 2007-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.net.*;
import java.io.*;
import java.nio.channels.*;
import java.util.Properties;
public class MavenWrapperDownloader {
private static final String WRAPPER_VERSION = "0.5.6";
/**
* Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
*/
private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+ WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
/**
* Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
* use instead of the default one.
*/
private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
".mvn/wrapper/maven-wrapper.properties";
/**
* Path where the maven-wrapper.jar will be saved to.
*/
private static final String MAVEN_WRAPPER_JAR_PATH =
".mvn/wrapper/maven-wrapper.jar";
/**
* Name of the property which should be used to override the default download url for the wrapper.
*/
private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
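/*
 * Illustrative example only (not part of the original downloader): a
 * .mvn/wrapper/maven-wrapper.properties file can override the default download
 * location via the 'wrapperUrl' property read below. The mirror URL here is
 * hypothetical.
 *
 *   wrapperUrl=https://mirror.example.com/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
 */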
public static void main(String args[]) {
System.out.println("- Downloader started");
File baseDirectory = new File(args[0]);
System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
// If the maven-wrapper.properties exists, read it and check if it contains a custom
// wrapperUrl parameter.
File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
String url = DEFAULT_DOWNLOAD_URL;
if(mavenWrapperPropertyFile.exists()) {
FileInputStream mavenWrapperPropertyFileInputStream = null;
try {
mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
Properties mavenWrapperProperties = new Properties();
mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
} catch (IOException e) {
System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
} finally {
try {
if(mavenWrapperPropertyFileInputStream != null) {
mavenWrapperPropertyFileInputStream.close();
}
} catch (IOException e) {
// Ignore ...
}
}
}
System.out.println("- Downloading from: " + url);
File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
if(!outputFile.getParentFile().exists()) {
if(!outputFile.getParentFile().mkdirs()) {
System.out.println(
"- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
}
}
System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
try {
downloadFileFromURL(url, outputFile);
System.out.println("Done");
System.exit(0);
} catch (Throwable e) {
System.out.println("- Error downloading");
e.printStackTrace();
System.exit(1);
}
}
private static void downloadFileFromURL(String urlString, File destination) throws Exception {
if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
String username = System.getenv("MVNW_USERNAME");
char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
Authenticator.setDefault(new Authenticator() {
@Override
protected PasswordAuthentication getPasswordAuthentication() {
return new PasswordAuthentication(username, password);
}
});
}
URL website = new URL(urlString);
ReadableByteChannel rbc;
rbc = Channels.newChannel(website.openStream());
FileOutputStream fos = new FileOutputStream(destination);
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
fos.close();
rbc.close();
}
}
| [
"\"MVNW_USERNAME\"",
"\"MVNW_PASSWORD\"",
"\"MVNW_USERNAME\"",
"\"MVNW_PASSWORD\""
]
| []
| [
"MVNW_USERNAME",
"MVNW_PASSWORD"
]
| [] | ["MVNW_USERNAME", "MVNW_PASSWORD"] | java | 2 | 0 | |
beep/tests/test_secrets_manager.py | # Copyright 2019 Toyota Research Institute. All rights reserved.
"""Unit tests related to Splicing files"""
import os
import unittest
from beep import ENVIRONMENT
from beep.config import config
from beep.utils.secrets_manager import secret_accessible, get_secret, event_setup
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, "test_files")
class SecretTest(unittest.TestCase):
def setUp(self):
pass
def test_secret_accessible(self):
available = secret_accessible(ENVIRONMENT)
if available:
secret_name = config[ENVIRONMENT]['kinesis']['stream']
get_secret(secret_name)
else:
self.assertFalse(available)
| []
| []
| []
| [] | [] | python | null | null | null |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'goodshare.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
go/src/github.com/docker/docker/integration-cli/requirements.go | package main
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strings"
"time"
"github.com/go-check/check"
)
type testCondition func() bool
type testRequirement struct {
Condition testCondition
SkipMessage string
}
// List test requirements
var (
DaemonIsWindows = testRequirement{
func() bool { return daemonPlatform == "windows" },
"Test requires a Windows daemon",
}
DaemonIsLinux = testRequirement{
func() bool { return daemonPlatform == "linux" },
"Test requires a Linux daemon",
}
NotArm = testRequirement{
func() bool { return os.Getenv("DOCKER_ENGINE_GOARCH") != "arm" },
"Test requires a daemon not running on ARM",
}
NotPpc64le = testRequirement{
func() bool { return os.Getenv("DOCKER_ENGINE_GOARCH") != "ppc64le" },
"Test requires a daemon not running on ppc64le",
}
SameHostDaemon = testRequirement{
func() bool { return isLocalDaemon },
"Test requires docker daemon to run on the same machine as CLI",
}
UnixCli = testRequirement{
func() bool { return isUnixCli },
"Test requires posix utilities or functionality to run.",
}
ExecSupport = testRequirement{
func() bool { return supportsExec },
"Test requires 'docker exec' capabilities on the tested daemon.",
}
Network = testRequirement{
func() bool {
// Set a timeout on the GET at 15s
var timeout = time.Duration(15 * time.Second)
var url = "https://hub.docker.com"
client := http.Client{
Timeout: timeout,
}
resp, err := client.Get(url)
if err != nil && strings.Contains(err.Error(), "use of closed network connection") {
panic(fmt.Sprintf("Timeout for GET request on %s", url))
}
if resp != nil {
resp.Body.Close()
}
return err == nil
},
"Test requires network availability, environment variable set to none to run in a non-network enabled mode.",
}
Apparmor = testRequirement{
func() bool {
buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
return err == nil && len(buf) > 1 && buf[0] == 'Y'
},
"Test requires apparmor is enabled.",
}
RegistryHosting = testRequirement{
func() bool {
// for now registry binary is built only if we're running inside
// container through `make test`. Figure that out by testing if
// registry binary is in PATH.
_, err := exec.LookPath(v2binary)
return err == nil
},
fmt.Sprintf("Test requires an environment that can host %s in the same host", v2binary),
}
NotaryHosting = testRequirement{
func() bool {
// for now notary binary is built only if we're running inside
// container through `make test`. Figure that out by testing if
// notary-server binary is in PATH.
_, err := exec.LookPath(notaryBinary)
return err == nil
},
fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryBinary),
}
NotOverlay = testRequirement{
func() bool {
cmd := exec.Command("grep", "^overlay / overlay", "/proc/mounts")
if err := cmd.Run(); err != nil {
return true
}
return false
},
"Test requires underlying root filesystem not be backed by overlay.",
}
Devicemapper = testRequirement{
func() bool {
cmd := exec.Command("grep", "^devicemapper / devicemapper", "/proc/mounts")
if err := cmd.Run(); err != nil {
return false
}
return true
},
"Test requires underlying root filesystem to be backed by devicemapper.",
}
IPv6 = testRequirement{
func() bool {
cmd := exec.Command("test", "-f", "/proc/net/if_inet6")
if err := cmd.Run(); err != nil {
return true
}
return false
},
"Test requires support for IPv6",
}
NotGCCGO = testRequirement{
func() bool {
out, err := exec.Command("go", "version").Output()
if err == nil && strings.Contains(string(out), "gccgo") {
return false
}
return true
},
"Test requires native Golang compiler instead of GCCGO",
}
NotUserNamespace = testRequirement{
func() bool {
root := os.Getenv("DOCKER_REMAP_ROOT")
if root != "" {
return false
}
return true
},
"Test cannot be run when remapping root",
}
)
// testRequires checks if the environment satisfies the requirements
// for the test to run or skips the tests.
func testRequires(c *check.C, requirements ...testRequirement) {
for _, r := range requirements {
if !r.Condition() {
c.Skip(r.SkipMessage)
}
}
}
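// Illustrative sketch only (not part of the original file): a test typically
// declares its requirements at the top and is skipped when they are not met,
// e.g.
//
//	func (s *DockerSuite) TestSomethingLinuxOnly(c *check.C) {
//		testRequires(c, DaemonIsLinux, Network)
//		// ... test body ...
//	}
//
// The suite and test names here are hypothetical.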
| [
"\"DOCKER_ENGINE_GOARCH\"",
"\"DOCKER_ENGINE_GOARCH\"",
"\"DOCKER_REMAP_ROOT\""
]
| []
| [
"DOCKER_ENGINE_GOARCH",
"DOCKER_REMAP_ROOT"
]
| [] | ["DOCKER_ENGINE_GOARCH", "DOCKER_REMAP_ROOT"] | go | 2 | 0 | |
src/app/api/endpoint/static.go | package endpoint
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
)
// StaticEndpoint .
type StaticEndpoint struct {
Core
}
// SetupStatic .
func SetupStatic(core Core) {
p := new(StaticEndpoint)
p.Core = core
p.Router.Get("/api/v1", p.Index)
p.Router.Get("/404", p.Error404)
p.Router.Get("/static...", p.Static)
}
// Index .
// swagger:route GET /api/v1 healthcheck Ready
//
// API is ready.
//
// Responses:
// 200: OKResponse
func (p StaticEndpoint) Index(w http.ResponseWriter, r *http.Request) (int, error) {
return p.Response.OK(w, "ready")
}
// Error404 .
func (p StaticEndpoint) Error404(w http.ResponseWriter, r *http.Request) (int, error) {
// Get the environment variable in production.
basepath := os.Getenv("APP_ROOT")
if len(basepath) == 0 {
gopath := os.Getenv("GOPATH")
if len(gopath) == 0 {
return http.StatusInternalServerError, errors.New("could not find $APP_ROOT or $GOPATH environment variables")
}
basepath = filepath.Join(gopath, "src/app/ui/dist")
}
// Serve the index file.
b, err := ioutil.ReadFile(basepath + "/index.html")
if err != nil {
return http.StatusInternalServerError, errors.New("could not find index.html")
}
// Return a 404 and serve the index.html file.
w.WriteHeader(http.StatusNotFound)
_, err = fmt.Fprint(w, string(b))
if err != nil {
return http.StatusInternalServerError, err
}
return 0, nil
}
// Static .
func (p StaticEndpoint) Static(w http.ResponseWriter, r *http.Request) (int, error) {
if r.URL.Path == "/static/" {
return http.StatusNotFound, nil
}
// Get the environment variable in production.
basepath := os.Getenv("APP_ROOT")
if len(basepath) == 0 {
gopath := os.Getenv("GOPATH")
if len(gopath) == 0 {
return http.StatusInternalServerError, errors.New("could not find $APP_ROOT or $GOPATH environment variables")
}
basepath = filepath.Join(gopath, "src/app/ui/dist")
}
// If the file doesn't exist, serve the UI error message.
fullPath := basepath + r.URL.Path
if _, err := os.Stat(fullPath); err != nil {
b, err := ioutil.ReadFile(basepath + "/index.html")
if err != nil {
return http.StatusInternalServerError, errors.New("could not find index.html")
}
// Return a 404 and serve the index.html file.
w.WriteHeader(http.StatusNotFound)
_, err = fmt.Fprint(w, string(b))
if err != nil {
return http.StatusInternalServerError, err
}
return 0, nil
}
// Serve the file to the user. Don't use filepath.join to protect against
// "../" in the URL path.
http.ServeFile(w, r, fullPath)
return 0, nil
}
| [
"\"APP_ROOT\"",
"\"GOPATH\"",
"\"APP_ROOT\"",
"\"GOPATH\""
]
| []
| [
"GOPATH",
"APP_ROOT"
]
| [] | ["GOPATH", "APP_ROOT"] | go | 2 | 0 | |
cloudfunc/client.py | # coding=utf-8
import os
import pickle
import requests
from .errors import CloudFuncError
class CloudFuncClient:
def __init__(self, serve_address: str = None):
if serve_address is None:
# Use .get() so a missing variable reaches the assert below instead of raising a bare KeyError
serve_address = os.environ.get('CLOUDFUNC_SERVE_ADDRESS')
assert serve_address is not None, 'cloudfunc-serve address is not given'
self.serve_address = serve_address
self.session = requests.Session()
def run(self, cloud_func_name: str, *args, **kwargs):
data = pickle.dumps((args, kwargs))
try:
resp = self.session.post(
f'http://{self.serve_address}/cloud-funcs/run',
params={'name': cloud_func_name},
data=data,
headers={'Content-Type': 'application/octet-stream'}
)
except Exception as e:
raise CloudFuncError(e)
else:
try:
resp.raise_for_status()
except requests.HTTPError:
raise CloudFuncError(resp.text)
return pickle.loads(resp.content)
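# Illustrative sketch only (not part of the original client): typical usage,
# assuming a cloud function named "add" has been registered on the serving side
# and CLOUDFUNC_SERVE_ADDRESS (or the serve_address argument) points at it.
# The address and function name are hypothetical.
#
#     client = CloudFuncClient("localhost:8080")
#     result = client.run("add", 1, 2)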
| []
| []
| [
"CLOUDFUNC_SERVE_ADDRESS"
]
| [] | ["CLOUDFUNC_SERVE_ADDRESS"] | python | 1 | 0 | |
src/cmd/dist/test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
func cmdtest() {
gogcflags = os.Getenv("GO_GCFLAGS")
var t tester
var noRebuild bool
flag.BoolVar(&t.listMode, "list", false, "list available tests")
flag.BoolVar(&t.rebuild, "rebuild", false, "rebuild everything first")
flag.BoolVar(&noRebuild, "no-rebuild", false, "overrides -rebuild (historical dreg)")
flag.BoolVar(&t.keepGoing, "k", false, "keep going even when error occurred")
flag.BoolVar(&t.race, "race", false, "run in race builder mode (different set of tests)")
flag.BoolVar(&t.compileOnly, "compile-only", false, "compile tests, but don't run them. This is for some builders. Not all dist tests respect this flag, but most do.")
flag.StringVar(&t.banner, "banner", "##### ", "banner prefix; blank means no section banners")
flag.StringVar(&t.runRxStr, "run", os.Getenv("GOTESTONLY"),
"run only those tests matching the regular expression; empty means to run all. "+
"Special exception: if the string begins with '!', the match is inverted.")
xflagparse(-1) // any number of args
if noRebuild {
t.rebuild = false
}
t.run()
}
// tester executes cmdtest.
type tester struct {
race bool
listMode bool
rebuild bool
failed bool
keepGoing bool
compileOnly bool // just try to compile all tests, but no need to run
runRxStr string
runRx *regexp.Regexp
runRxWant bool // want runRx to match (true) or not match (false)
runNames []string // tests to run, exclusive with runRx; empty means all
banner string // prefix, or "" for none
lastHeading string // last dir heading printed
cgoEnabled bool
partial bool
haveTime bool // the 'time' binary is available
tests []distTest
timeoutScale int
worklist []*work
}
type work struct {
dt *distTest
cmd *exec.Cmd
start chan bool
out []byte
err error
end chan bool
}
// A distTest is a test run by dist test.
// Each test has a unique name and belongs to a group (heading)
type distTest struct {
name string // unique test name; may be filtered with -run flag
heading string // group section; this header is printed before the test is run.
fn func(*distTest) error
}
func (t *tester) run() {
timelog("start", "dist test")
var exeSuffix string
if goos == "windows" {
exeSuffix = ".exe"
}
if _, err := os.Stat(filepath.Join(gobin, "go"+exeSuffix)); err == nil {
os.Setenv("PATH", fmt.Sprintf("%s%c%s", gobin, os.PathListSeparator, os.Getenv("PATH")))
}
slurp, err := exec.Command("go", "env", "CGO_ENABLED").Output()
if err != nil {
log.Fatalf("Error running go env CGO_ENABLED: %v", err)
}
t.cgoEnabled, _ = strconv.ParseBool(strings.TrimSpace(string(slurp)))
if flag.NArg() > 0 && t.runRxStr != "" {
log.Fatalf("the -run regular expression flag is mutually exclusive with test name arguments")
}
t.runNames = flag.Args()
if t.hasBash() {
if _, err := exec.LookPath("time"); err == nil {
t.haveTime = true
}
}
if t.rebuild {
t.out("Building packages and commands.")
// Force rebuild the whole toolchain.
goInstall("go", append([]string{"-a", "-i"}, toolchain...)...)
}
// Complete rebuild bootstrap, even with -no-rebuild.
// If everything is up-to-date, this is a no-op.
// If everything is not up-to-date, the first checkNotStale
// during the test process will kill the tests, so we might
// as well install the world.
// Now that for example "go install cmd/compile" does not
// also install runtime (you need "go install -i cmd/compile"
// for that), it's easy for previous workflows like
// "rebuild the compiler and then run run.bash"
// to break if we don't automatically refresh things here.
// Rebuilding is a shortened bootstrap.
// See cmdbootstrap for a description of the overall process.
//
// But don't do this if we're running in the Go build system,
// where cmd/dist is invoked many times. This just slows that
// down (Issue 24300).
if !t.listMode && os.Getenv("GO_BUILDER_NAME") == "" {
goInstall("go", append([]string{"-i"}, toolchain...)...)
goInstall("go", append([]string{"-i"}, toolchain...)...)
goInstall("go", "std", "cmd")
checkNotStale("go", "std", "cmd")
}
t.timeoutScale = 1
switch goarch {
case "arm":
t.timeoutScale = 2
case "mips", "mipsle", "mips64", "mips64le":
t.timeoutScale = 4
}
if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
t.timeoutScale, err = strconv.Atoi(s)
if err != nil {
log.Fatalf("failed to parse $GO_TEST_TIMEOUT_SCALE = %q as integer: %v", s, err)
}
}
if t.runRxStr != "" {
if t.runRxStr[0] == '!' {
t.runRxWant = false
t.runRxStr = t.runRxStr[1:]
} else {
t.runRxWant = true
}
t.runRx = regexp.MustCompile(t.runRxStr)
}
t.registerTests()
if t.listMode {
for _, tt := range t.tests {
fmt.Println(tt.name)
}
return
}
// We must unset GOROOT_FINAL before tests, because runtime/debug requires
// correct access to source code, so if we have GOROOT_FINAL in effect,
// at least runtime/debug test will fail.
// If GOROOT_FINAL was set before, then now all the commands will appear stale.
// Nothing we can do about that other than not checking them below.
// (We call checkNotStale but only with "std" not "cmd".)
os.Setenv("GOROOT_FINAL_OLD", os.Getenv("GOROOT_FINAL")) // for cmd/link test
os.Unsetenv("GOROOT_FINAL")
for _, name := range t.runNames {
if !t.isRegisteredTestName(name) {
log.Fatalf("unknown test %q", name)
}
}
for _, dt := range t.tests {
if !t.shouldRunTest(dt.name) {
t.partial = true
continue
}
dt := dt // dt used in background after this iteration
if err := dt.fn(&dt); err != nil {
t.runPending(&dt) // in case that hasn't been done yet
t.failed = true
if t.keepGoing {
log.Printf("Failed: %v", err)
} else {
log.Fatalf("Failed: %v", err)
}
}
}
t.runPending(nil)
timelog("end", "dist test")
if t.failed {
fmt.Println("\nFAILED")
os.Exit(1)
} else if incomplete[goos+"/"+goarch] {
fmt.Println("\nFAILED (incomplete port)")
os.Exit(1)
} else if t.partial {
fmt.Println("\nALL TESTS PASSED (some were excluded)")
} else {
fmt.Println("\nALL TESTS PASSED")
}
}
func (t *tester) shouldRunTest(name string) bool {
if t.runRx != nil {
return t.runRx.MatchString(name) == t.runRxWant
}
if len(t.runNames) == 0 {
return true
}
for _, runName := range t.runNames {
if runName == name {
return true
}
}
return false
}
// short returns a -short flag to pass to 'go test'.
// It returns "-short", unless the environment variable
// GO_TEST_SHORT is set to a non-empty, false-ish string.
//
// This environment variable is meant to be an internal
// detail between the Go build system and cmd/dist
// and is not intended for use by users.
func short() string {
if v := os.Getenv("GO_TEST_SHORT"); v != "" {
short, err := strconv.ParseBool(v)
if err != nil {
log.Fatalf("invalid GO_TEST_SHORT %q: %v", v, err)
}
if !short {
return "-short=false"
}
}
return "-short"
}
// goTest returns the beginning of the go test command line.
// Callers should use goTest and then pass flags overriding these
// defaults as later arguments in the command line.
func (t *tester) goTest() []string {
return []string{
"go", "test", short(), "-count=1", t.tags(), t.runFlag(""),
}
}
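// Illustrative note (not part of the original source): on a typical linux/amd64
// run the slice above expands to roughly
//
//	go test -short -count=1 -tags= -run=
//
// and callers append later flags to override these defaults.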
func (t *tester) tags() string {
if t.iOS() {
return "-tags=lldb"
}
return "-tags="
}
func (t *tester) timeout(sec int) string {
return "-timeout=" + fmt.Sprint(time.Duration(sec)*time.Second*time.Duration(t.timeoutScale))
}
// ranGoTest and stdMatches are state closed over by the stdlib
// testing func in registerStdTest below. The tests are run
// sequentially, so there's no need for locks.
//
// ranGoBench and benchMatches are the same, but are only used
// in -race mode.
var (
ranGoTest bool
stdMatches []string
ranGoBench bool
benchMatches []string
)
func (t *tester) registerStdTest(pkg string) {
testName := "go_test:" + pkg
if t.runRx == nil || t.runRx.MatchString(testName) == t.runRxWant {
stdMatches = append(stdMatches, pkg)
}
t.tests = append(t.tests, distTest{
name: testName,
heading: "Testing packages.",
fn: func(dt *distTest) error {
if ranGoTest {
return nil
}
t.runPending(dt)
timelog("start", dt.name)
defer timelog("end", dt.name)
ranGoTest = true
timeoutSec := 180
for _, pkg := range stdMatches {
if pkg == "cmd/go" {
timeoutSec *= 3
break
}
}
args := []string{
"test",
short(),
t.tags(),
t.timeout(timeoutSec),
"-gcflags=all=" + gogcflags,
}
if t.race {
args = append(args, "-race")
}
if t.compileOnly {
args = append(args, "-run=^$")
} else if goos == "js" && goarch == "wasm" {
args = append(args, "-run=^Test") // exclude examples; Issue 25913
}
args = append(args, stdMatches...)
cmd := exec.Command("go", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
},
})
}
func (t *tester) registerRaceBenchTest(pkg string) {
testName := "go_test_bench:" + pkg
if t.runRx == nil || t.runRx.MatchString(testName) == t.runRxWant {
benchMatches = append(benchMatches, pkg)
}
t.tests = append(t.tests, distTest{
name: testName,
heading: "Running benchmarks briefly.",
fn: func(dt *distTest) error {
if ranGoBench {
return nil
}
t.runPending(dt)
timelog("start", dt.name)
defer timelog("end", dt.name)
ranGoBench = true
args := []string{
"test",
short(),
"-race",
t.timeout(1200), // longer timeout for race with benchmarks
"-run=^$", // nothing. only benchmarks.
"-benchtime=.1s",
"-cpu=4",
}
if !t.compileOnly {
args = append(args, "-bench=.*")
}
args = append(args, benchMatches...)
cmd := exec.Command("go", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
},
})
}
// stdOutErrAreTerminals is defined in test_linux.go, to report
// whether stdout & stderr are terminals.
var stdOutErrAreTerminals func() bool
func (t *tester) registerTests() {
if strings.HasSuffix(os.Getenv("GO_BUILDER_NAME"), "-vetall") {
// Run vet over std and cmd and call it quits.
for k := range cgoEnabled {
osarch := k
t.tests = append(t.tests, distTest{
name: "vet/" + osarch,
heading: "cmd/vet/all",
fn: func(dt *distTest) error {
t.addCmd(dt, "src/cmd/vet/all", "go", "run", "main.go", "-p="+osarch)
return nil
},
})
}
return
}
// Fast path to avoid the ~1 second of `go list std cmd` when
// the caller lists specific tests to run. (as the continuous
// build coordinator does).
if len(t.runNames) > 0 {
for _, name := range t.runNames {
if strings.HasPrefix(name, "go_test:") {
t.registerStdTest(strings.TrimPrefix(name, "go_test:"))
}
if strings.HasPrefix(name, "go_test_bench:") {
t.registerRaceBenchTest(strings.TrimPrefix(name, "go_test_bench:"))
}
}
} else {
// Use a format string to only list packages and commands that have tests.
const format = "{{if (or .TestGoFiles .XTestGoFiles)}}{{.ImportPath}}{{end}}"
cmd := exec.Command("go", "list", "-f", format)
if t.race {
cmd.Args = append(cmd.Args, "-tags=race")
}
cmd.Args = append(cmd.Args, "std")
if !t.race {
cmd.Args = append(cmd.Args, "cmd")
}
cmd.Stderr = new(bytes.Buffer)
all, err := cmd.Output()
if err != nil {
log.Fatalf("Error running go list std cmd: %v:\n%s", err, cmd.Stderr)
}
pkgs := strings.Fields(string(all))
for _, pkg := range pkgs {
t.registerStdTest(pkg)
}
if t.race {
for _, pkg := range pkgs {
if t.packageHasBenchmarks(pkg) {
t.registerRaceBenchTest(pkg)
}
}
}
}
// Test the os/user package in the pure-Go mode too.
if !t.compileOnly {
t.tests = append(t.tests, distTest{
name: "osusergo",
heading: "os/user with tag osusergo",
fn: func(dt *distTest) error {
t.addCmd(dt, "src", t.goTest(), t.timeout(300), "-tags=osusergo", "os/user")
return nil
},
})
}
if t.race {
return
}
// Runtime CPU tests.
if !t.compileOnly && goos != "js" { // js can't handle -cpu != 1
testName := "runtime:cpu124"
t.tests = append(t.tests, distTest{
name: testName,
heading: "GOMAXPROCS=2 runtime -cpu=1,2,4 -quick",
fn: func(dt *distTest) error {
cmd := t.addCmd(dt, "src", t.goTest(), t.timeout(300), "runtime", "-cpu=1,2,4", "-quick")
// We set GOMAXPROCS=2 in addition to -cpu=1,2,4 in order to test runtime bootstrap code,
// creation of first goroutines and first garbage collections in the parallel setting.
cmd.Env = append(os.Environ(), "GOMAXPROCS=2")
return nil
},
})
}
// This test needs its stdout/stderr to be terminals, so we don't run it from cmd/go's tests.
// See issue 18153.
if goos == "linux" {
t.tests = append(t.tests, distTest{
name: "cmd_go_test_terminal",
heading: "cmd/go terminal test",
fn: func(dt *distTest) error {
t.runPending(dt)
timelog("start", dt.name)
defer timelog("end", dt.name)
if !stdOutErrAreTerminals() {
fmt.Println("skipping terminal test; stdout/stderr not terminals")
return nil
}
cmd := exec.Command("go", "test")
cmd.Dir = filepath.Join(os.Getenv("GOROOT"), "src/cmd/go/testdata/testterminal18153")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
},
})
}
// On the builders only, test that a moved GOROOT still works.
// Fails on iOS because CC_FOR_TARGET refers to clangwrap.sh
// in the unmoved GOROOT.
// Fails on Android and js/wasm with an exec format error.
// Fails on plan9 with "cannot find GOROOT" (issue #21016).
if os.Getenv("GO_BUILDER_NAME") != "" && goos != "android" && !t.iOS() && goos != "plan9" && goos != "js" {
t.tests = append(t.tests, distTest{
name: "moved_goroot",
heading: "moved GOROOT",
fn: func(dt *distTest) error {
t.runPending(dt)
timelog("start", dt.name)
defer timelog("end", dt.name)
moved := goroot + "-moved"
if err := os.Rename(goroot, moved); err != nil {
if goos == "windows" {
// Fails on Windows (with "Access is denied") if a process
// or binary is in this directory. For instance, using all.bat
// when run from c:\workdir\go\src fails here
// if GO_BUILDER_NAME is set. Our builders invoke tests
// a different way which happens to work when sharding
// tests, but we should be tolerant of the non-sharded
// all.bat case.
log.Printf("skipping test on Windows")
return nil
}
return err
}
// Run `go test fmt` in the moved GOROOT.
cmd := exec.Command(filepath.Join(moved, "bin", "go"), "test", "fmt")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
// Don't set GOROOT in the environment.
for _, e := range os.Environ() {
if !strings.HasPrefix(e, "GOROOT=") && !strings.HasPrefix(e, "GOCACHE=") {
cmd.Env = append(cmd.Env, e)
}
}
err := cmd.Run()
if rerr := os.Rename(moved, goroot); rerr != nil {
log.Fatalf("failed to restore GOROOT: %v", rerr)
}
return err
},
})
}
// Test that internal linking of standard packages does not
// require libgcc. This ensures that we can install a Go
// release on a system that does not have a C compiler
// installed and still build Go programs (that don't use cgo).
for _, pkg := range cgoPackages {
if !t.internalLink() {
break
}
// ARM libgcc may be Thumb, which internal linking does not support.
if goarch == "arm" {
break
}
pkg := pkg
var run string
if pkg == "net" {
run = "TestTCPStress"
}
t.tests = append(t.tests, distTest{
name: "nolibgcc:" + pkg,
heading: "Testing without libgcc.",
fn: func(dt *distTest) error {
t.addCmd(dt, "src", t.goTest(), "-ldflags=-linkmode=internal -libgcc=none", pkg, t.runFlag(run))
return nil
},
})
}
// Test internal linking of PIE binaries where it is supported.
if goos == "linux" && goarch == "amd64" {
t.tests = append(t.tests, distTest{
name: "pie_internal",
heading: "internal linking of -buildmode=pie",
fn: func(dt *distTest) error {
t.addCmd(dt, "src", t.goTest(), "reflect", "-buildmode=pie", "-ldflags=-linkmode=internal", t.timeout(60))
return nil
},
})
// Also test a cgo package.
if t.cgoEnabled {
t.tests = append(t.tests, distTest{
name: "pie_internal_cgo",
heading: "internal linking of -buildmode=pie",
fn: func(dt *distTest) error {
t.addCmd(dt, "src", t.goTest(), "os/user", "-buildmode=pie", "-ldflags=-linkmode=internal", t.timeout(60))
return nil
},
})
}
}
// sync tests
if goos != "js" { // js doesn't support -cpu=10
t.tests = append(t.tests, distTest{
name: "sync_cpu",
heading: "sync -cpu=10",
fn: func(dt *distTest) error {
t.addCmd(dt, "src", t.goTest(), "sync", t.timeout(120), "-cpu=10", t.runFlag(""))
return nil
},
})
}
if t.raceDetectorSupported() {
t.tests = append(t.tests, distTest{
name: "race",
heading: "Testing race detector",
fn: t.raceTest,
})
}
if t.cgoEnabled && !t.iOS() {
// Disabled on iOS. golang.org/issue/15919
t.registerHostTest("cgo_stdio", "../misc/cgo/stdio", "misc/cgo/stdio", ".")
t.registerHostTest("cgo_life", "../misc/cgo/life", "misc/cgo/life", ".")
fortran := os.Getenv("FC")
if fortran == "" {
fortran, _ = exec.LookPath("gfortran")
}
if t.hasBash() && goos != "android" && fortran != "" {
t.tests = append(t.tests, distTest{
name: "cgo_fortran",
heading: "../misc/cgo/fortran",
fn: func(dt *distTest) error {
t.addCmd(dt, "misc/cgo/fortran", "./test.bash", fortran)
return nil
},
})
}
if t.hasSwig() && goos != "android" {
t.tests = append(t.tests, distTest{
name: "swig_stdio",
heading: "../misc/swig/stdio",
fn: func(dt *distTest) error {
t.addCmd(dt, "misc/swig/stdio", t.goTest())
return nil
},
})
if t.hasCxx() {
t.tests = append(t.tests, distTest{
name: "swig_callback",
heading: "../misc/swig/callback",
fn: func(dt *distTest) error {
t.addCmd(dt, "misc/swig/callback", t.goTest())
return nil
},
})
}
}
}
if t.cgoEnabled {
t.tests = append(t.tests, distTest{
name: "cgo_test",
heading: "../misc/cgo/test",
fn: t.cgoTest,
})
}
if t.hasBash() && t.cgoEnabled && goos != "android" && goos != "darwin" {
t.registerTest("testgodefs", "../misc/cgo/testgodefs", "./test.bash")
}
// Don't run these tests with $GO_GCFLAGS because most of them
// assume that they can run "go install" with no -gcflags and not
// recompile the entire standard library. If make.bash ran with
// special -gcflags, that's not true.
if t.cgoEnabled && gogcflags == "" {
t.registerTest("testso", "../misc/cgo/testso", t.goTest(), t.timeout(600))
t.registerTest("testsovar", "../misc/cgo/testsovar", t.goTest(), t.timeout(600))
if t.supportedBuildmode("c-archive") {
t.registerHostTest("testcarchive", "../misc/cgo/testcarchive", "misc/cgo/testcarchive", ".")
}
if t.supportedBuildmode("c-shared") {
t.registerHostTest("testcshared", "../misc/cgo/testcshared", "misc/cgo/testcshared", ".")
}
if t.supportedBuildmode("shared") {
t.registerTest("testshared", "../misc/cgo/testshared", t.goTest(), t.timeout(600))
}
if t.supportedBuildmode("plugin") {
t.registerTest("testplugin", "../misc/cgo/testplugin", t.goTest(), t.timeout(600))
}
if gohostos == "linux" && goarch == "amd64" {
t.registerTest("testasan", "../misc/cgo/testasan", "go", "run", "main.go")
}
if mSanSupported(goos, goarch) {
t.registerHostTest("testsanitizers/msan", "../misc/cgo/testsanitizers", "misc/cgo/testsanitizers", ".")
}
if t.hasBash() && goos != "android" && !t.iOS() && gohostos != "windows" {
t.registerHostTest("cgo_errors", "../misc/cgo/errors", "misc/cgo/errors", ".")
}
if gohostos == "linux" && t.extLink() {
t.registerTest("testsigfwd", "../misc/cgo/testsigfwd", "go", "run", "main.go")
}
}
// Doc tests only run on builders.
// They find problems approximately never.
if t.hasBash() && goos != "nacl" && goos != "js" && goos != "android" && !t.iOS() && os.Getenv("GO_BUILDER_NAME") != "" {
t.registerTest("doc_progs", "../doc/progs", "time", "go", "run", "run.go")
t.registerTest("wiki", "../doc/articles/wiki", "./test.bash")
t.registerTest("codewalk", "../doc/codewalk", "time", "./run")
}
if goos != "android" && !t.iOS() {
t.registerTest("bench_go1", "../test/bench/go1", t.goTest(), t.timeout(600))
}
if goos != "android" && !t.iOS() {
// Only start multiple test dir shards on builders,
// where they get distributed to multiple machines.
// See issue 20141.
nShards := 1
if os.Getenv("GO_BUILDER_NAME") != "" {
nShards = 10
}
for shard := 0; shard < nShards; shard++ {
shard := shard
t.tests = append(t.tests, distTest{
name: fmt.Sprintf("test:%d_%d", shard, nShards),
heading: "../test",
fn: func(dt *distTest) error { return t.testDirTest(dt, shard, nShards) },
})
}
}
if goos != "nacl" && goos != "android" && !t.iOS() && goos != "js" {
t.tests = append(t.tests, distTest{
name: "api",
heading: "API check",
fn: func(dt *distTest) error {
if t.compileOnly {
t.addCmd(dt, "src", "go", "build", filepath.Join(goroot, "src/cmd/api/run.go"))
return nil
}
t.addCmd(dt, "src", "go", "run", filepath.Join(goroot, "src/cmd/api/run.go"))
return nil
},
})
}
}
// isRegisteredTestName reports whether a test named testName has already
// been registered.
func (t *tester) isRegisteredTestName(testName string) bool {
for _, tt := range t.tests {
if tt.name == testName {
return true
}
}
return false
}
func (t *tester) registerTest1(seq bool, name, dirBanner string, cmdline ...interface{}) {
bin, args := flattenCmdline(cmdline)
if bin == "time" && !t.haveTime {
bin, args = args[0], args[1:]
}
if t.isRegisteredTestName(name) {
panic("duplicate registered test name " + name)
}
t.tests = append(t.tests, distTest{
name: name,
heading: dirBanner,
fn: func(dt *distTest) error {
if seq {
t.runPending(dt)
timelog("start", name)
defer timelog("end", name)
return t.dirCmd(filepath.Join(goroot, "src", dirBanner), bin, args).Run()
}
t.addCmd(dt, filepath.Join(goroot, "src", dirBanner), bin, args)
return nil
},
})
}
func (t *tester) registerTest(name, dirBanner string, cmdline ...interface{}) {
t.registerTest1(false, name, dirBanner, cmdline...)
}
func (t *tester) registerSeqTest(name, dirBanner string, cmdline ...interface{}) {
t.registerTest1(true, name, dirBanner, cmdline...)
}
func (t *tester) bgDirCmd(dir, bin string, args ...string) *exec.Cmd {
cmd := exec.Command(bin, args...)
if filepath.IsAbs(dir) {
cmd.Dir = dir
} else {
cmd.Dir = filepath.Join(goroot, dir)
}
return cmd
}
func (t *tester) dirCmd(dir string, cmdline ...interface{}) *exec.Cmd {
bin, args := flattenCmdline(cmdline)
cmd := t.bgDirCmd(dir, bin, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if vflag > 1 {
errprintf("%s\n", strings.Join(cmd.Args, " "))
}
return cmd
}
// flattenCmdline flattens a mixture of string and []string as single list
// and then interprets it as a command line: first element is binary, then args.
func flattenCmdline(cmdline []interface{}) (bin string, args []string) {
var list []string
for _, x := range cmdline {
switch x := x.(type) {
case string:
list = append(list, x)
case []string:
list = append(list, x...)
default:
panic("invalid addCmd argument type: " + reflect.TypeOf(x).String())
}
}
// The go command is too picky about duplicated flags.
// Drop all but the last of the allowed duplicated flags.
drop := make([]bool, len(list))
have := map[string]int{}
for i := 1; i < len(list); i++ {
j := strings.Index(list[i], "=")
if j < 0 {
continue
}
flag := list[i][:j]
switch flag {
case "-run", "-tags":
if have[flag] != 0 {
drop[have[flag]] = true
}
have[flag] = i
}
}
out := list[:0]
for i, x := range list {
if !drop[i] {
out = append(out, x)
}
}
list = out
return list[0], list[1:]
}
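// Illustrative note (not part of the original source): for example,
//
//	flattenCmdline([]interface{}{"go", []string{"test", "-run=A"}, "-run=B"})
//
// returns bin "go" and args ["test", "-run=B"]; the earlier duplicated -run
// flag is dropped as described above.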
func (t *tester) addCmd(dt *distTest, dir string, cmdline ...interface{}) *exec.Cmd {
bin, args := flattenCmdline(cmdline)
w := &work{
dt: dt,
cmd: t.bgDirCmd(dir, bin, args...),
}
t.worklist = append(t.worklist, w)
return w.cmd
}
func (t *tester) iOS() bool {
return goos == "darwin" && (goarch == "arm" || goarch == "arm64")
}
func (t *tester) out(v string) {
if t.banner == "" {
return
}
fmt.Println("\n" + t.banner + v)
}
func (t *tester) extLink() bool {
pair := gohostos + "-" + goarch
switch pair {
case "android-arm",
"darwin-386", "darwin-amd64", "darwin-arm", "darwin-arm64",
"dragonfly-amd64",
"freebsd-386", "freebsd-amd64", "freebsd-arm",
"linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-mips64", "linux-mips64le", "linux-mips", "linux-mipsle", "linux-s390x",
"netbsd-386", "netbsd-amd64",
"openbsd-386", "openbsd-amd64",
"windows-386", "windows-amd64":
return true
}
return false
}
func (t *tester) internalLink() bool {
if gohostos == "dragonfly" {
// linkmode=internal fails on dragonfly since errno is a TLS relocation.
return false
}
if gohostarch == "ppc64le" {
// linkmode=internal fails on ppc64le because cmd/link doesn't
// handle the TOC correctly (issue 15409).
return false
}
if goos == "android" {
return false
}
if goos == "darwin" && (goarch == "arm" || goarch == "arm64") {
return false
}
// Internally linking cgo is incomplete on some architectures.
// https://golang.org/issue/10373
// https://golang.org/issue/14449
if goarch == "arm64" || goarch == "mips64" || goarch == "mips64le" || goarch == "mips" || goarch == "mipsle" {
return false
}
return true
}
func (t *tester) supportedBuildmode(mode string) bool {
pair := goos + "-" + goarch
switch mode {
case "c-archive":
if !t.extLink() {
return false
}
switch pair {
case "darwin-386", "darwin-amd64", "darwin-arm", "darwin-arm64",
"linux-amd64", "linux-386", "linux-ppc64le", "linux-s390x",
"freebsd-amd64",
"windows-amd64", "windows-386":
return true
}
return false
case "c-shared":
switch pair {
case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x",
"darwin-amd64", "darwin-386",
"freebsd-amd64",
"android-arm", "android-arm64", "android-386",
"windows-amd64", "windows-386":
return true
}
return false
case "shared":
switch pair {
case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x":
return true
}
return false
case "plugin":
// linux-arm64 is missing because it causes the external linker
// to crash, see https://golang.org/issue/17138
switch pair {
case "linux-386", "linux-amd64", "linux-arm", "linux-s390x", "linux-ppc64le":
return true
case "darwin-amd64":
return true
}
return false
case "pie":
switch pair {
case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x",
"android-amd64", "android-arm", "android-arm64", "android-386":
return true
case "darwin-amd64":
return true
}
return false
default:
log.Fatalf("internal error: unknown buildmode %s", mode)
return false
}
}
func (t *tester) registerHostTest(name, heading, dir, pkg string) {
t.tests = append(t.tests, distTest{
name: name,
heading: heading,
fn: func(dt *distTest) error {
t.runPending(dt)
timelog("start", name)
defer timelog("end", name)
return t.runHostTest(dir, pkg)
},
})
}
func (t *tester) runHostTest(dir, pkg string) error {
defer os.Remove(filepath.Join(goroot, dir, "test.test"))
cmd := t.dirCmd(dir, t.goTest(), "-c", "-o", "test.test", pkg)
cmd.Env = append(os.Environ(), "GOARCH="+gohostarch, "GOOS="+gohostos)
if err := cmd.Run(); err != nil {
return err
}
return t.dirCmd(dir, "./test.test").Run()
}
func (t *tester) cgoTest(dt *distTest) error {
t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=auto")
if t.internalLink() {
t.addCmd(dt, "misc/cgo/test", t.goTest(), "-tags=internal", "-ldflags", "-linkmode=internal")
}
pair := gohostos + "-" + goarch
switch pair {
case "darwin-386", "darwin-amd64",
"openbsd-386", "openbsd-amd64",
"windows-386", "windows-amd64":
// test linkmode=external, but __thread not supported, so skip testtls.
if !t.extLink() {
break
}
t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=external")
t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=external -s")
case "android-arm",
"dragonfly-amd64",
"freebsd-386", "freebsd-amd64", "freebsd-arm",
"linux-386", "linux-amd64", "linux-arm", "linux-ppc64le", "linux-s390x",
"netbsd-386", "netbsd-amd64":
cmd := t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=external")
// A -g argument in CGO_CFLAGS should not affect how the test runs.
cmd.Env = append(os.Environ(), "CGO_CFLAGS=-g0")
t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=auto")
t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=external")
switch pair {
case "netbsd-386", "netbsd-amd64":
// no static linking
case "freebsd-arm":
// -fPIC compiled tls code will use __tls_get_addr instead
// of __aeabi_read_tp, however, on FreeBSD/ARM, __tls_get_addr
// is implemented in rtld-elf, so -fPIC isn't compatible with
// static linking on FreeBSD/ARM with clang. (cgo depends on
// -fPIC fundamentally.)
default:
cmd := t.dirCmd("misc/cgo/test",
compilerEnvLookup(defaultcc, goos, goarch), "-xc", "-o", "/dev/null", "-static", "-")
cmd.Stdin = strings.NewReader("int main() {}")
if err := cmd.Run(); err != nil {
fmt.Println("No support for static linking found (lacks libc.a?), skip cgo static linking test.")
} else {
if goos != "android" {
t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", `-linkmode=external -extldflags "-static -pthread"`)
}
t.addCmd(dt, "misc/cgo/nocgo", t.goTest())
t.addCmd(dt, "misc/cgo/nocgo", t.goTest(), "-ldflags", `-linkmode=external`)
if goos != "android" {
t.addCmd(dt, "misc/cgo/nocgo", t.goTest(), "-ldflags", `-linkmode=external -extldflags "-static -pthread"`)
t.addCmd(dt, "misc/cgo/test", t.goTest(), "-tags=static", "-ldflags", `-linkmode=external -extldflags "-static -pthread"`)
// -static in CGO_LDFLAGS triggers a different code path
// than -static in -extldflags, so test both.
// See issue #16651.
cmd := t.addCmd(dt, "misc/cgo/test", t.goTest(), "-tags=static")
cmd.Env = append(os.Environ(), "CGO_LDFLAGS=-static -pthread")
}
}
if t.supportedBuildmode("pie") {
t.addCmd(dt, "misc/cgo/test", t.goTest(), "-buildmode=pie")
t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-buildmode=pie")
t.addCmd(dt, "misc/cgo/nocgo", t.goTest(), "-buildmode=pie")
}
}
}
return nil
}
// run pending test commands, in parallel, emitting headers as appropriate.
// When finished, emit header for nextTest, which is going to run after the
// pending commands are done (and runPending returns).
// A test should call runPending if it wants to make sure that it is not
// running in parallel with earlier tests, or if it has some other reason
// for needing the earlier tests to be done.
func (t *tester) runPending(nextTest *distTest) {
checkNotStale("go", "std")
worklist := t.worklist
t.worklist = nil
for _, w := range worklist {
w.start = make(chan bool)
w.end = make(chan bool)
go func(w *work) {
if !<-w.start {
timelog("skip", w.dt.name)
w.out = []byte(fmt.Sprintf("skipped due to earlier error\n"))
} else {
timelog("start", w.dt.name)
w.out, w.err = w.cmd.CombinedOutput()
}
timelog("end", w.dt.name)
w.end <- true
}(w)
}
started := 0
ended := 0
var last *distTest
for ended < len(worklist) {
for started < len(worklist) && started-ended < maxbg {
//println("start", started)
w := worklist[started]
started++
w.start <- !t.failed || t.keepGoing
}
w := worklist[ended]
dt := w.dt
if dt.heading != "" && t.lastHeading != dt.heading {
t.lastHeading = dt.heading
t.out(dt.heading)
}
if dt != last {
// Assumes all the entries for a single dt are in one worklist.
last = w.dt
if vflag > 0 {
fmt.Printf("# go tool dist test -run=^%s$\n", dt.name)
}
}
if vflag > 1 {
errprintf("%s\n", strings.Join(w.cmd.Args, " "))
}
//println("wait", ended)
ended++
<-w.end
os.Stdout.Write(w.out)
if w.err != nil {
log.Printf("Failed: %v", w.err)
t.failed = true
}
checkNotStale("go", "std")
}
if t.failed && !t.keepGoing {
log.Fatal("FAILED")
}
if dt := nextTest; dt != nil {
if dt.heading != "" && t.lastHeading != dt.heading {
t.lastHeading = dt.heading
t.out(dt.heading)
}
if vflag > 0 {
fmt.Printf("# go tool dist test -run=^%s$\n", dt.name)
}
}
}
func (t *tester) hasBash() bool {
switch gohostos {
case "windows", "plan9":
return false
}
return true
}
func (t *tester) hasCxx() bool {
cxx, _ := exec.LookPath(compilerEnvLookup(defaultcxx, goos, goarch))
return cxx != ""
}
func (t *tester) hasSwig() bool {
swig, err := exec.LookPath("swig")
if err != nil {
return false
}
// Check that swig was installed with Go support by checking
// that a go directory exists inside the swiglib directory.
// See https://golang.org/issue/23469.
output, err := exec.Command(swig, "-go", "-swiglib").Output()
if err != nil {
return false
}
swigDir := strings.TrimSpace(string(output))
_, err = os.Stat(filepath.Join(swigDir, "go"))
if err != nil {
return false
}
// Check that swig has a new enough version.
// See https://golang.org/issue/22858.
out, err := exec.Command(swig, "-version").CombinedOutput()
if err != nil {
return false
}
re := regexp.MustCompile(`[vV]ersion +([\d]+)([.][\d]+)?([.][\d]+)?`)
matches := re.FindSubmatch(out)
if matches == nil {
// Can't find version number; hope for the best.
return true
}
major, err := strconv.Atoi(string(matches[1]))
if err != nil {
// Can't find version number; hope for the best.
return true
}
if major < 3 {
return false
}
if major > 3 {
// 4.0 or later
return true
}
// We have SWIG version 3.x.
if len(matches[2]) > 0 {
minor, err := strconv.Atoi(string(matches[2][1:]))
if err != nil {
return true
}
if minor > 0 {
// 3.1 or later
return true
}
}
// We have SWIG version 3.0.x.
if len(matches[3]) > 0 {
patch, err := strconv.Atoi(string(matches[3][1:]))
if err != nil {
return true
}
if patch < 6 {
// Before 3.0.6.
return false
}
}
return true
}
func (t *tester) raceDetectorSupported() bool {
if gohostos != goos {
return false
}
if !t.cgoEnabled {
return false
}
if !raceDetectorSupported(goos, goarch) {
return false
}
// The race detector doesn't work on Alpine Linux:
// golang.org/issue/14481
if isAlpineLinux() {
return false
}
// NetBSD support is unfinished.
// golang.org/issue/26403
if goos == "netbsd" {
return false
}
return true
}
func isAlpineLinux() bool {
if runtime.GOOS != "linux" {
return false
}
fi, err := os.Lstat("/etc/alpine-release")
return err == nil && fi.Mode().IsRegular()
}
func (t *tester) runFlag(rx string) string {
if t.compileOnly {
return "-run=^$"
}
if rx == "" && goos == "js" && goarch == "wasm" {
return "-run=^Test" // exclude examples; Issue 25913
}
return "-run=" + rx
}
func (t *tester) raceTest(dt *distTest) error {
t.addCmd(dt, "src", t.goTest(), "-race", "-i", "runtime/race", "flag", "os", "os/exec")
t.addCmd(dt, "src", t.goTest(), "-race", t.runFlag("Output"), "runtime/race")
t.addCmd(dt, "src", t.goTest(), "-race", t.runFlag("TestParse|TestEcho|TestStdinCloseRace|TestClosedPipeRace|TestTypeRace|TestFdRace|TestFdReadRace|TestFileCloseRace"), "flag", "net", "os", "os/exec", "encoding/gob")
// We don't want the following line, because it
// slows down all.bash (by 10 seconds on my laptop).
// The race builder should catch any error here, but doesn't.
// TODO(iant): Figure out how to catch this.
// t.addCmd(dt, "src", t.goTest(), "-race", "-run=TestParallelTest", "cmd/go")
if t.cgoEnabled {
cmd := t.addCmd(dt, "misc/cgo/test", t.goTest(), "-race")
cmd.Env = append(os.Environ(), "GOTRACEBACK=2")
}
if t.extLink() {
// Test with external linking; see issue 9133.
t.addCmd(dt, "src", t.goTest(), "-race", "-ldflags=-linkmode=external", t.runFlag("TestParse|TestEcho|TestStdinCloseRace"), "flag", "os/exec")
}
return nil
}
var runtest struct {
sync.Once
exe string
err error
}
func (t *tester) testDirTest(dt *distTest, shard, shards int) error {
runtest.Do(func() {
const exe = "runtest.exe" // named exe for Windows, but harmless elsewhere
cmd := t.dirCmd("test", "go", "build", "-o", exe, "run.go")
cmd.Env = append(os.Environ(), "GOOS="+gohostos, "GOARCH="+gohostarch)
runtest.exe = filepath.Join(cmd.Dir, exe)
if err := cmd.Run(); err != nil {
runtest.err = err
return
}
xatexit(func() {
os.Remove(runtest.exe)
})
})
if runtest.err != nil {
return runtest.err
}
if t.compileOnly {
return nil
}
t.addCmd(dt, "test", runtest.exe,
fmt.Sprintf("--shard=%d", shard),
fmt.Sprintf("--shards=%d", shards),
)
return nil
}
// cgoPackages is the standard packages that use cgo.
var cgoPackages = []string{
"crypto/x509",
"net",
"os/user",
}
var funcBenchmark = []byte("\nfunc Benchmark")
// packageHasBenchmarks reports whether pkg has benchmarks.
// On any error, it conservatively returns true.
//
// This exists just to eliminate work on the builders, since compiling
// a test in race mode just to discover it has no benchmarks costs a
// second or two per package, and this function returns false for
// about 100 packages.
func (t *tester) packageHasBenchmarks(pkg string) bool {
pkgDir := filepath.Join(goroot, "src", pkg)
d, err := os.Open(pkgDir)
if err != nil {
return true // conservatively
}
defer d.Close()
names, err := d.Readdirnames(-1)
if err != nil {
return true // conservatively
}
for _, name := range names {
if !strings.HasSuffix(name, "_test.go") {
continue
}
slurp, err := ioutil.ReadFile(filepath.Join(pkgDir, name))
if err != nil {
return true // conservatively
}
if bytes.Contains(slurp, funcBenchmark) {
return true
}
}
return false
}
// raceDetectorSupported is a copy of the function
// cmd/internal/sys.RaceDetectorSupported, which can't be used here
// because cmd/dist has to be buildable by Go 1.4.
func raceDetectorSupported(goos, goarch string) bool {
switch goos {
case "linux":
return goarch == "amd64" || goarch == "ppc64le" || goarch == "arm64"
case "darwin", "freebsd", "netbsd", "windows":
return goarch == "amd64"
default:
return false
}
}
// mSanSupported is a copy of the function cmd/internal/sys.MSanSupported,
// which can't be used here because cmd/dist has to be buildable by Go 1.4.
func mSanSupported(goos, goarch string) bool {
switch goos {
case "linux":
return goarch == "amd64" || goarch == "arm64"
default:
return false
}
}
| [
"\"GO_GCFLAGS\"",
"\"GOTESTONLY\"",
"\"PATH\"",
"\"GO_BUILDER_NAME\"",
"\"GO_TEST_TIMEOUT_SCALE\"",
"\"GOROOT_FINAL\"",
"\"GO_TEST_SHORT\"",
"\"GO_BUILDER_NAME\"",
"\"GOROOT\"",
"\"GO_BUILDER_NAME\"",
"\"FC\"",
"\"GO_BUILDER_NAME\"",
"\"GO_BUILDER_NAME\""
]
| []
| [
"GO_GCFLAGS",
"FC",
"GOTESTONLY",
"GO_BUILDER_NAME",
"GO_TEST_TIMEOUT_SCALE",
"GOROOT_FINAL",
"GO_TEST_SHORT",
"GOROOT",
"PATH"
]
| [] | ["GO_GCFLAGS", "FC", "GOTESTONLY", "GO_BUILDER_NAME", "GO_TEST_TIMEOUT_SCALE", "GOROOT_FINAL", "GO_TEST_SHORT", "GOROOT", "PATH"] | go | 9 | 0 | |
contrib/packs/actions/pack_mgmt/register.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from st2client.client import Client
from st2client.models.keyvalue import KeyValuePair # pylint: disable=no-name-in-module
from st2common.runners.base_action import Action
__all__ = [
'St2RegisterAction'
]
COMPATIBILITY_TRANSFORMATIONS = {
'runners': 'runner',
'triggers': 'trigger',
'sensors': 'sensor',
'actions': 'action',
'rules': 'rule',
'rule_types': 'rule_type',
'aliases': 'alias',
'policiy_types': 'policy_type',
'policies': 'policy',
'configs': 'config',
}
def filter_none_values(value):
"""
Filter out string "None" values from the provided dict.
:rtype: ``dict``
"""
result = dict([(k, v) for k, v in value.items() if v != "None"])
return result
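# Illustrative example (not part of the original action): parameters whose value
# is the literal string "None" are dropped, e.g.
#     filter_none_values({'packs': 'examples', 'types': 'None'})
#     # -> {'packs': 'examples'}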
def format_result(item):
if not item:
return None
return item.to_dict()
class St2RegisterAction(Action):
def __init__(self, config):
super(St2RegisterAction, self).__init__(config)
self._client = Client
self._kvp = KeyValuePair
self.client = self._get_client()
def run(self, register, packs=None):
types = []
for type in register.split(','):
if type in COMPATIBILITY_TRANSFORMATIONS:
types.append(COMPATIBILITY_TRANSFORMATIONS[type])
else:
types.append(type)
method_kwargs = {
'types': types
}
if packs:
method_kwargs['packs'] = packs
result = self._run_client_method(method=self.client.packs.register,
method_kwargs=method_kwargs,
format_func=format_result)
# TODO: make sure to return proper model
return result
def _get_client(self):
base_url, api_url, auth_url = self._get_st2_urls()
token = self._get_auth_token()
cacert = self._get_cacert()
client_kwargs = {}
if cacert:
client_kwargs['cacert'] = cacert
return self._client(base_url=base_url, api_url=api_url,
auth_url=auth_url, token=token,
**client_kwargs)
def _get_st2_urls(self):
# First try to use base_url from config.
base_url = self.config.get('base_url', None)
api_url = self.config.get('api_url', None)
auth_url = self.config.get('auth_url', None)
# If not found, look them up from env vars, assuming the pack is
# configured to work with the current StackStorm instance.
if not base_url:
api_url = os.environ.get('ST2_ACTION_API_URL', None)
auth_url = os.environ.get('ST2_ACTION_AUTH_URL', None)
return base_url, api_url, auth_url
def _get_auth_token(self):
# First try to use auth_token from config.
token = self.config.get('auth_token', None)
# If not found, look it up from env vars, assuming the pack is
# configured to work with the current StackStorm instance.
if not token:
token = os.environ.get('ST2_ACTION_AUTH_TOKEN', None)
return token
def _get_cacert(self):
cacert = self.config.get('cacert', None)
return cacert
def _run_client_method(self, method, method_kwargs, format_func, format_kwargs=None):
"""
Run the provided client method and format the result.
:param method: Client method to run.
:type method: ``func``
:param method_kwargs: Keyword arguments passed to the client method.
:type method_kwargs: ``dict``
:param format_func: Function for formatting the result.
:type format_func: ``func``
:rtype: ``list`` of ``dict``
"""
# Filter out parameters with string value of "None"
        # This is a workaround since the default values can only be strings
method_kwargs = filter_none_values(method_kwargs)
method_name = method.__name__
self.logger.debug('Calling client method "%s" with kwargs "%s"' % (method_name,
method_kwargs))
result = method(**method_kwargs)
result = format_func(result, **format_kwargs or {})
return result
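# Illustrative usage sketch (not part of the pack): how this action might be
# driven directly from a Python shell. The config values below are assumptions
# for a local StackStorm install, not values defined anywhere in this file.
#
#   action = St2RegisterAction(config={
#       'base_url': 'https://st2.example.com',
#       'api_url': 'https://st2.example.com/api',
#       'auth_url': 'https://st2.example.com/auth',
#       'auth_token': '<token>',
#   })
#   # Registers only actions and rules from the "examples" pack; the plural
#   # type names are normalized via COMPATIBILITY_TRANSFORMATIONS.
#   result = action.run(register='actions,rules', packs=['examples'])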
| [] | [] | ["ST2_ACTION_API_URL", "ST2_ACTION_AUTH_URL", "ST2_ACTION_AUTH_TOKEN"] | [] | ["ST2_ACTION_API_URL", "ST2_ACTION_AUTH_URL", "ST2_ACTION_AUTH_TOKEN"] | python | 3 | 0 | |
module/apmsql/mysql/mysql_test.go | package apmmysql_test
import (
"context"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.elastic.co/apm/apmtest"
"go.elastic.co/apm/model"
"go.elastic.co/apm/module/apmsql"
_ "go.elastic.co/apm/module/apmsql/mysql"
)
var mysqlHost = os.Getenv("MYSQL_HOST")
func TestQueryContext(t *testing.T) {
if mysqlHost == "" {
t.Skipf("MYSQL_HOST not specified")
}
db, err := apmsql.Open("mysql", "root:hunter2@tcp("+mysqlHost+")/test_db")
require.NoError(t, err)
defer db.Close()
_, err = db.Exec("CREATE TABLE IF NOT EXISTS foo (bar INT)")
require.NoError(t, err)
_, spans, _ := apmtest.WithTransaction(func(ctx context.Context) {
rows, err := db.QueryContext(ctx, "SELECT * FROM foo")
require.NoError(t, err)
rows.Close()
})
require.Len(t, spans, 1)
assert.NotNil(t, spans[0].ID)
assert.Equal(t, "SELECT FROM foo", spans[0].Name)
assert.Equal(t, "mysql", spans[0].Subtype)
assert.Equal(t, &model.SpanContext{
Database: &model.DatabaseSpanContext{
Instance: "test_db",
Statement: "SELECT * FROM foo",
Type: "sql",
User: "root",
},
}, spans[0].Context)
}
| ["\"MYSQL_HOST\""] | [] | ["MYSQL_HOST"] | [] | ["MYSQL_HOST"] | go | 1 | 0 | |
src/azure-cli-core/azure/cli/core/cloud.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
import os
import json
from pprint import pformat
import configparser
from azure.cli.core.profiles import API_PROFILES
from azure.cli.core._config import GLOBAL_CONFIG_DIR
from azure.cli.core.util import urlretrieve
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
CLOUD_CONFIG_FILE = os.path.join(GLOBAL_CONFIG_DIR, 'clouds.config')
# Add names of clouds that don't allow telemetry data collection here such as JEDI.
CLOUDS_FORBIDDING_TELEMETRY = []
class CloudNotRegisteredException(Exception):
def __init__(self, cloud_name):
super(CloudNotRegisteredException, self).__init__(cloud_name)
self.cloud_name = cloud_name
def __str__(self):
return "The cloud '{}' is not registered.".format(self.cloud_name)
class CloudAlreadyRegisteredException(Exception):
def __init__(self, cloud_name):
super(CloudAlreadyRegisteredException, self).__init__(cloud_name)
self.cloud_name = cloud_name
def __str__(self):
return "The cloud '{}' is already registered.".format(self.cloud_name)
class CannotUnregisterCloudException(Exception):
pass
class CloudEndpointNotSetException(CLIError):
pass
class CloudSuffixNotSetException(CLIError):
pass
class CloudEndpoints: # pylint: disable=too-few-public-methods,too-many-instance-attributes
def __init__(self,
management=None,
resource_manager=None,
sql_management=None,
batch_resource_id=None,
gallery=None,
active_directory=None,
active_directory_resource_id=None,
active_directory_graph_resource_id=None,
microsoft_graph_resource_id=None,
active_directory_data_lake_resource_id=None,
vm_image_alias_doc=None,
media_resource_id=None,
ossrdbms_resource_id=None,
log_analytics_resource_id=None,
app_insights_resource_id=None,
app_insights_telemetry_channel_resource_id=None,
synapse_analytics_resource_id=None,
attestation_resource_id=None):
# Attribute names are significant. They are used when storing/retrieving clouds from config
self.management = management
self.resource_manager = resource_manager
self.sql_management = sql_management
self.batch_resource_id = batch_resource_id
self.gallery = gallery
self.active_directory = active_directory
self.active_directory_resource_id = active_directory_resource_id
self.active_directory_graph_resource_id = active_directory_graph_resource_id
self.microsoft_graph_resource_id = microsoft_graph_resource_id
self.active_directory_data_lake_resource_id = active_directory_data_lake_resource_id
self.vm_image_alias_doc = vm_image_alias_doc
self.media_resource_id = media_resource_id
self.ossrdbms_resource_id = ossrdbms_resource_id
self.log_analytics_resource_id = log_analytics_resource_id
self.app_insights_resource_id = app_insights_resource_id
self.app_insights_telemetry_channel_resource_id = app_insights_telemetry_channel_resource_id
self.synapse_analytics_resource_id = synapse_analytics_resource_id
self.attestation_resource_id = attestation_resource_id
def has_endpoint_set(self, endpoint_name):
try:
# Can't simply use hasattr here as we override __getattribute__ below.
# Python 3 hasattr() only returns False if an AttributeError is raised but we raise
# CloudEndpointNotSetException. This exception is not a subclass of AttributeError.
getattr(self, endpoint_name)
return True
except Exception: # pylint: disable=broad-except
return False
def __getattribute__(self, name):
val = object.__getattribute__(self, name)
if val is None:
raise CloudEndpointNotSetException("The endpoint '{}' for this cloud "
"is not set but is used.\n"
"{} may be corrupt or invalid.\nResolve the error or delete this file "
"and try again.".format(name, CLOUD_CONFIG_FILE))
return val
class CloudSuffixes: # pylint: disable=too-few-public-methods,too-many-instance-attributes
def __init__(self,
storage_endpoint=None,
storage_sync_endpoint=None,
keyvault_dns=None,
sql_server_hostname=None,
azure_datalake_store_file_system_endpoint=None,
azure_datalake_analytics_catalog_and_job_endpoint=None,
acr_login_server_endpoint=None,
mysql_server_endpoint=None,
postgresql_server_endpoint=None,
mariadb_server_endpoint=None,
synapse_analytics_endpoint=None,
attestation_endpoint=None):
# Attribute names are significant. They are used when storing/retrieving clouds from config
self.storage_endpoint = storage_endpoint
self.storage_sync_endpoint = storage_sync_endpoint
self.keyvault_dns = keyvault_dns
self.sql_server_hostname = sql_server_hostname
self.mysql_server_endpoint = mysql_server_endpoint
self.postgresql_server_endpoint = postgresql_server_endpoint
self.mariadb_server_endpoint = mariadb_server_endpoint
self.azure_datalake_store_file_system_endpoint = azure_datalake_store_file_system_endpoint
self.azure_datalake_analytics_catalog_and_job_endpoint = azure_datalake_analytics_catalog_and_job_endpoint
self.acr_login_server_endpoint = acr_login_server_endpoint
self.synapse_analytics_endpoint = synapse_analytics_endpoint
self.attestation_endpoint = attestation_endpoint
def __getattribute__(self, name):
val = object.__getattribute__(self, name)
if val is None:
raise CloudSuffixNotSetException("The suffix '{}' for this cloud "
"is not set but is used.\n"
"{} may be corrupt or invalid.\nResolve the error or delete this file "
"and try again.".format(name, CLOUD_CONFIG_FILE))
return val
def _get_ossrdbms_resource_id(cloud_name):
ossrdbms_mapper = {
'AzureCloud': 'https://ossrdbms-aad.database.windows.net',
'AzureChinaCloud': 'https://ossrdbms-aad.database.chinacloudapi.cn',
'AzureUSGovernment': 'https://ossrdbms-aad.database.usgovcloudapi.net',
'AzureGermanCloud': 'https://ossrdbms-aad.database.cloudapi.de'
}
return ossrdbms_mapper.get(cloud_name, None)
def _get_microsoft_graph_resource_id(cloud_name):
graph_endpoint_mapper = {
'AzureCloud': 'https://graph.microsoft.com/',
'AzureChinaCloud': 'https://microsoftgraph.chinacloudapi.cn/',
'AzureUSGovernment': 'https://graph.microsoft.us/',
'AzureGermanCloud': 'https://graph.microsoft.de/'
}
return graph_endpoint_mapper.get(cloud_name, None)
def _get_storage_sync_endpoint(cloud_name):
storage_sync_endpoint_mapper = {
'AzureCloud': 'afs.azure.net',
'AzureUSGovernment': 'afs.azure.us',
}
return storage_sync_endpoint_mapper.get(cloud_name, None)
def _get_synapse_analytics_endpoint(cloud_name):
synapse_analytics_endpoint_mapper = {
'AzureCloud': 'dev.azuresynapse.net',
'AzureChinaCloud': 'dev.azuresynapse.azure.cn'
}
return synapse_analytics_endpoint_mapper.get(cloud_name, None)
def _get_database_server_endpoint(sql_server_hostname, cloud_name):
def _concat_db_server_endpoint(db_prefix):
if cloud_name == 'AzureCloud':
return db_prefix + '.database.azure.com'
if not sql_server_hostname:
return None
return db_prefix + sql_server_hostname
return _concat_db_server_endpoint
def _get_app_insights_telemetry_channel_resource_id(cloud_name):
app_insights_telemetry_channel_resource_id_mapper = {
'AzureCloud': 'https://dc.applicationinsights.azure.com/v2/track',
'AzureChinaCloud': 'https://dc.applicationinsights.azure.cn/v2/track',
'AzureUSGovernment': 'https://dc.applicationinsights.us/v2/track'
}
return app_insights_telemetry_channel_resource_id_mapper.get(cloud_name, None)
def _get_log_analytics_resource_id(cloud_name):
log_analytics_resource_id_mapper = {
'AzureCloud': 'https://api.loganalytics.io',
'AzureChinaCloud': 'https://api.loganalytics.azure.cn',
'AzureUSGovernment': 'https://api.loganalytics.us'
}
return log_analytics_resource_id_mapper.get(cloud_name, None)
def _get_app_insights_resource_id(cloud_name):
app_insights_resource_id_mapper = {
'AzureCloud': 'https://api.applicationinsights.io',
'AzureChinaCloud': 'https://api.applicationinsights.azure.cn',
'AzureUSGovernment': 'https://api.applicationinsights.us'
}
return app_insights_resource_id_mapper.get(cloud_name, None)
def _get_synapse_analytics_resource_id(cloud_name):
synapse_analytics_resource_id_mapper = {
'AzureCloud': 'https://dev.azuresynapse.net',
'AzureChinaCloud': 'https://dev.azuresynapse.net'
}
return synapse_analytics_resource_id_mapper.get(cloud_name, None)
def _get_attestation_resource_id(cloud_name):
attestation_resource_id_mapper = {
'AzureCloud': 'https://attest.azure.net'
}
return attestation_resource_id_mapper.get(cloud_name, None)
def _get_attestation_endpoint(cloud_name):
attestation_endpoint_mapper = {
'AzureCloud': '.attest.azure.net'
}
return attestation_endpoint_mapper.get(cloud_name, None)
def _convert_arm_to_cli(arm_cloud_metadata_dict):
cli_cloud_metadata_dict = {}
for cloud in arm_cloud_metadata_dict:
cli_cloud_metadata_dict[cloud['name']] = _arm_to_cli_mapper(cloud)
if 'AzureCloud' in cli_cloud_metadata_dict:
cli_cloud_metadata_dict['AzureCloud'].endpoints.active_directory = 'https://login.microsoftonline.com' # change once active_directory is fixed in ARM for the public cloud
return cli_cloud_metadata_dict
def _add_starting_dot(suffix):
return suffix if not suffix or suffix.startswith('.') else '.' + suffix
def _get_arm_endpoint(arm_dict, is_suffix=False):
def _get_processed_arm_endpoint(name, add_dot=False, fallback_value=None):
if is_suffix:
return (_add_starting_dot(arm_dict['suffixes'][name]) if add_dot else arm_dict['suffixes'][name]) if name in arm_dict['suffixes'] else fallback_value
return arm_dict[name] if name in arm_dict else fallback_value
return _get_processed_arm_endpoint
def _arm_to_cli_mapper(arm_dict):
get_endpoint = _get_arm_endpoint(arm_dict)
get_suffix = _get_arm_endpoint(arm_dict, is_suffix=True)
sql_server_hostname = get_suffix('sqlServerHostname', add_dot=True)
get_db_server_endpoint = _get_database_server_endpoint(sql_server_hostname, arm_dict['name'])
return Cloud(
arm_dict['name'],
endpoints=CloudEndpoints(
management=arm_dict['authentication']['audiences'][0],
resource_manager=arm_dict['resourceManager'],
sql_management=arm_dict['sqlManagement'],
batch_resource_id=arm_dict['batch'],
gallery=arm_dict['gallery'],
active_directory=arm_dict['authentication']['loginEndpoint'],
active_directory_resource_id=arm_dict['authentication']['audiences'][0],
active_directory_graph_resource_id=arm_dict['graphAudience'],
microsoft_graph_resource_id=_get_microsoft_graph_resource_id(arm_dict['name']), # change once microsoft_graph_resource_id is fixed in ARM
vm_image_alias_doc=arm_dict['vmImageAliasDoc'],
media_resource_id=arm_dict['media'],
ossrdbms_resource_id=_get_ossrdbms_resource_id(arm_dict['name']), # change once ossrdbms_resource_id is available via ARM
active_directory_data_lake_resource_id=arm_dict['activeDirectoryDataLake'] if 'activeDirectoryDataLake' in arm_dict else None,
app_insights_resource_id=get_endpoint('appInsightsResourceId', fallback_value=_get_app_insights_resource_id(arm_dict['name'])),
log_analytics_resource_id=get_endpoint('logAnalyticsResourceId', fallback_value=_get_log_analytics_resource_id(arm_dict['name'])),
synapse_analytics_resource_id=get_endpoint('synapseAnalyticsResourceId', fallback_value=_get_synapse_analytics_resource_id(arm_dict['name'])),
app_insights_telemetry_channel_resource_id=get_endpoint('appInsightsTelemetryChannelResourceId', fallback_value=_get_app_insights_telemetry_channel_resource_id(arm_dict['name'])),
attestation_resource_id=get_endpoint('attestationResourceId', fallback_value=_get_attestation_resource_id(arm_dict['name']))),
suffixes=CloudSuffixes(
storage_endpoint=get_suffix('storage'),
storage_sync_endpoint=get_suffix('storageSyncEndpointSuffix', fallback_value=_get_storage_sync_endpoint(arm_dict['name'])),
keyvault_dns=get_suffix('keyVaultDns', add_dot=True),
sql_server_hostname=sql_server_hostname,
mysql_server_endpoint=get_suffix('mysqlServerEndpoint', add_dot=True, fallback_value=get_db_server_endpoint('.mysql')),
postgresql_server_endpoint=get_suffix('postgresqlServerEndpoint', add_dot=True, fallback_value=get_db_server_endpoint('.postgres')),
mariadb_server_endpoint=get_suffix('mariadbServerEndpoint', add_dot=True, fallback_value=get_db_server_endpoint('.mariadb')),
azure_datalake_store_file_system_endpoint=get_suffix('azureDataLakeStoreFileSystem'),
azure_datalake_analytics_catalog_and_job_endpoint=get_suffix('azureDataLakeAnalyticsCatalogAndJob'),
synapse_analytics_endpoint=get_suffix('synapseAnalytics', add_dot=True, fallback_value=_get_synapse_analytics_endpoint(arm_dict['name'])),
acr_login_server_endpoint=get_suffix('acrLoginServer', add_dot=True),
attestation_endpoint=get_suffix('attestationEndpoint', add_dot=True, fallback_value=_get_attestation_endpoint(arm_dict['name']))))
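# Illustrative sketch (assumed ARM metadata shape, trimmed to the keys the
# mapper reads): _arm_to_cli_mapper expects one entry from the ARM cloud
# metadata endpoint and returns a Cloud instance.
#
#   sample_arm_entry = {
#       'name': 'AzureCloud',
#       'authentication': {
#           'loginEndpoint': 'https://login.microsoftonline.com',
#           'audiences': ['https://management.core.windows.net/']},
#       'resourceManager': 'https://management.azure.com/',
#       'sqlManagement': 'https://management.core.windows.net:8443/',
#       'batch': 'https://batch.core.windows.net/',
#       'gallery': 'https://gallery.azure.com/',
#       'graphAudience': 'https://graph.windows.net/',
#       'vmImageAliasDoc': 'https://example.com/aliases.json',
#       'media': 'https://rest.media.azure.net',
#       'suffixes': {'storage': 'core.windows.net'},
#   }
#   cloud = _arm_to_cli_mapper(sample_arm_entry)  # missing optional keys fall back to defaults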
class Cloud: # pylint: disable=too-few-public-methods
""" Represents an Azure Cloud instance """
def __init__(self,
name,
endpoints=None,
suffixes=None,
profile=None,
is_active=False):
self.name = name
self.endpoints = endpoints or CloudEndpoints()
self.suffixes = suffixes or CloudSuffixes()
self.profile = profile
self.is_active = is_active
def __str__(self):
o = {
'profile': self.profile,
'name': self.name,
'is_active': self.is_active,
'endpoints': vars(self.endpoints),
'suffixes': vars(self.suffixes),
}
return pformat(o)
AZURE_PUBLIC_CLOUD = Cloud(
'AzureCloud',
endpoints=CloudEndpoints(
management='https://management.core.windows.net/',
resource_manager='https://management.azure.com/',
sql_management='https://management.core.windows.net:8443/',
batch_resource_id='https://batch.core.windows.net/',
gallery='https://gallery.azure.com/',
active_directory='https://login.microsoftonline.com',
active_directory_resource_id='https://management.core.windows.net/',
active_directory_graph_resource_id='https://graph.windows.net/',
microsoft_graph_resource_id='https://graph.microsoft.com/',
active_directory_data_lake_resource_id='https://datalake.azure.net/',
vm_image_alias_doc='https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/arm-compute/quickstart-templates/aliases.json',
media_resource_id='https://rest.media.azure.net',
ossrdbms_resource_id='https://ossrdbms-aad.database.windows.net',
app_insights_resource_id='https://api.applicationinsights.io',
log_analytics_resource_id='https://api.loganalytics.io',
app_insights_telemetry_channel_resource_id='https://dc.applicationinsights.azure.com/v2/track',
synapse_analytics_resource_id='https://dev.azuresynapse.net',
attestation_resource_id='https://attest.azure.net'),
suffixes=CloudSuffixes(
storage_endpoint='core.windows.net',
storage_sync_endpoint='afs.azure.net',
keyvault_dns='.vault.azure.net',
sql_server_hostname='.database.windows.net',
mysql_server_endpoint='.mysql.database.azure.com',
postgresql_server_endpoint='.postgres.database.azure.com',
mariadb_server_endpoint='.mariadb.database.azure.com',
azure_datalake_store_file_system_endpoint='azuredatalakestore.net',
azure_datalake_analytics_catalog_and_job_endpoint='azuredatalakeanalytics.net',
acr_login_server_endpoint='.azurecr.io',
synapse_analytics_endpoint='.dev.azuresynapse.net',
attestation_endpoint='.attest.azure.net'))
AZURE_CHINA_CLOUD = Cloud(
'AzureChinaCloud',
endpoints=CloudEndpoints(
management='https://management.core.chinacloudapi.cn/',
resource_manager='https://management.chinacloudapi.cn',
sql_management='https://management.core.chinacloudapi.cn:8443/',
batch_resource_id='https://batch.chinacloudapi.cn/',
gallery='https://gallery.chinacloudapi.cn/',
active_directory='https://login.chinacloudapi.cn',
active_directory_resource_id='https://management.core.chinacloudapi.cn/',
active_directory_graph_resource_id='https://graph.chinacloudapi.cn/',
microsoft_graph_resource_id='https://microsoftgraph.chinacloudapi.cn',
vm_image_alias_doc='https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/arm-compute/quickstart-templates/aliases.json',
media_resource_id='https://rest.media.chinacloudapi.cn',
ossrdbms_resource_id='https://ossrdbms-aad.database.chinacloudapi.cn',
app_insights_resource_id='https://api.applicationinsights.azure.cn',
log_analytics_resource_id='https://api.loganalytics.azure.cn',
app_insights_telemetry_channel_resource_id='https://dc.applicationinsights.azure.cn/v2/track',
synapse_analytics_resource_id='https://dev.azuresynapse.net'),
suffixes=CloudSuffixes(
storage_endpoint='core.chinacloudapi.cn',
keyvault_dns='.vault.azure.cn',
sql_server_hostname='.database.chinacloudapi.cn',
mysql_server_endpoint='.mysql.database.chinacloudapi.cn',
postgresql_server_endpoint='.postgres.database.chinacloudapi.cn',
mariadb_server_endpoint='.mariadb.database.chinacloudapi.cn',
acr_login_server_endpoint='.azurecr.cn',
synapse_analytics_endpoint='.dev.azuresynapse.azure.cn'))
AZURE_US_GOV_CLOUD = Cloud(
'AzureUSGovernment',
endpoints=CloudEndpoints(
management='https://management.core.usgovcloudapi.net/',
resource_manager='https://management.usgovcloudapi.net/',
sql_management='https://management.core.usgovcloudapi.net:8443/',
batch_resource_id='https://batch.core.usgovcloudapi.net/',
gallery='https://gallery.usgovcloudapi.net/',
active_directory='https://login.microsoftonline.us',
active_directory_resource_id='https://management.core.usgovcloudapi.net/',
active_directory_graph_resource_id='https://graph.windows.net/',
microsoft_graph_resource_id='https://graph.microsoft.us/',
vm_image_alias_doc='https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/arm-compute/quickstart-templates/aliases.json',
media_resource_id='https://rest.media.usgovcloudapi.net',
ossrdbms_resource_id='https://ossrdbms-aad.database.usgovcloudapi.net',
app_insights_resource_id='https://api.applicationinsights.us',
log_analytics_resource_id='https://api.loganalytics.us',
app_insights_telemetry_channel_resource_id='https://dc.applicationinsights.us/v2/track'),
suffixes=CloudSuffixes(
storage_endpoint='core.usgovcloudapi.net',
storage_sync_endpoint='afs.azure.us',
keyvault_dns='.vault.usgovcloudapi.net',
sql_server_hostname='.database.usgovcloudapi.net',
mysql_server_endpoint='.mysql.database.usgovcloudapi.net',
postgresql_server_endpoint='.postgres.database.usgovcloudapi.net',
mariadb_server_endpoint='.mariadb.database.usgovcloudapi.net',
acr_login_server_endpoint='.azurecr.us'))
AZURE_GERMAN_CLOUD = Cloud(
'AzureGermanCloud',
endpoints=CloudEndpoints(
management='https://management.core.cloudapi.de/',
resource_manager='https://management.microsoftazure.de',
sql_management='https://management.core.cloudapi.de:8443/',
batch_resource_id='https://batch.cloudapi.de/',
gallery='https://gallery.cloudapi.de/',
active_directory='https://login.microsoftonline.de',
active_directory_resource_id='https://management.core.cloudapi.de/',
active_directory_graph_resource_id='https://graph.cloudapi.de/',
microsoft_graph_resource_id='https://graph.microsoft.de',
vm_image_alias_doc='https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/arm-compute/quickstart-templates/aliases.json',
media_resource_id='https://rest.media.cloudapi.de',
ossrdbms_resource_id='https://ossrdbms-aad.database.cloudapi.de'),
suffixes=CloudSuffixes(
storage_endpoint='core.cloudapi.de',
keyvault_dns='.vault.microsoftazure.de',
sql_server_hostname='.database.cloudapi.de',
mysql_server_endpoint='.mysql.database.cloudapi.de',
postgresql_server_endpoint='.postgres.database.cloudapi.de',
mariadb_server_endpoint='.mariadb.database.cloudapi.de'))
KNOWN_CLOUDS = [AZURE_PUBLIC_CLOUD, AZURE_CHINA_CLOUD, AZURE_US_GOV_CLOUD, AZURE_GERMAN_CLOUD]
if 'ARM_CLOUD_METADATA_URL' in os.environ:
try:
arm_cloud_dict = json.loads(urlretrieve(os.getenv('ARM_CLOUD_METADATA_URL')))
cli_cloud_dict = _convert_arm_to_cli(arm_cloud_dict)
if 'AzureCloud' in cli_cloud_dict:
cli_cloud_dict['AzureCloud'].endpoints.active_directory = 'https://login.microsoftonline.com' # change once active_directory is fixed in ARM for the public cloud
KNOWN_CLOUDS = list(cli_cloud_dict.values())
except Exception as ex: # pylint: disable=broad-except
logger.warning('Failed to load cloud metadata from the url specified by ARM_CLOUD_METADATA_URL')
raise ex
def _set_active_cloud(cli_ctx, cloud_name):
cli_ctx.config.set_value('cloud', 'name', cloud_name)
cli_ctx.cloud = get_cloud(cli_ctx, cloud_name)
def get_active_cloud_name(cli_ctx):
try:
return cli_ctx.config.get('cloud', 'name')
except (configparser.NoOptionError, configparser.NoSectionError):
_set_active_cloud(cli_ctx, AZURE_PUBLIC_CLOUD.name)
return AZURE_PUBLIC_CLOUD.name
def _get_cloud(cli_ctx, cloud_name):
return next((x for x in get_clouds(cli_ctx) if x.name == cloud_name), None)
def cloud_is_registered(cli_ctx, cloud_name):
return bool(_get_cloud(cli_ctx, cloud_name))
def get_custom_clouds(cli_ctx):
known_cloud_names = [c.name for c in KNOWN_CLOUDS]
return [c for c in get_clouds(cli_ctx) if c.name not in known_cloud_names]
def _get_cloud_name(cli_ctx, cloud_name):
return next((x.name for x in get_clouds(cli_ctx) if x.name.lower() == cloud_name.lower()), cloud_name)
def get_clouds(cli_ctx):
clouds = []
config = configparser.ConfigParser()
# Start off with known clouds and apply config file on top of current config
for c in KNOWN_CLOUDS:
_config_add_cloud(config, c)
try:
config.read(CLOUD_CONFIG_FILE)
except configparser.MissingSectionHeaderError:
os.remove(CLOUD_CONFIG_FILE)
logger.warning("'%s' is in bad format and has been removed.", CLOUD_CONFIG_FILE)
for section in config.sections():
c = Cloud(section)
for option in config.options(section):
if option == 'profile':
c.profile = config.get(section, option)
if option.startswith('endpoint_'):
setattr(c.endpoints, option.replace('endpoint_', ''), config.get(section, option))
elif option.startswith('suffix_'):
setattr(c.suffixes, option.replace('suffix_', ''), config.get(section, option))
if c.profile is None:
# If profile isn't set, use latest
setattr(c, 'profile', 'latest')
if c.profile not in API_PROFILES:
raise CLIError('Profile {} does not exist or is not supported.'.format(c.profile))
if not c.endpoints.has_endpoint_set('management') and \
c.endpoints.has_endpoint_set('resource_manager'):
# If management endpoint not set, use resource manager endpoint
c.endpoints.management = c.endpoints.resource_manager
clouds.append(c)
active_cloud_name = get_active_cloud_name(cli_ctx)
for c in clouds:
if c.name == active_cloud_name:
c.is_active = True
break
return clouds
def get_cloud(cli_ctx, cloud_name):
cloud = _get_cloud(cli_ctx, cloud_name)
if not cloud:
raise CloudNotRegisteredException(cloud_name)
return cloud
def get_active_cloud(cli_ctx=None):
if not cli_ctx:
from azure.cli.core import get_default_cli
cli_ctx = get_default_cli()
try:
return get_cloud(cli_ctx, get_active_cloud_name(cli_ctx))
except CloudNotRegisteredException as err:
logger.warning(err)
logger.warning("Resetting active cloud to'%s'.", AZURE_PUBLIC_CLOUD.name)
_set_active_cloud(cli_ctx, AZURE_PUBLIC_CLOUD.name)
return get_cloud(cli_ctx, AZURE_PUBLIC_CLOUD.name)
def get_cloud_subscription(cloud_name):
config = configparser.ConfigParser()
config.read(CLOUD_CONFIG_FILE)
try:
return config.get(cloud_name, 'subscription')
except (configparser.NoOptionError, configparser.NoSectionError):
return None
def set_cloud_subscription(cli_ctx, cloud_name, subscription):
if not _get_cloud(cli_ctx, cloud_name):
raise CloudNotRegisteredException(cloud_name)
config = configparser.ConfigParser()
config.read(CLOUD_CONFIG_FILE)
if subscription:
try:
config.add_section(cloud_name)
except configparser.DuplicateSectionError:
pass
config.set(cloud_name, 'subscription', subscription)
else:
try:
config.remove_option(cloud_name, 'subscription')
except configparser.NoSectionError:
pass
if not os.path.isdir(GLOBAL_CONFIG_DIR):
os.makedirs(GLOBAL_CONFIG_DIR)
with open(CLOUD_CONFIG_FILE, 'w') as configfile:
config.write(configfile)
def _set_active_subscription(cli_ctx, cloud_name):
from azure.cli.core._profile import (Profile, _ENVIRONMENT_NAME, _SUBSCRIPTION_ID,
_STATE, _SUBSCRIPTION_NAME)
profile = Profile(cli_ctx=cli_ctx)
subscription_to_use = get_cloud_subscription(cloud_name) or \
next((s[_SUBSCRIPTION_ID] for s in profile.load_cached_subscriptions() # noqa
if s[_STATE] == 'Enabled'),
None)
if subscription_to_use:
try:
profile.set_active_subscription(subscription_to_use)
sub = profile.get_subscription(subscription_to_use)
logger.warning("Active subscription switched to '%s (%s)'.",
sub[_SUBSCRIPTION_NAME], sub[_SUBSCRIPTION_ID])
except CLIError as e:
logger.warning(e)
logger.warning("Unable to automatically switch the active subscription. "
"Use 'az account set'.")
else:
logger.warning("Use 'az login' to log in to this cloud.")
logger.warning("Use 'az account set' to set the active subscription.")
def switch_active_cloud(cli_ctx, cloud_name):
if cli_ctx.cloud.name == cloud_name:
return
if not _get_cloud(cli_ctx, cloud_name):
raise CloudNotRegisteredException(cloud_name)
_set_active_cloud(cli_ctx, cloud_name)
logger.warning("Switched active cloud to '%s'.", cloud_name)
_set_active_subscription(cli_ctx, cloud_name)
def _config_add_cloud(config, cloud, overwrite=False):
""" Add a cloud to a config object """
try:
config.add_section(cloud.name)
except configparser.DuplicateSectionError:
if not overwrite:
raise CloudAlreadyRegisteredException(cloud.name)
if cloud.profile:
config.set(cloud.name, 'profile', cloud.profile)
for k, v in cloud.endpoints.__dict__.items():
if v is not None:
config.set(cloud.name, 'endpoint_{}'.format(k), v)
for k, v in cloud.suffixes.__dict__.items():
if v is not None:
config.set(cloud.name, 'suffix_{}'.format(k), v)
def _save_cloud(cloud, overwrite=False):
config = configparser.ConfigParser()
config.read(CLOUD_CONFIG_FILE)
_config_add_cloud(config, cloud, overwrite=overwrite)
if not os.path.isdir(GLOBAL_CONFIG_DIR):
os.makedirs(GLOBAL_CONFIG_DIR)
with open(CLOUD_CONFIG_FILE, 'w') as configfile:
config.write(configfile)
def add_cloud(cli_ctx, cloud):
if _get_cloud(cli_ctx, cloud.name):
raise CloudAlreadyRegisteredException(cloud.name)
_save_cloud(cloud)
def update_cloud(cli_ctx, cloud):
if not _get_cloud(cli_ctx, cloud.name):
raise CloudNotRegisteredException(cloud.name)
_save_cloud(cloud, overwrite=True)
def remove_cloud(cli_ctx, cloud_name):
if not _get_cloud(cli_ctx, cloud_name):
raise CloudNotRegisteredException(cloud_name)
if cloud_name == cli_ctx.cloud.name:
raise CannotUnregisterCloudException("The cloud '{}' cannot be unregistered "
"as it's currently active.".format(cloud_name))
is_known_cloud = next((x for x in KNOWN_CLOUDS if x.name == cloud_name), None)
if is_known_cloud:
raise CannotUnregisterCloudException("The cloud '{}' cannot be unregistered "
"as it's not a custom cloud.".format(cloud_name))
config = configparser.ConfigParser()
config.read(CLOUD_CONFIG_FILE)
config.remove_section(cloud_name)
with open(CLOUD_CONFIG_FILE, 'w') as configfile:
config.write(configfile)
def cloud_forbid_telemetry(cli_ctx):
return get_active_cloud_name(cli_ctx) in CLOUDS_FORBIDDING_TELEMETRY
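# Illustrative sketch (not part of azure-cli): registering and switching to a
# custom cloud with the helpers above. The endpoint and suffix values are
# assumptions for a hypothetical Azure Stack deployment.
#
#   from azure.cli.core import get_default_cli
#   cli_ctx = get_default_cli()
#   stack_cloud = Cloud(
#       'MyAzureStack',
#       endpoints=CloudEndpoints(
#           resource_manager='https://management.local.azurestack.external/',
#           active_directory='https://login.microsoftonline.com',
#           active_directory_resource_id='https://management.local.azurestack.external/'),
#       suffixes=CloudSuffixes(storage_endpoint='local.azurestack.external'))
#   add_cloud(cli_ctx, stack_cloud)              # persists the cloud to clouds.config
#   switch_active_cloud(cli_ctx, 'MyAzureStack')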
| [] | [] | ["ARM_CLOUD_METADATA_URL"] | [] | ["ARM_CLOUD_METADATA_URL"] | python | 1 | 0 | |
google/cloud/talent/v4/talent-v4-py/google/cloud/talent_v4/services/job_service/client.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.talent_v4.services.job_service import pagers
from google.cloud.talent_v4.types import common
from google.cloud.talent_v4.types import histogram
from google.cloud.talent_v4.types import job
from google.cloud.talent_v4.types import job as gct_job
from google.cloud.talent_v4.types import job_service
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import JobServiceGrpcTransport
from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport
class JobServiceClientMeta(type):
"""Metaclass for the JobService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]]
_transport_registry["grpc"] = JobServiceGrpcTransport
_transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport
def get_transport_class(cls,
label: str = None,
) -> Type[JobServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class JobServiceClient(metaclass=JobServiceClientMeta):
"""A service handles job management, including job CRUD,
enumeration and search.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "jobs.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
JobServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
JobServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> JobServiceTransport:
"""Returns the transport used by the client instance.
Returns:
JobServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def company_path(project: str,tenant: str,company: str,) -> str:
"""Returns a fully-qualified company string."""
return "projects/{project}/tenants/{tenant}/companies/{company}".format(project=project, tenant=tenant, company=company, )
@staticmethod
def parse_company_path(path: str) -> Dict[str,str]:
"""Parses a company path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/companies/(?P<company>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def job_path(project: str,tenant: str,job: str,) -> str:
"""Returns a fully-qualified job string."""
return "projects/{project}/tenants/{tenant}/jobs/{job}".format(project=project, tenant=tenant, job=job, )
@staticmethod
def parse_job_path(path: str) -> Dict[str,str]:
"""Parses a job path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def tenant_path(project: str,tenant: str,) -> str:
"""Returns a fully-qualified tenant string."""
return "projects/{project}/tenants/{tenant}".format(project=project, tenant=tenant, )
@staticmethod
def parse_tenant_path(path: str) -> Dict[str,str]:
"""Parses a tenant path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, JobServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the job service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, JobServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, JobServiceTransport):
# transport is a JobServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError("When providing a transport instance, "
"provide its credentials directly.")
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def create_job(self,
request: Union[job_service.CreateJobRequest, dict] = None,
*,
parent: str = None,
job: gct_job.Job = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gct_job.Job:
r"""Creates a new job.
Typically, the job becomes searchable within 10 seconds,
but it may take up to 5 minutes.
Args:
request (Union[google.cloud.talent_v4.types.CreateJobRequest, dict]):
The request object. Create job request.
parent (str):
Required. The resource name of the tenant under which
the job is created.
The format is
"projects/{project_id}/tenants/{tenant_id}". For
example, "projects/foo/tenants/bar".
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job (google.cloud.talent_v4.types.Job):
Required. The Job to be created.
This corresponds to the ``job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4.types.Job:
A Job resource represents a job posting (also referred to as a "job listing"
or "job requisition"). A job belongs to a
[Company][google.cloud.talent.v4.Company], which is
the hiring entity responsible for the job.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, job])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CreateJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CreateJobRequest):
request = job_service.CreateJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if job is not None:
request.job = job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
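    # Illustrative call sketch (hypothetical project/tenant names, not part of
    # this client): creating a job through the flattened arguments.
    #
    #   client = JobServiceClient()
    #   job = gct_job.Job(
    #       company=client.company_path('my-project', 'my-tenant', 'my-company'),
    #       requisition_id='req-001',
    #       title='Software Engineer',
    #       description='Designs and builds backend services.')
    #   created = client.create_job(
    #       parent='projects/my-project/tenants/my-tenant', job=job)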
def batch_create_jobs(self,
request: Union[job_service.BatchCreateJobsRequest, dict] = None,
*,
parent: str = None,
jobs: Sequence[job.Job] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Begins executing a batch create jobs operation.
Args:
request (Union[google.cloud.talent_v4.types.BatchCreateJobsRequest, dict]):
The request object. Request to create a batch of jobs.
parent (str):
Required. The resource name of the tenant under which
the job is created.
The format is
"projects/{project_id}/tenants/{tenant_id}". For
example, "projects/foo/tenants/bar".
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
jobs (Sequence[google.cloud.talent_v4.types.Job]):
Required. The jobs to be created.
A maximum of 200 jobs can be created in
a batch.
This corresponds to the ``jobs`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.talent_v4.types.BatchCreateJobsResponse` The result of [JobService.BatchCreateJobs][google.cloud.talent.v4.JobService.BatchCreateJobs]. It's used to
replace
[google.longrunning.Operation.response][google.longrunning.Operation.response]
in case of success.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, jobs])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a job_service.BatchCreateJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.BatchCreateJobsRequest):
request = job_service.BatchCreateJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if jobs is not None:
request.jobs = jobs
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
job_service.BatchCreateJobsResponse,
metadata_type=common.BatchOperationMetadata,
)
# Done; return the response.
return response
def get_job(self,
request: Union[job_service.GetJobRequest, dict] = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> job.Job:
r"""Retrieves the specified job, whose status is OPEN or
recently EXPIRED within the last 90 days.
Args:
request (Union[google.cloud.talent_v4.types.GetJobRequest, dict]):
The request object. Get job request.
name (str):
Required. The resource name of the job to retrieve.
The format is
"projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}".
For example, "projects/foo/tenants/bar/jobs/baz".
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4.types.Job:
A Job resource represents a job posting (also referred to as a "job listing"
or "job requisition"). A job belongs to a
[Company][google.cloud.talent.v4.Company], which is
the hiring entity responsible for the job.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a job_service.GetJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.GetJobRequest):
request = job_service.GetJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_job(self,
request: Union[job_service.UpdateJobRequest, dict] = None,
*,
job: gct_job.Job = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gct_job.Job:
r"""Updates specified job.
Typically, updated contents become visible in search
results within 10 seconds, but it may take up to 5
minutes.
Args:
request (Union[google.cloud.talent_v4.types.UpdateJobRequest, dict]):
The request object. Update job request.
job (google.cloud.talent_v4.types.Job):
Required. The Job to be updated.
This corresponds to the ``job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Strongly recommended for the best service experience.
If
[update_mask][google.cloud.talent.v4.UpdateJobRequest.update_mask]
is provided, only the specified fields in
[job][google.cloud.talent.v4.UpdateJobRequest.job] are
updated. Otherwise all the fields are updated.
A field mask to restrict the fields that are updated.
Only top level fields of
[Job][google.cloud.talent.v4.Job] are supported.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4.types.Job:
A Job resource represents a job posting (also referred to as a "job listing"
or "job requisition"). A job belongs to a
[Company][google.cloud.talent.v4.Company], which is
the hiring entity responsible for the job.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([job, update_mask])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a job_service.UpdateJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.UpdateJobRequest):
request = job_service.UpdateJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if job is not None:
request.job = job
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("job.name", request.job.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def batch_update_jobs(self,
request: Union[job_service.BatchUpdateJobsRequest, dict] = None,
*,
parent: str = None,
jobs: Sequence[job.Job] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Begins executing a batch update jobs operation.
Args:
request (Union[google.cloud.talent_v4.types.BatchUpdateJobsRequest, dict]):
The request object. Request to update a batch of jobs.
parent (str):
Required. The resource name of the tenant under which
the job is created.
The format is
"projects/{project_id}/tenants/{tenant_id}". For
example, "projects/foo/tenants/bar".
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
jobs (Sequence[google.cloud.talent_v4.types.Job]):
Required. The jobs to be updated.
A maximum of 200 jobs can be updated in
a batch.
This corresponds to the ``jobs`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.talent_v4.types.BatchUpdateJobsResponse` The result of [JobService.BatchUpdateJobs][google.cloud.talent.v4.JobService.BatchUpdateJobs]. It's used to
replace
[google.longrunning.Operation.response][google.longrunning.Operation.response]
in case of success.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, jobs])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a job_service.BatchUpdateJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.BatchUpdateJobsRequest):
request = job_service.BatchUpdateJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if jobs is not None:
request.jobs = jobs
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
job_service.BatchUpdateJobsResponse,
metadata_type=common.BatchOperationMetadata,
)
# Done; return the response.
return response
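    # Hedged usage sketch for ``batch_update_jobs``: the call returns a
    # long-running operation, so ``result()`` blocks until the batch finishes and
    # yields a BatchUpdateJobsResponse. The tenant path and jobs list are
    # placeholders.
    #
    #   operation = client.batch_update_jobs(
    #       parent="projects/foo/tenants/bar",
    #       jobs=[job_a, job_b],  # at most 200 jobs per batch
    #   )
    #   batch_response = operation.result()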
def delete_job(self,
request: Union[job_service.DeleteJobRequest, dict] = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified job.
Typically, the job becomes unsearchable within 10
seconds, but it may take up to 5 minutes.
Args:
request (Union[google.cloud.talent_v4.types.DeleteJobRequest, dict]):
The request object. Delete job request.
name (str):
Required. The resource name of the job to be deleted.
The format is
"projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}".
For example, "projects/foo/tenants/bar/jobs/baz".
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a job_service.DeleteJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.DeleteJobRequest):
request = job_service.DeleteJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def batch_delete_jobs(self,
request: Union[job_service.BatchDeleteJobsRequest, dict] = None,
*,
parent: str = None,
names: Sequence[str] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Begins executing a batch delete jobs operation.
Args:
request (Union[google.cloud.talent_v4.types.BatchDeleteJobsRequest, dict]):
The request object. Request to delete a batch of jobs.
parent (str):
Required. The resource name of the tenant under which
the job is created.
The format is
"projects/{project_id}/tenants/{tenant_id}". For
example, "projects/foo/tenants/bar".
The parent of all of the jobs specified in ``names``
must match this field.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
names (Sequence[str]):
The names of the jobs to delete.
The format is
"projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}".
For example, "projects/foo/tenants/bar/jobs/baz".
A maximum of 200 jobs can be deleted in a batch.
This corresponds to the ``names`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.talent_v4.types.BatchDeleteJobsResponse` The result of [JobService.BatchDeleteJobs][google.cloud.talent.v4.JobService.BatchDeleteJobs]. It's used to
replace
[google.longrunning.Operation.response][google.longrunning.Operation.response]
in case of success.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, names])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a job_service.BatchDeleteJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.BatchDeleteJobsRequest):
request = job_service.BatchDeleteJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if names is not None:
request.names = names
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
job_service.BatchDeleteJobsResponse,
metadata_type=common.BatchOperationMetadata,
)
# Done; return the response.
return response
def list_jobs(self,
request: Union[job_service.ListJobsRequest, dict] = None,
*,
parent: str = None,
filter: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobsPager:
r"""Lists jobs by filter.
Args:
request (Union[google.cloud.talent_v4.types.ListJobsRequest, dict]):
The request object. List jobs request.
parent (str):
Required. The resource name of the tenant under which
the job is created.
The format is
"projects/{project_id}/tenants/{tenant_id}". For
example, "projects/foo/tenants/bar".
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Required. The filter string specifies the jobs to be
enumerated.
Supported operator: =, AND
The fields eligible for filtering are:
- ``companyName``
- ``requisitionId``
- ``status`` Available values: OPEN, EXPIRED, ALL.
Defaults to OPEN if no value is specified.
At least one of ``companyName`` and ``requisitionId``
                must be present or an INVALID_ARGUMENT error is thrown.
Sample Query:
- companyName =
"projects/foo/tenants/bar/companies/baz"
- companyName =
"projects/foo/tenants/bar/companies/baz" AND
requisitionId = "req-1"
- companyName =
"projects/foo/tenants/bar/companies/baz" AND status =
"EXPIRED"
- requisitionId = "req-1"
- requisitionId = "req-1" AND status = "EXPIRED"
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4.services.job_service.pagers.ListJobsPager:
List jobs response.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a job_service.ListJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.ListJobsRequest):
request = job_service.ListJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListJobsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
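    # Hedged example of the filter syntax documented above (all names are
    # placeholders). The returned pager fetches additional pages lazily.
    #
    #   pager = client.list_jobs(
    #       parent="projects/foo/tenants/bar",
    #       filter='companyName = "projects/foo/tenants/bar/companies/baz" AND status = "OPEN"',
    #   )
    #   for listed_job in pager:
    #       print(listed_job.name)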
def search_jobs(self,
request: Union[job_service.SearchJobsRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> job_service.SearchJobsResponse:
r"""Searches for jobs using the provided
[SearchJobsRequest][google.cloud.talent.v4.SearchJobsRequest].
This call constrains the
[visibility][google.cloud.talent.v4.Job.visibility] of jobs
present in the database, and only returns jobs that the caller
has permission to search against.
Args:
request (Union[google.cloud.talent_v4.types.SearchJobsRequest, dict]):
The request object. The Request body of the `SearchJobs`
call.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4.types.SearchJobsResponse:
Response for SearchJob method.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a job_service.SearchJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.SearchJobsRequest):
request = job_service.SearchJobsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.search_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
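    # Rough, hedged sketch for ``search_jobs``; the request is passed as a dict.
    # Field names below are assumptions about the SearchJobsRequest message, and
    # the API typically also expects request metadata describing the end user,
    # which is omitted here for brevity.
    #
    #   search_response = client.search_jobs(request={
    #       "parent": "projects/foo/tenants/bar",
    #       "job_query": {"query": "software engineer"},
    #   })
    #   for result in search_response.matching_jobs:
    #       print(result.job.name)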
def search_jobs_for_alert(self,
request: Union[job_service.SearchJobsRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> job_service.SearchJobsResponse:
r"""Searches for jobs using the provided
[SearchJobsRequest][google.cloud.talent.v4.SearchJobsRequest].
This API call is intended for the use case of targeting passive
job seekers (for example, job seekers who have signed up to
        receive email alerts about potential job opportunities). It has
different algorithmic adjustments that are designed to
specifically target passive job seekers.
This call constrains the
[visibility][google.cloud.talent.v4.Job.visibility] of jobs
present in the database, and only returns jobs the caller has
permission to search against.
Args:
request (Union[google.cloud.talent_v4.types.SearchJobsRequest, dict]):
The request object. The Request body of the `SearchJobs`
call.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4.types.SearchJobsResponse:
Response for SearchJob method.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a job_service.SearchJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.SearchJobsRequest):
request = job_service.SearchJobsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-talent",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"JobServiceClient",
)
| [] | [] | ["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | [] | ["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | python | 2 | 0 | |
main.go | package main
import (
"encoding/json"
"fmt"
"log"
"net"
"os"
"os/signal"
"runtime"
"strconv"
"strings"
"syscall"
)
// ActionMessage is a struct representing a message from CSM
type ActionMessage struct {
Version int `json:"Version"`
ClientID string `json:"ClientId"`
Type string `json:"Type"`
Service string `json:"Service"`
Action string `json:"Api"`
Timestamp int `json:"Timestamp"`
AttemptLatency int `json:"AttemptLatency"`
Fqdn string `json:"Fqdn"`
UserAgent string `json:"UserAgent"`
AccessKey string `json:"AccessKey"`
Region string `json:"Region"`
HTTPStatusCode int `json:"HttpStatusCode"`
FinalHTTPStatusCode int `json:"FinalHttpStatusCode"`
XAmzRequestID string `json:"XAmzRequestId"`
XAmzID2 string `json:"XAmzId2"`
}
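// An illustrative, hand-written example of the UDP payload this struct decodes,
// using the JSON field names declared in the tags above (values are made up):
//
//	{"Version": 1, "ClientId": "", "Type": "ApiCall", "Service": "S3",
//	 "Api": "GetObject", "Timestamp": 1609459200000, "Region": "us-east-1",
//	 "HttpStatusCode": 200, "UserAgent": "aws-cli/2.0"}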
func listen(connection *net.UDPConn, quit chan struct{}) {
	buffer := make([]byte, 1024)
	var message ActionMessage
	for {
		n, _, err := connection.ReadFromUDP(buffer)
		if err != nil {
			fmt.Println("listener failed - ", err)
			break
		}
		// Skip malformed payloads instead of reusing the previous message.
		if err := json.Unmarshal(buffer[:n], &message); err != nil {
			log.Println(err)
			continue
		}
		// Each action taken sends two JSON messages; the first has type
		// "ApiCallAttempt". This filters for the completed API call itself.
		if message.Type == "ApiCall" {
			fmt.Println(strings.ToLower(message.Service) + ":" + message.Action)
		}
	}
	quit <- struct{}{}
}
//SetupCloseHandler Displays a message when the user closes the program
func SetupCloseHandler() {
	// Buffered channel: signal.Notify must not block when delivering the signal.
	c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
fmt.Println("\rCtrl+C pressed, Stopping...")
os.Exit(0)
}()
}
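// To feed data to this listener, client-side monitoring must be enabled in the
// AWS SDK or CLI making the calls. A typical setup (hedged, shown for the shell)
// uses environment variables, with the port matching the one this program
// listens on:
//
//	export AWS_CSM_ENABLED=true
//	export AWS_CSM_PORT=31000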
func main() {
	var port = 31000
	var err error
	if os.Getenv("AWS_CSM_PORT") != "" {
		port, err = strconv.Atoi(os.Getenv("AWS_CSM_PORT"))
		if err != nil {
			fmt.Println("Could not parse value of AWS_CSM_PORT. Exiting...")
			os.Exit(1)
		}
	}
	// Build the listen address after the port override so AWS_CSM_PORT is honored
	// outside Docker as well; inside Docker, bind to all interfaces.
	var addr = net.UDPAddr{
		Port: port,
		IP:   net.IP{127, 0, 0, 1},
	}
	if os.Getenv("IN_DOCKER") == "True" {
		addr.IP = net.IP{0, 0, 0, 0}
	}
connection, err := net.ListenUDP("udp", &addr)
if err != nil {
fmt.Println("Could not start Action hero on the specified port, Exiting...")
os.Exit(1)
}
fmt.Println("Action Hero Starting...")
SetupCloseHandler()
quit := make(chan struct{})
for i := 0; i < runtime.NumCPU(); i++ {
go listen(connection, quit)
}
<-quit // hang until an error
}
| ["\"AWS_CSM_PORT\"", "\"AWS_CSM_PORT\"", "\"IN_DOCKER\""] | [] | ["AWS_CSM_PORT", "IN_DOCKER"] | [] | ["AWS_CSM_PORT", "IN_DOCKER"] | go | 2 | 0 | |
packages/main/tests/python/test_robocorp_workitems.py | import copy
import json
import logging
import os
import tempfile
from contextlib import contextmanager
try:
from contextlib import nullcontext
except ImportError:
from contextlib import suppress as nullcontext
from pathlib import Path
from unittest import mock
import pytest
from requests import HTTPError
from RPA.Robocorp.WorkItems import (
BaseAdapter,
ENCODING,
EmptyQueue,
Error,
FileAdapter,
RobocorpAdapter,
State,
WorkItems,
)
from RPA.Robocorp.utils import DEBUG_ON, RequestsHTTPError, set_dot_value
from . import RESOURCES_DIR, RESULTS_DIR
VARIABLES_FIRST = {"username": "testguy", "address": "[email protected]"}
VARIABLES_SECOND = {"username": "another", "address": "[email protected]"}
IN_OUT_ID = "workitem-id-out"
VALID_DATA = {
"workitem-id-first": VARIABLES_FIRST,
"workitem-id-second": VARIABLES_SECOND,
IN_OUT_ID: [1, 2, 3],
}
VALID_FILES = {
"workitem-id-first": {
"file1.txt": b"data1",
"file2.txt": b"data2",
"file3.png": b"data3",
},
"workitem-id-second": {},
IN_OUT_ID: {},
}
ITEMS_JSON = [{"payload": {"a-key": "a-value"}, "files": {"a-file": "file.txt"}}]
OUTPUT_DIR = RESULTS_DIR / "output_dir"
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
@contextmanager
def temp_filename(content=None, **kwargs):
"""Create temporary file and yield file relative path, then delete it afterwards.
Needs to close file handle, since Windows won't allow multiple
open handles to the same file.
"""
with tempfile.NamedTemporaryFile(delete=False, **kwargs) as fd:
path = fd.name
if content:
fd.write(content)
try:
yield path
finally:
os.unlink(path)
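# Illustrative use of the helper above, mirroring how the tests below call it;
# the consumer function is a hypothetical placeholder:
#
#   with temp_filename(b"some content", suffix=".txt") as path:
#       process_file(path)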
def is_equal_files(lhs, rhs):
lhs = Path(lhs).resolve()
rhs = Path(rhs).resolve()
return lhs == rhs
class MockAdapter(BaseAdapter):
DATA = {}
FILES = {}
INDEX = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._data_keys = []
self.releases = []
@classmethod
def validate(cls, item, key, val):
data = cls.DATA.get(item.id)
assert data is not None
assert data[key] == val
@property
def data_keys(self):
if not self._data_keys:
self._data_keys = list(self.DATA.keys())
return self._data_keys
def reserve_input(self) -> str:
if self.INDEX >= len(self.data_keys):
raise EmptyQueue("No work items in the input queue")
try:
return self.data_keys[self.INDEX]
finally:
self.INDEX += 1
def release_input(self, item_id: str, state: State, exception: dict = None):
self.releases.append((item_id, state, exception)) # purely for testing purposes
def create_output(self, parent_id, payload=None) -> str:
self.save_payload(IN_OUT_ID, payload)
return IN_OUT_ID
def load_payload(self, item_id):
return self.DATA[item_id]
def save_payload(self, item_id, payload):
self.DATA[item_id] = payload
def list_files(self, item_id):
return self.FILES[item_id]
def get_file(self, item_id, name):
return self.FILES[item_id][name]
def add_file(self, item_id, name, *, original_name, content):
self.FILES[item_id][name] = content
def remove_file(self, item_id, name):
del self.FILES[item_id][name]
class TestLibrary:
"""Tests the library itself as a whole."""
@staticmethod
@pytest.fixture
def adapter():
MockAdapter.DATA = copy.deepcopy(VALID_DATA)
MockAdapter.FILES = copy.deepcopy(VALID_FILES)
try:
yield MockAdapter
finally:
MockAdapter.DATA = {}
MockAdapter.FILES = {}
MockAdapter.INDEX = 0
@staticmethod
@pytest.fixture
def library(adapter):
yield WorkItems(default_adapter=adapter)
@staticmethod
def _get_resource_data(name, binary=False):
path = RESOURCES_DIR / "work-items" / name
if binary:
return path.read_bytes()
return path.read_text(encoding=ENCODING)
@classmethod
@pytest.fixture(
params=[
("mail-text.txt", "A message from e-mail"),
("mail-json.txt", {"message": "from email"}),
("mail-yaml.txt", {"message": "from email", "extra": {"value": 1}}),
]
)
def raw_email_data(cls, request):
raw_email = cls._get_resource_data(request.param[0])
expected_body = request.param[1]
return raw_email, expected_body
@classmethod
@pytest.fixture(
params=[
("email.text", False, "A message from e-mail"),
("__mail.html", True, "from email"),
]
)
def parsed_email_data(cls, request):
email_var = request.param[0]
parsed_email = None
expected_body = request.param[2]
if request.param[1]:
parsed_email = cls._get_resource_data(email_var, binary=True)
return email_var, parsed_email, expected_body
def test_autoload(self, library):
# Called by Robot Framework listener
library._start_suite(None, None)
# Work item loaded using env variables
env = library.current
assert env is not None
assert env.payload == VARIABLES_FIRST
def test_autoload_disable(self, adapter):
library = WorkItems(default_adapter=adapter, autoload=False)
# Called by Robot Framework listener
library._start_suite(None, None)
assert library._current is None
def test_keyword_get_input_work_item(self, library):
first = library.get_input_work_item()
assert first.payload == VARIABLES_FIRST
assert first == library.current
second = library.get_input_work_item()
assert second.payload == VARIABLES_SECOND
assert second == library.current
def test_keyword_save_work_item(self, library):
item = library.get_input_work_item()
for key, value in VARIABLES_FIRST.items():
MockAdapter.validate(item, key, value)
modified = {"username": "changed", "address": "[email protected]"}
item.payload = modified
library.save_work_item()
for key, value in modified.items():
MockAdapter.validate(item, key, value)
def test_no_active_item(self):
library = WorkItems(default_adapter=MockAdapter)
with pytest.raises(RuntimeError) as err:
library.save_work_item()
assert str(err.value) == "No active work item"
def test_list_variables(self, library):
library.get_input_work_item()
names = library.list_work_item_variables()
assert len(names) == 2
assert "username" in names
assert "address" in names
def test_get_variables(self, library):
library.get_input_work_item()
value = library.get_work_item_variable("username")
assert value == "testguy"
with pytest.raises(KeyError):
library.get_work_item_variable("notexist")
def test_get_variables_default(self, library):
library.get_input_work_item()
value = library.get_work_item_variable("username", default="doesntmatter")
assert value == "testguy"
value = library.get_work_item_variable("notexist", default="doesmatter")
assert value == "doesmatter"
def test_delete_variables(self, library):
library.get_input_work_item()
assert "username" in library.list_work_item_variables()
library.delete_work_item_variables("username")
assert "username" not in library.list_work_item_variables()
library.delete_work_item_variables("doesntexist")
with pytest.raises(KeyError):
library.delete_work_item_variables("doesntexist", force=False)
def test_delete_variables_single(self, library):
library.get_input_work_item()
assert "username" in library.list_work_item_variables()
assert len(library.current.payload) == 2
library.delete_work_item_variables("username")
assert "username" not in library.list_work_item_variables()
assert len(library.current.payload) == 1
def test_delete_variables_multiple(self, library):
library.get_input_work_item()
names = library.list_work_item_variables()
assert "username" in names
assert "address" in names
assert len(names) == 2
library.delete_work_item_variables("username", "address")
names = library.list_work_item_variables()
assert "username" not in names
assert "username" not in names
assert len(names) == 0
def test_delete_variables_unknown(self, library):
library.get_input_work_item()
assert len(library.list_work_item_variables()) == 2
library.delete_work_item_variables("unknown-variable")
assert len(library.list_work_item_variables()) == 2
with pytest.raises(KeyError):
library.delete_work_item_variables("unknown-variable", force=False)
assert len(library.list_work_item_variables()) == 2
def test_raw_payload(self, library):
_ = library.get_input_work_item()
_ = library.get_input_work_item()
item = library.get_input_work_item()
payload = library.get_work_item_payload()
assert payload == [1, 2, 3]
library.set_work_item_payload({"output": 0xBEEF})
library.save_work_item()
MockAdapter.validate(item, "output", 0xBEEF)
def test_list_files(self, library):
library.get_input_work_item()
files = library.list_work_item_files()
assert files == ["file1.txt", "file2.txt", "file3.png"]
def test_get_file(self, library):
library.get_input_work_item()
with temp_filename() as path:
result = library.get_work_item_file("file2.txt", path)
with open(result) as fd:
data = fd.read()
assert is_equal_files(result, path)
assert data == "data2"
def test_get_file_notexist(self, library):
library.get_input_work_item()
with pytest.raises(FileNotFoundError):
library.get_work_item_file("file5.txt")
def test_add_file(self, library):
item = library.get_input_work_item()
with temp_filename(b"some-input-content") as path:
library.add_work_item_file(path, "file4.txt")
files = library.list_work_item_files()
assert files == ["file1.txt", "file2.txt", "file3.png", "file4.txt"]
assert "file4.txt" not in MockAdapter.FILES[item.id]
library.save_work_item()
assert MockAdapter.FILES[item.id]["file4.txt"] == b"some-input-content"
def test_add_file_duplicate(self, library):
item = library.get_input_work_item()
def verify_files():
files = library.list_work_item_files()
assert files == ["file1.txt", "file2.txt", "file3.png", "file4.txt"]
with temp_filename(b"some-input-content") as path:
library.add_work_item_file(path, "file4.txt")
assert "file4.txt" not in MockAdapter.FILES[item.id]
verify_files()
# Add duplicate for unsaved item
library.add_work_item_file(path, "file4.txt")
assert "file4.txt" not in MockAdapter.FILES[item.id]
verify_files()
library.save_work_item()
assert MockAdapter.FILES[item.id]["file4.txt"] == b"some-input-content"
verify_files()
# Add duplicate for saved item
library.add_work_item_file(path, "file4.txt")
verify_files()
library.save_work_item()
verify_files()
def test_add_file_notexist(self, library):
library.get_input_work_item()
with pytest.raises(FileNotFoundError):
library.add_work_item_file("file5.txt", "doesnt-matter")
def test_remove_file(self, library):
item = library.get_input_work_item()
library.remove_work_item_file("file2.txt")
files = library.list_work_item_files()
assert files == ["file1.txt", "file3.png"]
assert "file2.txt" in MockAdapter.FILES[item.id]
library.save_work_item()
assert "file2.txt" not in MockAdapter.FILES[item.id]
def test_remove_file_notexist(self, library):
library.get_input_work_item()
library.remove_work_item_file("file5.txt")
with pytest.raises(FileNotFoundError):
library.remove_work_item_file("file5.txt", missing_ok=False)
def test_get_file_pattern(self, library):
library.get_input_work_item()
with tempfile.TemporaryDirectory() as outdir:
file1 = os.path.join(outdir, "file1.txt")
file2 = os.path.join(outdir, "file2.txt")
paths = library.get_work_item_files("*.txt", outdir)
assert is_equal_files(paths[0], file1)
assert is_equal_files(paths[1], file2)
assert os.path.exists(file1)
assert os.path.exists(file2)
def test_remove_file_pattern(self, library):
item = library.get_input_work_item()
library.remove_work_item_files("*.txt")
files = library.list_work_item_files()
assert files == ["file3.png"]
assert list(MockAdapter.FILES[item.id]) == [
"file1.txt",
"file2.txt",
"file3.png",
]
library.save_work_item()
files = library.list_work_item_files()
assert files == ["file3.png"]
assert list(MockAdapter.FILES[item.id]) == ["file3.png"]
def test_clear_work_item(self, library):
library.get_input_work_item()
library.clear_work_item()
library.save_work_item()
assert library.get_work_item_payload() == {}
assert library.list_work_item_files() == []
def test_get_file_unsaved(self, library):
library.get_input_work_item()
with temp_filename(b"some-input-content") as path:
library.add_work_item_file(path, "file4.txt")
files = library.list_work_item_files()
assert files == ["file1.txt", "file2.txt", "file3.png", "file4.txt"]
assert "file4.txt" not in MockAdapter.FILES
with tempfile.TemporaryDirectory() as outdir:
names = ["file1.txt", "file2.txt", "file4.txt"]
result = library.get_work_item_files("*.txt", outdir)
expected = [os.path.join(outdir, name) for name in names]
for lhs, rhs in zip(result, expected):
assert is_equal_files(lhs, rhs)
with open(result[-1]) as fd:
assert fd.read() == "some-input-content"
def test_get_file_unsaved_no_copy(self, library):
library.get_input_work_item()
with tempfile.TemporaryDirectory() as outdir:
path = os.path.join(outdir, "nomove.txt")
with open(path, "w") as fd:
fd.write("my content")
mtime = os.path.getmtime(path)
library.add_work_item_file(path)
files = library.list_work_item_files()
assert files == ["file1.txt", "file2.txt", "file3.png", "nomove.txt"]
paths = library.get_work_item_files("*.txt", outdir)
assert is_equal_files(paths[-1], path)
assert os.path.getmtime(path) == mtime
def test_get_file_unsaved_relative(self, library):
library.get_input_work_item()
with tempfile.TemporaryDirectory() as outdir:
curdir = os.getcwd()
try:
os.chdir(outdir)
with open("nomove.txt", "w") as fd:
fd.write("my content")
mtime = os.path.getmtime("nomove.txt")
library.add_work_item_file(os.path.join(outdir, "nomove.txt"))
files = library.list_work_item_files()
assert files == ["file1.txt", "file2.txt", "file3.png", "nomove.txt"]
paths = library.get_work_item_files("*.txt")
assert is_equal_files(paths[-1], os.path.join(outdir, "nomove.txt"))
assert os.path.getmtime("nomove.txt") == mtime
finally:
os.chdir(curdir)
def test_get_file_no_matches(self, library):
library.get_input_work_item()
with tempfile.TemporaryDirectory() as outdir:
paths = library.get_work_item_files("*.pdf", outdir)
assert len(paths) == 0
def test_create_output_work_item(self, library):
input_item = library.get_input_work_item()
output_item = library.create_output_work_item()
assert output_item.id is None
assert output_item.parent_id == input_item.id
def test_create_output_work_item_no_input(self, library):
with pytest.raises(RuntimeError):
library.create_output_work_item()
@staticmethod
@pytest.fixture(
params=[
lambda *files: files, # files provided as tuple
lambda *files: list(files), # as list of paths
lambda *files: ", ".join(files), # comma separated paths
]
)
def out_files(request):
"""Output work item files."""
with temp_filename(b"out-content-1", suffix="-1.txt") as path1, temp_filename(
b"out-content-2", suffix="-2.txt"
) as path2:
func = request.param
yield func(path1, path2)
def test_create_output_work_item_variables_files(self, library, out_files):
library.get_input_work_item()
variables = {"my_var1": "value1", "my_var2": "value2"}
library.create_output_work_item(variables=variables, files=out_files, save=True)
assert library.get_work_item_variable("my_var1") == "value1"
assert library.get_work_item_variable("my_var2") == "value2"
# This actually "downloads" (creates) the files, so make sure we remove them
# afterwards.
paths = library.get_work_item_files("*.txt", dirname=OUTPUT_DIR)
try:
assert len(paths) == 2
for path in paths:
with open(path) as stream:
content = stream.read()
idx = Path(path).stem.split("-")[-1]
assert content == f"out-content-{idx}"
finally:
for path in paths:
os.remove(path)
def test_custom_root(self, adapter):
library = WorkItems(default_adapter=adapter, root="vars")
item = library.get_input_work_item()
variables = library.get_work_item_variables()
assert variables == {}
library.set_work_item_variables(cool="beans", yeah="boi")
assert item.payload == {
**VARIABLES_FIRST,
"vars": {"cool": "beans", "yeah": "boi"},
}
@pytest.mark.parametrize("limit", [0, 1, 2, 3, 4]) # no, existing and over limit
def test_iter_work_items(self, library, limit):
usernames = []
def func(a, b, r=3):
assert a + b == r
# Collects the "username" variable from the payload if provided and returns
# True if found, False otherwise.
payload = library.get_work_item_payload()
if not isinstance(payload, dict):
return False
username = payload.get("username")
if username:
usernames.append(username)
return username is not None
library.get_input_work_item()
results = library.for_each_input_work_item(func, 1, 2, items_limit=limit, r=3)
expected_usernames = ["testguy", "another"]
expected_results = [True, True, False]
if limit:
expected_usernames = expected_usernames[:limit]
expected_results = expected_results[:limit]
assert usernames == expected_usernames
assert results == expected_results
def test_iter_work_items_limit_and_state(self, library):
def func():
return 1
# Pick one single item and make sure its state is set implicitly.
results = library.for_each_input_work_item(func, items_limit=1)
assert len(results) == 1
assert library.current.state is State.DONE
def func2():
library.release_input_work_item(State.FAILED)
return 2
# Pick-up the rest of the two inputs and set state explicitly.
results = library.for_each_input_work_item(func2)
assert len(results) == 2
assert library.current.state is State.FAILED
@pytest.mark.parametrize("return_results", [True, False])
def test_iter_work_items_return_results(self, library, return_results):
def func():
return 1
library.get_input_work_item()
results = library.for_each_input_work_item(func, return_results=return_results)
if return_results:
assert results == [1] * 3
else:
assert results is None
@pytest.mark.parametrize("processed_items", [0, 1, 2, 3])
def test_successive_work_items_iteration(self, library, processed_items):
for _ in range(processed_items):
library.get_input_work_item()
library.release_input_work_item(State.DONE)
def func():
pass
# Checks if all remaining input work items are processed once.
results = library.for_each_input_work_item(func)
assert len(results) == 3 - processed_items
# Checks if there's no double processing of the last already processed item.
results = library.for_each_input_work_item(func)
assert len(results) == 0
@staticmethod
@pytest.fixture(
params=[
None,
{"exception_type": "BUSINESS"},
{
"exception_type": "APPLICATION",
"code": "UNEXPECTED_ERROR",
"message": "This is an unexpected error",
},
{
"exception_type": "APPLICATION",
"code": None,
"message": "This is an unexpected error",
},
{
"exception_type": "APPLICATION",
"code": None,
"message": None,
},
{
"exception_type": None,
"code": None,
"message": None,
},
{
"exception_type": None,
"code": "APPLICATION",
"message": None,
},
{
"exception_type": None,
"code": "",
"message": "Not empty",
},
]
)
def release_exception(request):
exception = request.param or {}
effect = nullcontext()
success = True
if not exception.get("exception_type") and any(
map(lambda key: exception.get(key), ["code", "message"])
):
effect = pytest.raises(RuntimeError)
success = False
return exception or None, effect, success
def test_release_work_item_failed(self, library, release_exception):
exception, effect, success = release_exception
library.get_input_work_item()
with effect:
library.release_input_work_item(
"FAILED", **(exception or {})
) # intentionally providing a string for the state
if success:
assert library.current.state == State.FAILED
exception_type = (exception or {}).pop("exception_type", None)
if exception_type:
exception["type"] = Error(exception_type).value
exception.setdefault("code", None)
exception.setdefault("message", None)
else:
exception = None
if success:
assert library.adapter.releases == [
("workitem-id-first", State.FAILED, exception)
]
@pytest.mark.parametrize("exception", [None, {"exception_type": Error.APPLICATION}])
def test_release_work_item_done(self, library, exception):
library.get_input_work_item()
library.release_input_work_item(State.DONE, **(exception or {}))
assert library.current.state is State.DONE
assert library.adapter.releases == [
# No exception sent for non failures.
("workitem-id-first", State.DONE, None)
]
def test_auto_release_work_item(self, library):
library.get_input_work_item()
library.get_input_work_item() # this automatically sets the state of the last
assert library.current.state is None # because the previous one has a state
assert library.adapter.releases == [("workitem-id-first", State.DONE, None)]
def test_parse_work_item_from_raw_email(self, library, raw_email_data):
raw_email, expected_body = raw_email_data
library.adapter.DATA["workitem-id-first"]["rawEmail"] = raw_email
library.get_input_work_item()
parsed_email = library.get_work_item_variable("parsedEmail")
assert parsed_email["Body"] == expected_body
def test_parse_work_item_from_parsed_email(self, library, parsed_email_data):
email_var, parsed_email, expected_body = parsed_email_data
if parsed_email:
library.adapter.FILES["workitem-id-first"][email_var] = parsed_email
else:
payload = library.adapter.DATA["workitem-id-first"]
payload["email"] = {}
set_dot_value(payload, email_var, value=expected_body)
library.get_input_work_item()
parsed_email = library.get_work_item_variable("parsedEmail")
email_parsed = library.get_work_item_variable("email")
assert parsed_email["Body"] == email_parsed["body"]
assert expected_body in parsed_email["Body"]
assert expected_body in email_parsed["body"]
def test_parse_work_item_from_email_missing_content(self, library):
library.get_input_work_item()
for payload_var in ("rawEmail", "parsedEmail", "email"):
with pytest.raises(KeyError):
library.get_work_item_variable(payload_var)
class TestFileAdapter:
"""Tests the local dev env `FileAdapter` on Work Items."""
@contextmanager
def _input_work_items(self):
with tempfile.TemporaryDirectory() as datadir:
items_in = os.path.join(datadir, "items.json")
with open(items_in, "w") as fd:
json.dump(ITEMS_JSON, fd)
with open(os.path.join(datadir, "file.txt"), "w") as fd:
fd.write("some mock content")
output_dir = os.path.join(datadir, "output_dir")
os.makedirs(output_dir)
items_out = os.path.join(output_dir, "items-out.json")
yield items_in, items_out
@pytest.fixture(
params=[
("RPA_WORKITEMS_PATH", "N/A"),
("RPA_INPUT_WORKITEM_PATH", "RPA_OUTPUT_WORKITEM_PATH"),
]
)
def adapter(self, monkeypatch, request):
with self._input_work_items() as (items_in, items_out):
monkeypatch.setenv(request.param[0], items_in)
monkeypatch.setenv(request.param[1], items_out)
yield FileAdapter()
@staticmethod
@pytest.fixture
def empty_adapter():
# No work items i/o files nor envs set.
return FileAdapter()
def test_load_data(self, adapter):
item_id = adapter.reserve_input()
data = adapter.load_payload(item_id)
assert data == {"a-key": "a-value"}
def test_list_files(self, adapter):
item_id = adapter.reserve_input()
files = adapter.list_files(item_id)
assert files == ["a-file"]
def test_get_file(self, adapter):
item_id = adapter.reserve_input()
content = adapter.get_file(item_id, "a-file")
assert content == b"some mock content"
def test_add_file(self, adapter):
item_id = adapter.reserve_input()
adapter.add_file(
item_id,
"secondfile.txt",
original_name="secondfile2.txt",
content=b"somedata",
)
assert adapter.inputs[0]["files"]["secondfile.txt"] == "secondfile2.txt"
assert os.path.isfile(Path(adapter.input_path).parent / "secondfile2.txt")
def test_save_data_input(self, adapter):
item_id = adapter.reserve_input()
adapter.save_payload(item_id, {"key": "value"})
with open(adapter.input_path) as fd:
data = json.load(fd)
assert data == [
{"payload": {"key": "value"}, "files": {"a-file": "file.txt"}}
]
def test_save_data_output(self, adapter):
item_id = adapter.create_output("0", {})
adapter.save_payload(item_id, {"key": "value"})
output = os.getenv("RPA_OUTPUT_WORKITEM_PATH")
if output:
assert "output_dir" in output # checks automatic dir creation
else:
output = Path(adapter.input_path).with_suffix(".output.json")
assert os.path.isfile(output)
with open(output) as fd:
data = json.load(fd)
assert data == [{"payload": {"key": "value"}, "files": {}}]
def test_missing_file(self, monkeypatch):
monkeypatch.setenv("RPA_WORKITEMS_PATH", "not-exist.json")
adapter = FileAdapter()
assert adapter.inputs == [{"payload": {}}]
def test_empty_queue(self, monkeypatch):
with tempfile.TemporaryDirectory() as datadir:
items = os.path.join(datadir, "items.json")
with open(items, "w") as fd:
json.dump([], fd)
monkeypatch.setenv("RPA_WORKITEMS_PATH", items)
adapter = FileAdapter()
assert adapter.inputs == [{"payload": {}}]
def test_malformed_queue(self, monkeypatch):
with tempfile.TemporaryDirectory() as datadir:
items = os.path.join(datadir, "items.json")
with open(items, "w") as fd:
json.dump(["not-an-item"], fd)
monkeypatch.setenv("RPA_WORKITEMS_PATH", items)
adapter = FileAdapter()
assert adapter.inputs == [{"payload": {}}]
def test_without_items_paths(self, empty_adapter):
assert empty_adapter.inputs == [{"payload": {}}]
# Can't save inputs nor outputs since there's no path defined for them.
with pytest.raises(RuntimeError):
empty_adapter.save_payload("0", {"input": "value"})
with pytest.raises(RuntimeError):
_ = empty_adapter.output_path
with pytest.raises(RuntimeError):
empty_adapter.create_output("1", {"var": "some-value"})
class TestRobocorpAdapter:
"""Test control room API calls and retrying behaviour."""
ENV = {
"RC_WORKSPACE_ID": "1",
"RC_PROCESS_RUN_ID": "2",
"RC_ACTIVITY_RUN_ID": "3",
"RC_WORKITEM_ID": "4",
"RC_API_WORKITEM_HOST": "https://api.workitem.com",
"RC_API_WORKITEM_TOKEN": "workitem-token",
"RC_API_PROCESS_HOST": "https://api.process.com",
"RC_API_PROCESS_TOKEN": "process-token",
"RC_PROCESS_ID": "5",
}
HEADERS_WORKITEM = {
"Authorization": f"Bearer {ENV['RC_API_WORKITEM_TOKEN']}",
"Content-Type": "application/json",
}
HEADERS_PROCESS = {
"Authorization": f"Bearer {ENV['RC_API_PROCESS_TOKEN']}",
"Content-Type": "application/json",
}
@pytest.fixture
def adapter(self, monkeypatch):
for name, value in self.ENV.items():
monkeypatch.setenv(name, value)
with mock.patch("RPA.Robocorp.utils.requests.get") as mock_get, mock.patch(
"RPA.Robocorp.utils.requests.post"
) as mock_post, mock.patch(
"RPA.Robocorp.utils.requests.put"
) as mock_put, mock.patch(
"RPA.Robocorp.utils.requests.delete"
) as mock_delete, mock.patch(
"time.sleep", return_value=None
) as mock_sleep:
self.mock_get = mock_get
self.mock_post = mock_post
self.mock_put = mock_put
self.mock_delete = mock_delete
self.mock_get.__name__ = "get"
self.mock_post.__name__ = "post"
self.mock_put.__name__ = "put"
self.mock_delete.__name__ = "delete"
self.mock_sleep = mock_sleep
yield RobocorpAdapter()
def test_reserve_input(self, adapter):
initial_item_id = adapter.reserve_input()
assert initial_item_id == self.ENV["RC_WORKITEM_ID"]
self.mock_post.return_value.json.return_value = {"workItemId": "44"}
reserved_item_id = adapter.reserve_input()
assert reserved_item_id == "44"
url = "https://api.process.com/process-v1/workspaces/1/processes/5/runs/2/robotRuns/3/reserve-next-work-item"
self.mock_post.assert_called_once_with(url, headers=self.HEADERS_PROCESS)
@pytest.mark.parametrize(
"exception",
[None, {"type": "BUSINESS", "code": "INVALID_DATA", "message": None}],
)
def test_release_input(self, adapter, exception):
item_id = "26"
adapter.release_input(
item_id,
State.FAILED,
exception=exception.copy() if exception else exception,
)
url = "https://api.process.com/process-v1/workspaces/1/processes/5/runs/2/robotRuns/3/release-work-item"
body = {
"workItemId": item_id,
"state": State.FAILED.value,
}
if exception:
body["exception"] = {
key: value for (key, value) in exception.items() if value
}
self.mock_post.assert_called_once_with(
url, headers=self.HEADERS_PROCESS, json=body
)
def test_load_payload(self, adapter):
item_id = "4"
expected_payload = {"name": "value"}
self.mock_get.return_value.json.return_value = expected_payload
payload = adapter.load_payload(item_id)
assert payload == expected_payload
response = self.mock_get.return_value
response.ok = False
response.status_code = 404
payload = adapter.load_payload(item_id)
assert payload == {}
def test_save_payload(self, adapter):
item_id = "1993"
payload = {"Cosmin": "Poieana"}
adapter.save_payload(item_id, payload)
url = f"https://api.workitem.com/json-v1/workspaces/1/workitems/{item_id}/data"
self.mock_put.assert_called_once_with(
url, headers=self.HEADERS_WORKITEM, json=payload
)
def test_remove_file(self, adapter):
item_id = "44"
name = "procrastination.txt"
file_id = "88"
self.mock_get.return_value.json.return_value = [
{"fileName": name, "fileId": file_id}
]
adapter.remove_file(item_id, name)
url = f"https://api.workitem.com/json-v1/workspaces/1/workitems/{item_id}/files/{file_id}"
self.mock_delete.assert_called_once_with(url, headers=self.HEADERS_WORKITEM)
def test_list_files(self, adapter):
expected_files = ["just.py", "mark.robot", "it.txt"]
self.mock_get.return_value.json.return_value = [
{"fileName": expected_files[0], "fileId": "1"},
{"fileName": expected_files[1], "fileId": "2"},
{"fileName": expected_files[2], "fileId": "3"},
]
files = adapter.list_files("4")
assert files == expected_files
@staticmethod
def _failing_response(request):
resp = mock.MagicMock()
resp.ok = False
resp.json.return_value = request.param[0]
resp.raise_for_status.side_effect = request.param[1]
return resp
@pytest.fixture(
params=[
# Requests response attribute values for: `.json()`, `.raise_for_status()`
({}, None),
(None, HTTPError()),
]
)
def failing_response(self, request):
return self._failing_response(request)
@staticmethod
@pytest.fixture
def success_response():
resp = mock.MagicMock()
resp.ok = True
return resp
@pytest.mark.parametrize(
"status_code,call_count",
[
# Retrying enabled:
(429, 5),
(500, 5),
# Retrying disabled:
(400, 1),
(401, 1),
(403, 1),
(409, 1),
],
)
def test_list_files_retrying(
self, adapter, failing_response, status_code, call_count
):
self.mock_get.return_value = failing_response
failing_response.status_code = status_code
with pytest.raises(RequestsHTTPError) as exc_info:
adapter.list_files("4")
assert exc_info.value.status_code == status_code
assert self.mock_get.call_count == call_count # tried once or 5 times in a row
@pytest.fixture(
params=[
# Requests response attribute values for: `.json()`, `.raise_for_status()`
({"error": {"code": "UNEXPECTED_ERROR"}}, None), # normal response
('{"error": {"code": "UNEXPECTED_ERROR"}}', None), # double serialized
(r'"{\"error\": {\"code\": \"UNEXPECTED_ERROR\"}}"', None), # triple
('[{"some": "value"}]', HTTPError()), # double serialized list
]
)
def failing_deserializing_response(self, request):
return self._failing_response(request)
def test_bad_response_payload(self, adapter, failing_deserializing_response):
self.mock_get.return_value = failing_deserializing_response
failing_deserializing_response.status_code = 429
with pytest.raises(RequestsHTTPError) as exc_info:
adapter.list_files("4")
err = "UNEXPECTED_ERROR"
call_count = 5
if err not in str(failing_deserializing_response.json.return_value):
err = "Error" # default error message in the absence of it
assert exc_info.value.status_code == 429
assert exc_info.value.status_message == err
assert self.mock_get.call_count == call_count
def test_logging_and_sleeping(self, adapter, failing_response, caplog):
        assert DEBUG_ON, 'this test should be run with "RPA_DEBUG_API" on'
# 1st call: raises 500 -> unexpected server crash, therefore needs retry
# (1 sleep)
# 2nd call: now raises 429 -> rate limit hit, needs retry and sleeps extra
# (2 sleeps)
# 3rd call: raises 400 -> malformed request, doesn't retry anymore and raises
# with last error, no sleeps performed
status_code = mock.PropertyMock(side_effect=[500, 429, 400])
type(failing_response).status_code = status_code
failing_response.reason = "for no reason :)"
self.mock_post.return_value = failing_response
with pytest.raises(RequestsHTTPError) as exc_info:
with caplog.at_level(logging.DEBUG):
adapter.create_output("1")
assert exc_info.value.status_code == 400 # last received server code
assert self.mock_sleep.call_count == 3 # 1 sleep (500) + 2 sleeps (429)
expected_logs = [
"POST 'https://api.process.com/process-v1/workspaces/1/processes/5/work-items/1/output'",
"API response: 500 'for no reason :)'",
"API response: 429 'for no reason :)'",
"API response: 400 'for no reason :)'",
]
captured_logs = set(record.message for record in caplog.records)
for expected_log in expected_logs:
assert expected_log in captured_logs
def test_add_get_file(self, adapter, success_response, caplog):
"""Uploads and retrieves files with AWS support.
This way we check if sensitive information (like auth params) don't get
exposed.
"""
item_id = adapter.reserve_input() # reserved initially from the env var
file_name = "myfile.txt"
file_content = b"some-data"
# Behaviour for: adding a file (2x POST), getting the file (3x GET).
#
# POST #1: 201 - default error handling
# POST #2: 201 - custom error handling -> status code retrieved
# GET #1: 200 - default error handling
# GET #2: 200 - default error handling
# GET #3: 200 - custom error handling -> status code retrieved
status_code = mock.PropertyMock(side_effect=[201, 200, 200])
type(success_response).status_code = status_code
# POST #1: JSON with file related data
# POST #2: ignored response content
# GET #1: JSON with all the file IDs
# GET #2: JSON with the file URL corresponding to ID
# GET #3: bytes response content (not ignored)
post_data = {
"url": "https://s3.eu-west-1.amazonaws.com/ci-4f23e-robocloud-td",
"fields": {
"dont": "care",
},
}
get_files_data = [
{
"fileName": file_name,
"fileId": "file-id",
}
]
get_file_data = {
"url": "https://ci-4f23e-robocloud-td.s3.eu-west-1.amazonaws.com/files/ws_17/wi_0dd63f07-ba7b-414a-bf92-293080975d2f/file_eddfd9ac-143f-4eb9-888f-b9c378e67aec?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=secret-credentials",
}
success_response.json.side_effect = [post_data, get_files_data, get_file_data]
success_response.content = file_content
self.mock_post.return_value = self.mock_get.return_value = success_response
# 2x POST (CR file entry, AWS file content)
adapter.add_file(
item_id,
file_name,
original_name="not-used.txt",
content=file_content,
)
files = self.mock_post.call_args_list[-1][1]["files"]
assert files == {"file": (file_name, file_content)}
# 3x GET (all files, specific file, file content)
content = adapter.get_file(item_id, file_name)
assert content == file_content
# Making sure sensitive info doesn't get exposed.
exposed = any(
"secret-credentials" in record.message for record in caplog.records
)
assert not exposed, "secret got exposed"
| [] | [] | ["RPA_OUTPUT_WORKITEM_PATH"] | [] | ["RPA_OUTPUT_WORKITEM_PATH"] | python | 1 | 0 | |
.travis.py | import json
import os
import re
import subprocess
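# For reference, a word entry that passes the checks below would look roughly
# like this (illustrative values only; the exact shape of each value is not
# enforced beyond being non-empty):
#
#   {
#       "word": "Example",
#       "definitions": ["A thing characteristic of its kind."],
#       "parts-of-speech": "Noun"
#   }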
# Get a diff between master and current.
try:
commit_range = os.environ["TRAVIS_COMMIT_RANGE"]
changed_files = subprocess.check_output(["git", "diff", "--name-only", commit_range])
except KeyError:
print("🔥 This should be run on Travis. Otherwise make sure TRAVIS_BRANCH is set.")
exit(1)
# Filter JSON files only.
changed_files_json = []
if changed_files:
changed_files = changed_files.decode()
for changed_file in changed_files.split('\n'):
if re.search(r"\.json$", changed_file):
changed_files_json.append(changed_file)
# Iterate over list of changed JSON files.
for changed_file_json in changed_files_json:
print(f"Checking file {changed_file_json}...")
there_was_an_error = False
if not os.path.basename(changed_file_json)[0].isupper():
there_was_an_error = True
print("🔥 File name not capitalized.")
    try:
        with open(changed_file_json) as data_file:
            file_content = json.loads(data_file.read())
    except json.decoder.JSONDecodeError:
        # The file handle is already closed here, so don't read it again; just
        # point to a linter and fail fast before the key checks below.
        print(f"🔥 JSON could not be parsed in {changed_file_json}. Check it with a linter such as https://jsonlint.com/")
        exit(1)
if 'word' not in file_content:
there_was_an_error = True
print("🔥 Key 'word' not found.")
if not file_content["word"]:
there_was_an_error = True
print("🔥 Value for 'word' appears to be empty.")
if 'definitions' not in file_content:
there_was_an_error = True
print("🔥 Key 'definitions' not found.")
if not file_content["definitions"]:
there_was_an_error = True
print("🔥 Value for 'definitions' appears to be empty.")
if 'parts-of-speech' not in file_content:
there_was_an_error = True
print("🔥 Key 'parts-of-speech' not found.")
if not file_content["parts-of-speech"]:
there_was_an_error = True
print("🔥 Value for 'parts-of-speech' appears to be empty.")
if there_was_an_error:
exit(1)
| [] | [] | ["TRAVIS_COMMIT_RANGE"] | [] | ["TRAVIS_COMMIT_RANGE"] | python | 1 | 0 | |
src/common/cscs_api_common.py | #
# Copyright (c) 2019-2021, ETH Zurich. All rights reserved.
#
# Please, refer to the LICENSE file in the root directory.
# SPDX-License-Identifier: BSD-3-Clause
#
import logging
import os
import jwt
import stat
import datetime
import hashlib
import tempfile
import json
import functools
from flask import request, jsonify, g
import requests
import urllib
import base64
import io
import re
import time
import threading
# Checks if an environment variable injected to F7T is a valid True value
# var <- object
# returns -> boolean
def get_boolean_var(var):
# ensure variable to be a string
var = str(var)
# True, true or TRUE
# Yes, yes or YES
# 1
return var.upper() == "TRUE" or var.upper() == "YES" or var == "1"
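# Quick examples of the helper above:
#   get_boolean_var("yes") -> True, get_boolean_var(1) -> True,
#   get_boolean_var("no") -> False, get_boolean_var(None) -> False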
debug = get_boolean_var(os.environ.get("F7T_DEBUG_MODE", False))
AUTH_HEADER_NAME = 'Authorization'
realm_pubkey=os.environ.get("F7T_REALM_RSA_PUBLIC_KEY", '')
if realm_pubkey != '':
# headers are inserted here, must not be present
realm_pubkey = realm_pubkey.strip('\'"') # remove '"'
realm_pubkey = '-----BEGIN PUBLIC KEY-----\n' + realm_pubkey + '\n-----END PUBLIC KEY-----'
realm_pubkey_type = os.environ.get("F7T_REALM_RSA_TYPE").strip('\'"')
AUTH_AUDIENCE = os.environ.get("F7T_AUTH_TOKEN_AUD", '').strip('\'"')
ALLOWED_USERS = os.environ.get("F7T_AUTH_ALLOWED_USERS", '').strip('\'"').split(";")
AUTH_REQUIRED_SCOPE = os.environ.get("F7T_AUTH_REQUIRED_SCOPE", '').strip('\'"')
AUTH_ROLE = os.environ.get("F7T_AUTH_ROLE", '').strip('\'"')
CERTIFICATOR_URL = os.environ.get("F7T_CERTIFICATOR_URL")
TASKS_URL = os.environ.get("F7T_TASKS_URL")
F7T_SSH_CERTIFICATE_WRAPPER = get_boolean_var(os.environ.get("F7T_SSH_CERTIFICATE_WRAPPER", False))
# Forbidden chars on user path/parameters, without escapes: < > | ; " ' & \ [ ] ( ) \x00-\x1F \x60
# r'...' specifies it's a regular expression with special treatment for \
FORBIDDEN_INPUT_CHARS = r'[\<\>\|\;\"\'\&\\\[\]\(\)\x00-\x1F\x60]'
# OPA endpoint
OPA_USE = get_boolean_var(os.environ.get("F7T_OPA_USE",False))
OPA_URL = os.environ.get("F7T_OPA_URL","http://localhost:8181").strip('\'"')
POLICY_PATH = os.environ.get("F7T_POLICY_PATH","v1/data/f7t/authz").strip('\'"')
### SSL parameters
USE_SSL = get_boolean_var(os.environ.get("F7T_USE_SSL", False))
SSL_CRT = os.environ.get("F7T_SSL_CRT", "")
SSL_KEY = os.environ.get("F7T_SSL_KEY", "")
TRACER_HEADER = "uber-trace-id"
# checks JWT from Keycloak, optionally validates signature. It only receives the content of header's auth pair (not key:content)
def check_header(header):
if debug:
logging.info('debug: header: ' + header)
# header = "Bearer ey...", remove first 7 chars
try:
if realm_pubkey == '':
if not debug:
logging.warning("WARNING: REALM_RSA_PUBLIC_KEY is empty, JWT tokens are NOT verified, setup is not set to debug.")
decoded = jwt.decode(header[7:], verify=False)
else:
if AUTH_AUDIENCE == '':
decoded = jwt.decode(header[7:], realm_pubkey, algorithms=realm_pubkey_type, options={'verify_aud': False})
else:
decoded = jwt.decode(header[7:], realm_pubkey, algorithms=realm_pubkey_type, audience=AUTH_AUDIENCE)
if AUTH_REQUIRED_SCOPE != "":
if AUTH_REQUIRED_SCOPE not in decoded["scope"].split():
return False
return True
except jwt.exceptions.InvalidSignatureError:
logging.error("JWT invalid signature", exc_info=True)
except jwt.ExpiredSignatureError:
logging.error("JWT token has expired", exc_info=True)
except jwt.InvalidAudienceError:
logging.error("JWT token invalid audience", exc_info=True)
except jwt.exceptions.InvalidAlgorithmError:
logging.error("JWT invalid signature algorithm", exc_info=True)
except Exception:
logging.error("Bad header or JWT, general exception raised", exc_info=True)
return False
# returns username
def get_username(header):
# header = "Bearer ey...", remove first 7 chars
try:
if realm_pubkey == '':
decoded = jwt.decode(header[7:], verify=False)
else:
decoded = jwt.decode(header[7:], realm_pubkey, algorithms=realm_pubkey_type, options={'verify_aud': False})
# check if it's a service account token
try:
if AUTH_ROLE in decoded["realm_access"]["roles"]:
clientId = decoded["clientId"]
username = decoded["resource_access"][clientId]["roles"][0]
return username
return decoded['preferred_username']
except Exception:
return decoded['preferred_username']
except jwt.exceptions.InvalidSignatureError:
logging.error("JWT invalid signature", exc_info=True)
except jwt.ExpiredSignatureError:
logging.error("JWT token has expired", exc_info=True)
except jwt.InvalidAudienceError:
logging.error("JWT token invalid audience", exc_info=True)
except jwt.exceptions.InvalidAlgorithmError:
logging.error("JWT invalid signature algorithm", exc_info=True)
except Exception:
logging.error("Bad header or JWT, general exception raised", exc_info=True)
return None
# function to check if pattern is in string
def in_str(stringval,words):
try:
stringval.index(words)
return True
except ValueError:
return False
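# Illustrative calls (not executed):
#   in_str("Permission denied by server", "Permission denied")  # -> True
#   in_str("OK", "Permission denied")                            # -> False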
# SSH certificates creation
# returns pub key certificate name
def create_certificate(headers, cluster_name, cluster_addr, command=None, options=None, exp_time=None):
"""
Args:
        cluster_name = public name of the system where the command will be executed
        cluster_addr = private DNS or IP of the system
        command = command to be executed with the certificate (required)
        options = parameters and options to be executed with {command}
        exp_time = expiration time for the SSH certificate
"""
reqURL = f"{CERTIFICATOR_URL}/?cluster={cluster_name}&addr={cluster_addr}"
if command:
logging.info(f"\tCommand: {command}")
reqURL += "&command=" + base64.urlsafe_b64encode(command.encode()).decode()
if options:
logging.info(f"\tOptions (truncated): {options:80}")
reqURL += "&option=" + base64.urlsafe_b64encode(options.encode()).decode()
if exp_time:
logging.info(f"\tExpiration: {exp_time} [s]")
reqURL += f"&exptime={exp_time}"
else:
logging.error('Tried to create certificate without command')
return [None, 1, 'Internal error']
if debug:
username = get_username(headers[AUTH_HEADER_NAME])
logging.info(f"Create certificate for user {username}")
if options:
# may contain Storage URL
logging.info(f"\tOptions (complete): {options}")
logging.info(f"Request URL: {reqURL}")
try:
resp = requests.get(reqURL, headers=headers, verify= (SSL_CRT if USE_SSL else False) )
if resp.status_code != 200:
return [None, resp.status_code, resp.json()["description"]]
jcert = resp.json()
# create temp dir to store certificate for this request
td = tempfile.mkdtemp(prefix="dummy")
os.symlink(os.getcwd() + "/user-key.pub", td + "/user-key.pub") # link on temp dir
os.symlink(os.getcwd() + "/user-key", td + "/user-key") # link on temp dir
certf = open(td + "/user-key-cert.pub", 'w')
certf.write(jcert["certificate"])
certf.close()
# stat.S_IRUSR -> owner has read permission
os.chmod(td + "/user-key-cert.pub", stat.S_IRUSR)
# keys: [pub_cert, pub_key, priv_key, temp_dir]
return [td + "/user-key-cert.pub", td + "/user-key.pub", td + "/user-key", td]
except requests.exceptions.SSLError as ssle:
logging.error(f"(-2) -> {ssle}")
logging.error(f"(-2) -> {ssle.strerror}")
return [None, -2, ssle]
except IOError as ioe:
logging.error(f"({ioe.errno}) -> {ioe.strerror}", exc_info=True)
return [None, ioe.errno, ioe.strerror]
except Exception as e:
logging.error(f"({type(e)}) -> {e}", exc_info=True)
return [None, -1, e]
# execute remote commands with Paramiko:
def exec_remote_command(headers, system_name, system_addr, action, file_transfer=None, file_content=None):
import paramiko, socket
logging.info(f'System name: {system_name} - action: {action}')
if file_transfer == "storage_cert":
# storage is using a previously generated cert, save cert list from content
# cert_list: list of 4 elements that contains
# [0] path to the public certificate
# [1] path to the public key for user
# [2] path to the priv key for user
# [3] path to the dir containing 3 previous files
cert_list = file_content
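        # note: in this "storage_cert" branch the `headers` argument is expected to
        # already contain the username (see the assignment below), not an HTTP headers dict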
username = headers
else:
# get certificate:
# if OK returns: [pub_cert, pub_key, priv_key, temp_dir]
# if FAILED returns: [None, errno, strerror]
cert_list = create_certificate(headers, system_name, system_addr, command=action)
        if cert_list[0] is None:
result = {"error": cert_list[1], "msg": cert_list[2]}
return result
username = get_username(headers[AUTH_HEADER_NAME])
[pub_cert, pub_key, priv_key, temp_dir] = cert_list
# -------------------
# remote exec with paramiko
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ipaddr = system_addr.split(':')
host = ipaddr[0]
if len(ipaddr) == 1:
port = 22
else:
port = int(ipaddr[1])
client.connect(hostname=host, port=port,
username=username,
key_filename=pub_cert,
allow_agent=False,
look_for_keys=False,
timeout=10)
if F7T_SSH_CERTIFICATE_WRAPPER:
if debug:
logging.info(f"Using F7T_SSH_CERTIFICATE_WRAPPER.")
# read cert to send it as a command to the server
with open(pub_cert, 'r') as cert_file:
cert = cert_file.read().rstrip("\n") # remove newline at the end
action = cert
stdin, stdout, stderr = client.exec_command(action)
if file_transfer == "upload":
# uploads use "cat", so write to stdin
stdin.channel.sendall(file_content)
stdin.channel.shutdown_write()
#stdin.channel.close()
output = ""
error = ""
finished = 0
stderr_errno = -2
stdout_errno = -2
stderr_errda = ""
stdout_errda = ""
# poll process status since directly using recv_exit_status() could result
# in a permanent hang when remote output is larger than the current Transport or session’s window_size
while True:
if stderr.channel.exit_status_ready():
logging.info(f"stderr channel exit status ready")
stderr_errno = stderr.channel.recv_exit_status()
endtime = time.time() + 30
eof_received = True
while not stderr.channel.eof_received:
# time.sleep(0.5)
if time.time() > endtime:
stderr.channel.close()
eof_received = False
break
if eof_received:
error = "".join(stderr.readlines())
# error = stderr.read()
# clean "tput: No ..." lines at error output
stderr_errda = clean_err_output(error)
break
# else:
# time.sleep(5)
#for i in range(0,10):
while True:
if stdout.channel.exit_status_ready():
logging.info(f"stdout channel exit status ready")
stdout_errno = stdout.channel.recv_exit_status()
endtime = time.time() + 30
eof_received = True
while not stdout.channel.eof_received:
# time.sleep(0.5)
if time.time() > endtime:
stdout.channel.close()
eof_received = False
break
if eof_received:
output = "".join(stdout.readlines())
# error = stderr.read() it hangs
# clean "tput: No ..." lines at error output
stdout_errda = clean_err_output(output)
break
# else:
# time.sleep(5)
if file_transfer == "download":
outlines = output
else:
# replace newlines with $ for parsing
outlines = output.replace('\n', '$')[:-1]
# hiding success results from utilities/download, since output is the content of the file
if file_transfer == "download":
if stderr_errno !=0:
logging.info(f"stderr: ({stderr_errno}) --> {stderr_errda}")
logging.info(f"stdout: ({stdout_errno}) --> {stdout_errda}")
logging.info(f"stdout: ({stdout_errno}) --> {outlines}")
else:
logging.info(f"stderr: ({stderr_errno}) --> Download OK (content hidden)")
logging.info(f"stdout: ({stdout_errno}) --> Download OK (content hidden)")
else:
logging.info(f"stderr: ({stderr_errno}) --> {stderr_errda}")
logging.info(f"stdout: ({stdout_errno}) --> {stdout_errda}")
logging.info(f"stdout: ({stdout_errno}) --> {outlines}")
if stderr_errno == 0:
if stderr_errda and not (in_str(stderr_errda,"Could not chdir to home directory") or in_str(stderr_errda,"scancel: Terminating job")):
result = {"error": 1, "msg": stderr_errda}
elif in_str(stdout_errda, "No such file"): # in case that error is 0 and the msg is on the stdout (like with some file)
result = {"error": 1, "msg": stdout_errda}
elif in_str(stdout_errda, "no read permission"): # in case that error is 0 and the msg is on the stdout (like with some file)
result = {"error": 1, "msg": stdout_errda}
elif in_str(stdout_errda, "cannot open"): # in case that error is 0 and the msg is on the stdout (like with some file)
result = {"error": 1, "msg": stdout_errda}
else:
result = {"error": 0, "msg": outlines}
elif stderr_errno > 0:
if stderr_errno == 7:
result = {"error": 7, "msg": "Failed to connect to staging area server"}
else:
result = {"error": stderr_errno, "msg": stderr_errda or stdout_errda}
elif len(stderr_errda) > 0:
result = {"error": 1, "msg": stderr_errda}
elif stdout_errno == -2:
result = {"error": -2, "msg": "Receive ready timeout exceeded"}
elif stderr_errno == -1:
result = {"error": -1, "msg": "No exit status was provided by the server"}
# first if paramiko exception raise
except paramiko.ssh_exception.NoValidConnectionsError as e:
logging.error(type(e), exc_info=True)
if e.errors:
for k, v in e.errors.items():
logging.error(f"errorno: {v.errno}")
logging.error(f"strerr: {v.strerror}")
result = {"error": v.errno, "msg": v.strerror}
except socket.gaierror as e:
logging.error(type(e), exc_info=True)
logging.error(e.errno)
logging.error(e.strerror)
result = {"error": e.errno, "msg": e.strerror}
except paramiko.ssh_exception.SSHException as e:
logging.error(type(e), exc_info=True)
logging.error(e)
result = {"error": 1, "msg": str(e)}
# second: time out
except socket.timeout as e:
logging.error(type(e), exc_info=True)
# timeout has not errno
logging.error(e)
result = {"error": 1, "msg": e.strerror}
except Exception as e:
logging.error(type(e), exc_info=True)
result = {"error": 1, "msg": str(e)}
finally:
client.close()
os.remove(pub_cert)
os.remove(pub_key)
os.remove(priv_key)
os.rmdir(temp_dir)
# hiding results from utilities/download, since output is the content of the file
if file_transfer == "download":
logging.info(f"Result: status_code {result['error']} -> Utilities download")
else:
logging.info(f"Result: status_code {result['error']} -> {result['msg']}")
return result
# clean TERM errors on stderr
# reason: some servers produce this error because they don't set a TERM
def clean_err_output(tex):
lines = ""
# python3 tex comes as a byte object, needs to be decoded to a str
#tex = tex.decode('utf-8')
for t in tex.split('\n'):
if t != 'tput: No value for $TERM and no -T specified':
lines += t
return lines
def parse_io_error(retval, operation, path):
"""
    Since the command ended with an error, build the message returned to the user
Args: retval (from exec_remote_command)
operation, path:
return:
jsonify('error message'), error_code (4xx), optional_header
"""
header = ''
if retval["error"] == 13:
# IOError 13: Permission denied
header = {"X-Permission-Denied": "User does not have permissions to access machine or paths"}
elif retval["error"] == 2:
# IOError 2: no such file
header = {"X-Invalid-Path": f"{path} is invalid."}
elif retval["error"] == -2:
# IOError -2: name or service not known
header = {"X-Machine-Not-Available": "Machine is not available"}
elif retval["error"] == 118:
header = {"X-Permission-Denied": "Internal SSH error"}
elif in_str(retval["msg"],"Permission") or in_str(retval["msg"],"OPENSSH"):
header = {"X-Permission-Denied": "User does not have permissions to access machine or paths"}
return jsonify(description = f"Failed to {operation}"), 400, header
# function to call create task entry API in Queue FS, returns task_id for new task
def create_task(headers, service=None):
# returns {"task_id":task_id}
    # first, try to reach the Tasks microservice:
try:
# X-Firecrest-Service: service that created the task
headers["X-Firecrest-Service"] = service
req = requests.post(f"{TASKS_URL}/", headers=headers, verify=(SSL_CRT if USE_SSL else False))
except requests.exceptions.ConnectionError as e:
logging.error(type(e), exc_info=True)
logging.error(e)
return -1
if req.status_code != 201:
return -1
logging.info(json.loads(req.content))
resp = json.loads(req.content)
task_id = resp["hash_id"]
return task_id
# function to call update task entry API in Queue FS
def update_task(task_id, headers, status, msg=None, is_json=False):
logging.info(f"Update {TASKS_URL}/{task_id} -> status: {status}")
data = {"status": status, "msg": msg}
if is_json:
req = requests.put(f"{TASKS_URL}/{task_id}",
json=data, headers=headers, verify=(SSL_CRT if USE_SSL else False))
else:
req = requests.put(f"{TASKS_URL}/{task_id}",
data=data, headers=headers, verify=(SSL_CRT if USE_SSL else False))
resp = json.loads(req.content)
return resp
# function to call update task entry API in Queue FS
def expire_task(task_id, headers, service):
logging.info(f"{TASKS_URL}/expire/{task_id}")
try:
headers["X-Firecrest-Service"] = service
req = requests.post(f"{TASKS_URL}/expire/{task_id}",
headers=headers, verify=(SSL_CRT if USE_SSL else False))
except Exception as e:
logging.error(type(e))
logging.error(e.args)
if not req.ok:
logging.info(req.json())
return False
return True
# function to check task status:
def get_task_status(task_id, headers):
logging.info(f"{TASKS_URL}/{task_id}")
try:
retval = requests.get(f"{TASKS_URL}/{task_id}",
headers=headers, verify=(SSL_CRT if USE_SSL else False))
if retval.status_code != 200:
return -1
data = retval.json()
logging.info(data["task"]["status"])
return data["task"]["status"]
except Exception as e:
logging.error(type(e), exc_info=True)
logging.error(e)
return -1
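# Minimal sketch (illustrative only, never called in this module) of how the task
# helpers above fit together; the service name "compute" and the status value "100"
# are assumptions for the example, not constants defined here.
def _example_task_lifecycle(headers):
    task_id = create_task(headers, service="compute")
    if task_id == -1:
        return None
    update_task(task_id, headers, "100", msg="task created")
    status = get_task_status(task_id, headers)
    expire_task(task_id, headers, "compute")
    return status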
# checks if {path} is a valid file (exists and user in {auth_header} has read permissions)
def is_valid_file(path, headers, system_name, system_addr):
ID = headers.get(TRACER_HEADER, '')
    # checks user accessibility to the path by reading a single byte with head
action = f"ID={ID} head -c 1 -- '{path}' > /dev/null"
retval = exec_remote_command(headers, system_name, system_addr, action)
logging.info(retval)
if retval["error"] != 0:
error_str=retval["msg"]
if retval["error"] == 113:
return {"result":False, "headers":{"X-Machine-Not-Available":"Machine is not available"} }
if retval["error"] == 124:
return {"result":False, "headers":{"X-Timeout": "Command has finished with timeout signal"}}
# error no such file
if in_str(error_str,"No such file"):
return {"result":False, "headers":{"X-Invalid-Path": f"{path} is an invalid path."}}
# permission denied
if in_str(error_str,"Permission denied") or in_str(error_str,"OPENSSH"):
return {"result":False, "headers":{"X-Permission-Denied": "User does not have permissions to access machine or path"}}
if in_str(error_str, "directory"):
return {"result":False, "headers":{"X-A-Directory": f"{path} is a directory"}}
return {"result":False, "headers":{"X-Error": retval["msg"]}}
return {"result":True}
# checks if {path} is a valid directory
# 'path' should exist and be accessible to the user (write permissions)
#
def is_valid_dir(path, headers, system_name, system_addr):
    # create an empty file for testing path accessibility
    # the test file is hidden and carries a timestamp so it does not overwrite files created by the user
# after this, file should be deleted
timestamp = datetime.datetime.today().strftime("%Y-%m-%dT%H:%M:%S.%f")
# using a hash
hashedTS = hashlib.md5()
hashedTS.update(timestamp.encode("utf-8"))
tempFileName = f".firecrest.{hashedTS.hexdigest()}"
ID = headers.get(TRACER_HEADER, '')
action = f"ID={ID} touch -- '{path}/{tempFileName}'"
retval = exec_remote_command(headers, system_name, system_addr, action)
logging.info(retval)
if retval["error"] != 0:
error_str=retval["msg"]
if retval["error"] == 113:
return {"result":False, "headers":{"X-Machine-Not-Available":"Machine is not available"} }
if retval["error"] == 124:
return {"result":False, "headers":{"X-Timeout": "Command has finished with timeout signal"}}
# error no such file
if in_str(error_str,"No such file"):
return {"result":False, "headers":{"X-Invalid-Path": f"{path} is an invalid path."}}
# permission denied
if in_str(error_str,"Permission denied") or in_str(error_str,"OPENSSH"):
return {"result":False, "headers":{"X-Permission-Denied": "User does not have permissions to access machine or path"}}
# not a directory
if in_str(error_str,"Not a directory"):
return {"result":False, "headers":{"X-Not-A-Directory": f"{path} is not a directory"}}
return {"result":False, "headers":{"X-Error": retval["msg"]}}
# delete test file created
action = f"ID={ID} rm -- '{path}/{tempFileName}'"
retval = exec_remote_command(headers, system_name, system_addr, action)
return {"result":True}
# wrapper to check if AUTH header is correct
# decorator use:
#
# @app.route("/endpoint", methods=["GET","..."])
# @check_auth_header
# def function_that_check_header():
# .....
def check_auth_header(func):
@functools.wraps(func)
def wrapper_check_auth_header(*args, **kwargs):
try:
auth_header = request.headers[AUTH_HEADER_NAME]
except KeyError:
logging.error("No Auth Header given")
return jsonify(description="No Auth Header given"), 401
if not check_header(auth_header):
return jsonify(description="Invalid header"), 401
return func(*args, **kwargs)
return wrapper_check_auth_header
# check user authorization on endpoint
# using Open Policy Agent
#
# use:
# check_user_auth(username,system)
def check_user_auth(username,system):
# check if OPA is active
if OPA_USE:
try:
input = {"input":{"user": f"{username}", "system": f"{system}"}}
if debug:
logging.info(f"OPA: enabled, using {OPA_URL}/{POLICY_PATH}")
resp_opa = requests.post(f"{OPA_URL}/{POLICY_PATH}", json=input)
logging.info(resp_opa.content)
if resp_opa.json()["result"]["allow"]:
logging.info(f"User {username} authorized by OPA")
return {"allow": True, "description":f"User {username} authorized", "status_code": 200 }
else:
logging.error(f"User {username} NOT authorized by OPA")
return {"allow": False, "description":f"User {username} not authorized in {system}", "status_code": 401}
except requests.exceptions.RequestException as e:
logging.error(e.args)
return {"allow": False, "description":"Authorization server error", "status_code": 404}
return {"allow": True, "description":"Authorization method not active", "status_code": 200 }
# Checks each paramiko command output on a error execution
# error_str: strerr (or strout) of the command
# error_code: errno of the command
# service_msg: service output in the "description" json response
def check_command_error(error_str, error_code, service_msg):
if error_code == -2:
header = {"X-Machine-Not-Available": "Machine is not available"}
return {"description": service_msg, "status_code": 400, "header": header}
if error_code == 113:
header = {"X-Machine-Not-Available":"Machine is not available"}
return {"description": service_msg, "status_code": 400, "header": header}
if error_code == 124:
header = {"X-Timeout": "Command has finished with timeout signal"}
return {"description": service_msg, "status_code": 400, "header": header}
if error_code == 118:
header = {"X-Error": "Command execution is not allowed in machine"}
return {"description": service_msg, "status_code": 400, "header": header}
# When certificate doesn't match SSH configuration
if in_str(error_str,"OPENSSH"):
header = {"X-Permission-Denied": "User does not have permissions to access machine"}
return {"description": service_msg, "status_code": 400, "header": header}
if in_str(error_str,"cannot access"):
header={"X-Invalid-Path":"path is an invalid path"}
return {"description": service_msg, "status_code": 400, "header": header}
if in_str(error_str,"No such file"):
if in_str(error_str,"cannot stat"):
header={"X-Not-Found":"sourcePath not found"}
return {"description": service_msg, "status_code": 400, "header": header}
# copy: cannot create, rename: cannot move
if in_str(error_str, "cannot create") or in_str(error_str,"cannot move"):
header = {"X-Invalid-Path": "sourcePath and/or targetPath are invalid paths"}
return {"description": service_msg, "status_code": 400, "header": header}
if in_str(error_str,"cannot remove"):
header = {"X-Invalid-Path": "path is an invalid path."}
return {"description": service_msg, "status_code": 400, "header": header}
header={"X-Invalid-Path":"path is an invalid path"}
return {"description": service_msg, "status_code": 400, "header": header}
if in_str(error_str,"cannot open"):
header = {"X-Permission-Denied": "User does not have permissions to access path"}
return {"description":service_msg, "status_code": 400, "header": header}
if in_str(error_str,"Permission denied"):
header = {"X-Permission-Denied": "User does not have permissions to access path"}
return {"description": service_msg, "status_code": 400, "header": header}
if in_str(error_str,"directory"):
header = {"X-A-Directory": "path is a directory, can't checksum directories"}
return {"description": service_msg, "status_code": 400, "header": header}
    # if the target already exists, do not overwrite it (-i flag)
if in_str(error_str,"overwrite"):
header = {"X-Exists": "targetPath already exists"}
return {"description": service_msg, "status_code": 400, "header": header}
if in_str(error_str,"not permitted"):
header = {"X-Permission-Denied": "User does not have permissions to access path"}
return {"description": service_msg, "status_code": 400, "header": header}
if in_str(error_str,"invalid group"):
header = {"X-Invalid-Group": "group is an invalid group"}
return {"description": service_msg, "status_code": 400, "header": header}
if in_str(error_str,"invalid user"):
header = {"X-Invalid-Owner": "owner is an invalid user"}
return {"description": service_msg, "status_code": 400, "header": header}
if in_str(error_str, "invalid mode"):
header = {"X-Invalid-Mode": "mode is an invalid mode"}
return {"description": service_msg, "status_code": 400, "header": header}
if in_str(error_str, "read permission"):
header = {"X-Permission-Denied": "User does not have permissions to access path"}
return {"description": service_msg, "status_code": 400, "header": header}
header = {"X-Error": error_str}
return {"description": service_msg, "error": error_str, "status_code": 400, "header": header}
## Test if user provided text is not empty and has no invalid chars
def validate_input(text):
    if text is None:
return "not specified"
if text == "":
return "is empty"
    if re.search(FORBIDDEN_INPUT_CHARS, text) is not None:
logging.warning(f'Forbidden char on: {base64.urlsafe_b64encode(text.encode()).decode()}')
return "has invalid char"
return ""
# this formatter is executed for every log record
class LogRequestFormatter(logging.Formatter):
def format(self, record):
try:
# try to get TID from Flask g object, it's set on @app.before_request on each microservice
record.TID = g.TID
except:
try:
record.TID = threading.current_thread().name
except:
record.TID = 'notid'
return super().format(record)
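# Minimal wiring sketch (illustrative; the file name and format string are assumptions):
#
#   handler = logging.FileHandler("service.log")
#   handler.setFormatter(LogRequestFormatter(
#       "%(asctime)s %(levelname)s TID=%(TID)s %(message)s"))
#   logging.getLogger().addHandler(handler)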
| []
| []
| [
"F7T_SSL_KEY",
"F7T_POLICY_PATH",
"F7T_USE_SSL",
"F7T_AUTH_REQUIRED_SCOPE",
"F7T_REALM_RSA_PUBLIC_KEY",
"F7T_CERTIFICATOR_URL",
"F7T_OPA_USE",
"F7T_SSH_CERTIFICATE_WRAPPER",
"F7T_SSL_CRT",
"F7T_AUTH_ALLOWED_USERS",
"F7T_REALM_RSA_TYPE",
"F7T_TASKS_URL",
"F7T_OPA_URL",
"F7T_AUTH_ROLE",
"F7T_DEBUG_MODE",
"F7T_AUTH_TOKEN_AUD"
]
| [] | ["F7T_SSL_KEY", "F7T_POLICY_PATH", "F7T_USE_SSL", "F7T_AUTH_REQUIRED_SCOPE", "F7T_REALM_RSA_PUBLIC_KEY", "F7T_CERTIFICATOR_URL", "F7T_OPA_USE", "F7T_SSH_CERTIFICATE_WRAPPER", "F7T_SSL_CRT", "F7T_AUTH_ALLOWED_USERS", "F7T_REALM_RSA_TYPE", "F7T_TASKS_URL", "F7T_OPA_URL", "F7T_AUTH_ROLE", "F7T_DEBUG_MODE", "F7T_AUTH_TOKEN_AUD"] | python | 16 | 0 | |
evaluate_3dpw_mine.py | import os
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import cv2
import config
from utils import Mesh
from models import CMR
from models.smpl_from_lib import SMPL
from utils.pose_utils import compute_similarity_transform_batch, \
scale_and_translation_transform_batch
from utils.cam_utils import orthographic_project_torch, undo_keypoint_normalisation
from datasets.my_3dpw_eval_dataset import PW3DEvalDataset
def evaluate_3dpw(model,
eval_dataset,
metrics,
device,
vis_save_path,
num_workers=4,
pin_memory=True,
vis_every_n_batches=1000):
eval_dataloader = DataLoader(eval_dataset,
batch_size=1,
shuffle=False,
drop_last=True,
num_workers=num_workers,
pin_memory=pin_memory)
smpl = SMPL(config.SMPL_MODEL_DIR, batch_size=1)
smpl_male = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='male')
smpl_female = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='female')
smpl.to(device)
smpl_male.to(device)
smpl_female.to(device)
J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()
J_regressor_batch = J_regressor[None, :].to(device)
if 'pve' in metrics:
pve_smpl_sum = 0.0
pve_graph_sum = 0.0
pve_smpl_per_frame = []
pve_graph_per_frame = []
if 'pve_scale_corrected' in metrics:
pve_scale_corrected_smpl_sum = 0.0
pve_scale_corrected_graph_sum = 0.0
pve_scale_corrected_smpl_per_frame = []
pve_scale_corrected_graph_per_frame = []
if 'pve_pa' in metrics:
pve_pa_smpl_sum = 0.0
pve_pa_graph_sum = 0.0
pve_pa_smpl_per_frame = []
pve_pa_graph_per_frame = []
if 'pve-t' in metrics:
pvet_sum = 0.0
pvet_per_frame = []
if 'pve-t_scale_corrected' in metrics:
pvet_scale_corrected_sum = 0.0
pvet_scale_corrected_per_frame = []
if 'mpjpe' in metrics:
mpjpe_smpl_sum = 0.0
mpjpe_graph_sum = 0.0
mpjpe_smpl_per_frame = []
mpjpe_graph_per_frame = []
if 'mpjpe_scale_corrected' in metrics:
mpjpe_scale_corrected_smpl_sum = 0.0
mpjpe_scale_corrected_graph_sum = 0.0
mpjpe_scale_corrected_smpl_per_frame = []
mpjpe_scale_corrected_graph_per_frame = []
if 'j3d_rec_err' in metrics:
j3d_rec_err_smpl_sum = 0.0
j3d_rec_err_graph_sum = 0.0
j3d_rec_err_smpl_per_frame = []
j3d_rec_err_graph_per_frame = []
if 'pve_2d' in metrics:
pve_2d_smpl_sum = 0.0
pve_2d_graph_sum = 0.0
if 'pve_2d_scale_corrected' in metrics:
pve_2d_scale_corrected_smpl_sum = 0.0
pve_2d_scale_corrected_graph_sum = 0.0
if 'pve_2d_pa' in metrics:
pve_2d_pa_smpl_sum = 0.0
pve_2d_pa_graph_sum = 0.0
num_samples = 0
num_vertices = 6890
num_joints3d = 14
model.eval()
for batch_num, samples_batch in enumerate(tqdm(eval_dataloader)):
# ------------------------------- TARGETS and INPUTS -------------------------------
input = samples_batch['input']
input = input.to(device)
target_pose = samples_batch['pose'].to(device)
target_shape = samples_batch['shape'].to(device)
target_gender = samples_batch['gender'][0]
if target_gender == 'm':
target_smpl_output = smpl_male(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_vertices = target_smpl_output.vertices
target_reposed_smpl_output = smpl_male(betas=target_shape)
target_reposed_vertices = target_reposed_smpl_output.vertices
target_joints_h36m = torch.matmul(J_regressor_batch, target_vertices)
target_joints_h36mlsp = target_joints_h36m[:, config.H36M_TO_J14, :]
elif target_gender == 'f':
target_smpl_output = smpl_female(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_vertices = target_smpl_output.vertices
target_reposed_smpl_output = smpl_female(betas=target_shape)
target_reposed_vertices = target_reposed_smpl_output.vertices
target_joints_h36m = torch.matmul(J_regressor_batch, target_vertices)
target_joints_h36mlsp = target_joints_h36m[:, config.H36M_TO_J14, :]
# ------------------------------- PREDICTIONS -------------------------------
pred_vertices, pred_vertices_smpl, pred_camera, pred_rotmat, pred_betas = model(input)
pred_vertices_projected2d = orthographic_project_torch(pred_vertices, pred_camera)
pred_vertices_projected2d = undo_keypoint_normalisation(pred_vertices_projected2d, input.shape[-1])
pred_vertices_smpl_projected2d = orthographic_project_torch(pred_vertices_smpl, pred_camera)
pred_vertices_smpl_projected2d = undo_keypoint_normalisation(pred_vertices_smpl_projected2d, input.shape[-1])
pred_reposed_smpl_output = smpl(betas=pred_betas)
pred_reposed_vertices = pred_reposed_smpl_output.vertices
pred_joints_h36m = torch.matmul(J_regressor_batch, pred_vertices)
pred_joints_h36mlsp = pred_joints_h36m[:, config.H36M_TO_J14, :]
pred_joints_smpl_h36m = torch.matmul(J_regressor_batch, pred_vertices_smpl)
pred_joints_smpl_h36mlsp = pred_joints_smpl_h36m[:, config.H36M_TO_J14, :]
# Numpy-fying
target_vertices = target_vertices.cpu().detach().numpy()
target_reposed_vertices = target_reposed_vertices.cpu().detach().numpy()
target_joints_h36mlsp = target_joints_h36mlsp.cpu().detach().numpy()
pred_vertices = pred_vertices.cpu().detach().numpy()
pred_vertices_smpl = pred_vertices_smpl.cpu().detach().numpy()
pred_vertices_projected2d = pred_vertices_projected2d.cpu().detach().numpy()
pred_vertices_smpl_projected2d = pred_vertices_smpl_projected2d.cpu().detach().numpy()
pred_reposed_vertices = pred_reposed_vertices.cpu().detach().numpy()
pred_joints_h36mlsp = pred_joints_h36mlsp.cpu().detach().numpy()
pred_joints_smpl_h36mlsp = pred_joints_smpl_h36mlsp.cpu().detach().numpy()
# ------------------------------- METRICS -------------------------------
if 'pve' in metrics:
pve_smpl_batch = np.linalg.norm(pred_vertices_smpl - target_vertices, axis=-1) # (1, 6890)
pve_graph_batch = np.linalg.norm(pred_vertices - target_vertices, axis=-1)
pve_smpl_sum += np.sum(pve_smpl_batch) # scalar
pve_graph_sum += np.sum(pve_graph_batch)
pve_smpl_per_frame.append(np.mean(pve_smpl_batch, axis=-1))
pve_graph_per_frame.append(np.mean(pve_graph_batch, axis=-1))
# Scale and translation correction
if 'pve_scale_corrected' in metrics:
pred_vertices_smpl_sc = scale_and_translation_transform_batch(pred_vertices_smpl,
target_vertices)
pred_vertices_sc = scale_and_translation_transform_batch(pred_vertices,
target_vertices)
pve_sc_smpl_batch = np.linalg.norm(pred_vertices_smpl_sc - target_vertices,
axis=-1) # (1, 6890)
pve_sc_graph_batch = np.linalg.norm(pred_vertices_sc - target_vertices,
axis=-1) # (1, 6890)
pve_scale_corrected_smpl_sum += np.sum(pve_sc_smpl_batch) # scalar
pve_scale_corrected_graph_sum += np.sum(pve_sc_graph_batch) # scalar
pve_scale_corrected_smpl_per_frame.append(np.mean(pve_sc_smpl_batch, axis=-1))
pve_scale_corrected_graph_per_frame.append(np.mean(pve_sc_graph_batch, axis=-1))
# Procrustes analysis
if 'pve_pa' in metrics:
pred_vertices_smpl_pa = compute_similarity_transform_batch(pred_vertices_smpl, target_vertices)
pred_vertices_pa = compute_similarity_transform_batch(pred_vertices, target_vertices)
pve_pa_smpl_batch = np.linalg.norm(pred_vertices_smpl_pa - target_vertices, axis=-1) # (1, 6890)
pve_pa_graph_batch = np.linalg.norm(pred_vertices_pa - target_vertices, axis=-1) # (1, 6890)
pve_pa_smpl_sum += np.sum(pve_pa_smpl_batch) # scalar
pve_pa_graph_sum += np.sum(pve_pa_graph_batch) # scalar
pve_pa_smpl_per_frame.append(np.mean(pve_pa_smpl_batch, axis=-1))
pve_pa_graph_per_frame.append(np.mean(pve_pa_graph_batch, axis=-1))
if 'pve-t' in metrics:
pvet_batch = np.linalg.norm(pred_reposed_vertices - target_reposed_vertices, axis=-1)
pvet_sum += np.sum(pvet_batch)
pvet_per_frame.append(np.mean(pvet_batch, axis=-1))
# Scale and translation correction
if 'pve-t_scale_corrected' in metrics:
pred_reposed_vertices_sc = scale_and_translation_transform_batch(pred_reposed_vertices,
target_reposed_vertices)
pvet_scale_corrected_batch = np.linalg.norm(pred_reposed_vertices_sc - target_reposed_vertices,
axis=-1) # (bs, 6890)
pvet_scale_corrected_sum += np.sum(pvet_scale_corrected_batch) # scalar
pvet_scale_corrected_per_frame.append(np.mean(pvet_scale_corrected_batch, axis=-1))
if 'mpjpe' in metrics:
mpjpe_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp - target_joints_h36mlsp, axis=-1) # (bs, 14)
mpjpe_graph_batch = np.linalg.norm(pred_joints_h36mlsp - target_joints_h36mlsp, axis=-1) # (bs, 14)
mpjpe_smpl_sum += np.sum(mpjpe_smpl_batch)
mpjpe_graph_sum += np.sum(mpjpe_graph_batch)
mpjpe_smpl_per_frame.append(np.mean(mpjpe_smpl_batch, axis=-1))
mpjpe_graph_per_frame.append(np.mean(mpjpe_graph_batch, axis=-1))
# Scale and translation correction
if 'mpjpe_scale_corrected' in metrics:
pred_joints_smpl_h36mlsp_sc = scale_and_translation_transform_batch(pred_joints_smpl_h36mlsp,
target_joints_h36mlsp)
pred_joints_h36mlsp_sc = scale_and_translation_transform_batch(pred_joints_h36mlsp,
target_joints_h36mlsp)
mpjpe_scale_corrected_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp_sc - target_joints_h36mlsp,
axis=-1) # (bs, 14)
mpjpe_scale_corrected_graph_batch = np.linalg.norm(pred_joints_h36mlsp_sc - target_joints_h36mlsp,
axis=-1) # (bs, 14)
mpjpe_scale_corrected_smpl_sum += np.sum(mpjpe_scale_corrected_smpl_batch)
mpjpe_scale_corrected_graph_sum += np.sum(mpjpe_scale_corrected_graph_batch)
mpjpe_scale_corrected_smpl_per_frame.append(np.mean(mpjpe_scale_corrected_smpl_batch, axis=-1))
mpjpe_scale_corrected_graph_per_frame.append(np.mean(mpjpe_scale_corrected_graph_batch, axis=-1))
# Procrustes analysis
if 'j3d_rec_err' in metrics:
pred_joints_smpl_h36mlsp_pa = compute_similarity_transform_batch(pred_joints_smpl_h36mlsp,
target_joints_h36mlsp)
pred_joints_h36mlsp_pa = compute_similarity_transform_batch(pred_joints_h36mlsp, target_joints_h36mlsp)
j3d_rec_err_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp_pa - target_joints_h36mlsp, axis=-1) # (bs, 14)
j3d_rec_err_graph_batch = np.linalg.norm(pred_joints_h36mlsp_pa - target_joints_h36mlsp, axis=-1) # (bs, 14)
j3d_rec_err_smpl_sum += np.sum(j3d_rec_err_smpl_batch)
j3d_rec_err_graph_sum += np.sum(j3d_rec_err_graph_batch)
j3d_rec_err_smpl_per_frame.append(np.mean(j3d_rec_err_smpl_batch, axis=-1))
j3d_rec_err_graph_per_frame.append(np.mean(j3d_rec_err_graph_batch, axis=-1))
if 'pve_2d' in metrics:
pred_vertices_smpl_2d = pred_vertices_smpl[:, :, :2]
pred_vertices_2d = pred_vertices[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_graph_batch = np.linalg.norm(pred_vertices_2d - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_smpl_sum += np.sum(pve_2d_smpl_batch)
pve_2d_graph_sum += np.sum(pve_2d_graph_batch)
# Scale and translation correction
if 'pve_2d_scale_corrected' in metrics:
pred_vertices_smpl_sc = scale_and_translation_transform_batch(pred_vertices_smpl,
target_vertices)
pred_vertices_sc = scale_and_translation_transform_batch(pred_vertices,
target_vertices)
pred_vertices_smpl_2d_sc = pred_vertices_smpl_sc[:, :, :2]
pred_vertices_2d_sc = pred_vertices_sc[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_sc_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d_sc - target_vertices_2d,
axis=-1) # (bs, 6890)
pve_2d_sc_graph_batch = np.linalg.norm(pred_vertices_2d_sc - target_vertices_2d,
axis=-1) # (bs, 6890)
pve_2d_scale_corrected_smpl_sum += np.sum(pve_2d_sc_smpl_batch)
pve_2d_scale_corrected_graph_sum += np.sum(pve_2d_sc_graph_batch)
# Procrustes analysis
if 'pve_2d_pa' in metrics:
pred_vertices_smpl_pa = compute_similarity_transform_batch(pred_vertices_smpl, target_vertices)
pred_vertices_pa = compute_similarity_transform_batch(pred_vertices, target_vertices)
pred_vertices_smpl_2d_pa = pred_vertices_smpl_pa[:, :, :2]
pred_vertices_2d_pa = pred_vertices_pa[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_pa_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d_pa - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_pa_graph_batch = np.linalg.norm(pred_vertices_2d_pa - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_pa_smpl_sum += np.sum(pve_2d_pa_smpl_batch)
pve_2d_pa_graph_sum += np.sum(pve_2d_pa_graph_batch)
num_samples += target_pose.shape[0]
# ------------------------------- VISUALISE -------------------------------
if vis_every_n_batches is not None:
if batch_num % vis_every_n_batches == 0:
vis_imgs = samples_batch['vis_img'].numpy()
vis_imgs = np.transpose(vis_imgs, [0, 2, 3, 1])
fnames = samples_batch['fname']
plt.figure(figsize=(16, 12))
plt.subplot(341)
plt.imshow(vis_imgs[0])
plt.subplot(342)
plt.imshow(vis_imgs[0])
plt.scatter(pred_vertices_projected2d[0, :, 0], pred_vertices_projected2d[0, :, 1], s=0.1, c='r')
plt.subplot(343)
plt.imshow(vis_imgs[0])
plt.scatter(pred_vertices_smpl_projected2d[0, :, 0], pred_vertices_smpl_projected2d[0, :, 1], s=0.1, c='r')
plt.subplot(345)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices[0, :, 0], pred_vertices[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(346)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_smpl[0, :, 0], pred_vertices_smpl[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(347)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_pa[0, :, 0], pred_vertices_pa[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(348)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_smpl_pa[0, :, 0], pred_vertices_smpl_pa[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(349)
plt.scatter(target_reposed_vertices[0, :, 0], target_reposed_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_reposed_vertices_sc[0, :, 0], pred_reposed_vertices_sc[0, :, 1], s=0.1, c='r')
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 10)
for j in range(num_joints3d):
plt.scatter(pred_joints_h36mlsp[0, j, 0], pred_joints_h36mlsp[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_h36mlsp[0, j, 0], pred_joints_h36mlsp[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 11)
for j in range(num_joints3d):
plt.scatter(pred_joints_h36mlsp_pa[0, j, 0], pred_joints_h36mlsp_pa[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_h36mlsp_pa[0, j, 0], pred_joints_h36mlsp_pa[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 12)
for j in range(num_joints3d):
plt.scatter(pred_joints_smpl_h36mlsp_pa[0, j, 0], pred_joints_smpl_h36mlsp_pa[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_smpl_h36mlsp_pa[0, j, 0], pred_joints_smpl_h36mlsp_pa[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
# plt.show()
save_fig_path = os.path.join(vis_save_path, fnames[0])
plt.savefig(save_fig_path, bbox_inches='tight')
plt.close()
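    # NOTE: the np.save calls below use the module-level `save_path` defined in the
    # __main__ block rather than the `vis_save_path` argument, so this function
    # assumes it is invoked from this script.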
if 'pve' in metrics:
pve_smpl = pve_smpl_sum / (num_samples * num_vertices)
print('PVE SMPL: {:.5f}'.format(pve_smpl))
pve_graph = pve_graph_sum / (num_samples * num_vertices)
print('PVE GRAPH: {:.5f}'.format(pve_graph))
pve_smpl_per_frame = np.concatenate(pve_smpl_per_frame, axis=0)
pve_graph_per_frame = np.concatenate(pve_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'pve_per_frame.npy'), pve_smpl_per_frame)
np.save(os.path.join(save_path, 'pve_graph_per_frame.npy'), pve_graph_per_frame)
if 'pve_scale_corrected' in metrics:
pve_sc_smpl = pve_scale_corrected_smpl_sum / (num_samples * num_vertices)
print('PVE SC SMPL: {:.5f}'.format(pve_sc_smpl))
pve_sc_graph = pve_scale_corrected_graph_sum / (num_samples * num_vertices)
print('PVE SC GRAPH: {:.5f}'.format(pve_sc_graph))
pve_scale_corrected_smpl_per_frame = np.concatenate(pve_scale_corrected_smpl_per_frame, axis=0)
pve_scale_corrected_graph_per_frame = np.concatenate(pve_scale_corrected_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'pve_scale_corrected_per_frame.npy'),
pve_scale_corrected_smpl_per_frame)
np.save(os.path.join(save_path, 'pve_scale_corrected_graph_per_frame.npy'),
pve_scale_corrected_graph_per_frame)
if 'pve_pa' in metrics:
pve_pa_smpl = pve_pa_smpl_sum / (num_samples * num_vertices)
print('PVE PA SMPL: {:.5f}'.format(pve_pa_smpl))
pve_pa_graph = pve_pa_graph_sum / (num_samples * num_vertices)
print('PVE PA GRAPH: {:.5f}'.format(pve_pa_graph))
pve_pa_smpl_per_frame = np.concatenate(pve_pa_smpl_per_frame, axis=0)
pve_pa_graph_per_frame = np.concatenate(pve_pa_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'pve_pa_per_frame.npy'), pve_pa_smpl_per_frame)
np.save(os.path.join(save_path, 'pve_pa_graph_per_frame.npy'), pve_pa_graph_per_frame)
if 'pve-t' in metrics:
pvet = pvet_sum / (num_samples * num_vertices)
print('PVE-T: {:.5f}'.format(pvet))
pvet_per_frame = np.concatenate(pvet_per_frame, axis=0)
np.save(os.path.join(save_path, 'pvet_per_frame.npy'), pvet_per_frame)
if 'pve-t_scale_corrected' in metrics:
pvet_sc = pvet_scale_corrected_sum / (num_samples * num_vertices)
print('PVE-T SC: {:.5f}'.format(pvet_sc))
pvet_scale_corrected_per_frame = np.concatenate(pvet_scale_corrected_per_frame, axis=0)
np.save(os.path.join(save_path, 'pvet_scale_corrected_per_frame.npy'),
pvet_scale_corrected_per_frame)
if 'mpjpe' in metrics:
mpjpe_smpl = mpjpe_smpl_sum / (num_samples * num_joints3d)
print('MPJPE SMPL: {:.5f}'.format(mpjpe_smpl))
mpjpe_graph = mpjpe_graph_sum / (num_samples * num_joints3d)
print('MPJPE GRAPH: {:.5f}'.format(mpjpe_graph))
mpjpe_smpl_per_frame = np.concatenate(mpjpe_smpl_per_frame, axis=0)
mpjpe_graph_per_frame = np.concatenate(mpjpe_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'mpjpe_per_frame.npy'), mpjpe_smpl_per_frame)
np.save(os.path.join(save_path, 'mpjpe_graph_per_frame.npy'), mpjpe_graph_per_frame)
if 'mpjpe_scale_corrected' in metrics:
mpjpe_sc_smpl = mpjpe_scale_corrected_smpl_sum / (num_samples * num_joints3d)
print('MPJPE SC SMPL: {:.5f}'.format(mpjpe_sc_smpl))
mpjpe_sc_graph = mpjpe_scale_corrected_graph_sum / (num_samples * num_joints3d)
print('MPJPE SC GRAPH: {:.5f}'.format(mpjpe_sc_graph))
mpjpe_scale_corrected_smpl_per_frame = np.concatenate(
mpjpe_scale_corrected_smpl_per_frame, axis=0)
mpjpe_scale_corrected_graph_per_frame = np.concatenate(
mpjpe_scale_corrected_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'mpjpe_scale_corrected_per_frame.npy'),
mpjpe_scale_corrected_smpl_per_frame)
np.save(os.path.join(save_path, 'mpjpe_scale_corrected_graph_per_frame.npy'),
mpjpe_scale_corrected_graph_per_frame)
if 'j3d_rec_err' in metrics:
j3d_rec_err_smpl = j3d_rec_err_smpl_sum / (num_samples * num_joints3d)
print('Rec Err SMPL: {:.5f}'.format(j3d_rec_err_smpl))
j3d_rec_err_graph = j3d_rec_err_graph_sum / (num_samples * num_joints3d)
print('Rec Err GRAPH: {:.5f}'.format(j3d_rec_err_graph))
j3d_rec_err_smpl_per_frame = np.concatenate(j3d_rec_err_smpl_per_frame, axis=0)
j3d_rec_err_graph_per_frame = np.concatenate(j3d_rec_err_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'j3d_rec_err_per_frame.npy'),
j3d_rec_err_smpl_per_frame)
np.save(os.path.join(save_path, 'j3d_rec_err_graph_per_frame.npy'),
j3d_rec_err_graph_per_frame)
if 'pve_2d' in metrics:
pve_2d_smpl = pve_2d_smpl_sum / (num_samples * num_vertices)
print('PVE 2D SMPL: {:.5f}'.format(pve_2d_smpl))
pve_2d_graph = pve_2d_graph_sum / (num_samples * num_vertices)
print('PVE 2D GRAPH: {:.5f}'.format(pve_2d_graph))
if 'pve_2d_scale_corrected' in metrics:
pve_2d_sc_smpl = pve_2d_scale_corrected_smpl_sum / (num_samples * num_vertices)
print('PVE 2D SC SMPL: {:.5f}'.format(pve_2d_sc_smpl))
pve_2d_sc_graph = pve_2d_scale_corrected_graph_sum / (num_samples * num_vertices)
print('PVE 2D SC GRAPH: {:.5f}'.format(pve_2d_sc_graph))
if 'pve_2d_pa' in metrics:
pve_2d_pa_smpl = pve_2d_pa_smpl_sum / (num_samples * num_vertices)
print('PVE 2D PA SMPL: {:.5f}'.format(pve_2d_pa_smpl))
pve_2d_pa_graph = pve_2d_pa_graph_sum / (num_samples * num_vertices)
print('PVE 2D PA GRAPH: {:.5f}'.format(pve_2d_pa_graph))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', default=None, help='Path to network checkpoint')
parser.add_argument('--gpu', default="0", type=str, help='GPU')
args = parser.parse_args()
# Device
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Load model
mesh = Mesh(device=device)
# Our pretrained networks have 5 residual blocks with 256 channels.
# You might want to change this if you use a different architecture.
model = CMR(mesh, 5, 256, pretrained_checkpoint=args.checkpoint, device=device)
model.to(device)
model.eval()
# Setup evaluation dataset
dataset_path = '/scratch2/as2562/datasets/3DPW/test'
dataset = PW3DEvalDataset(dataset_path, img_wh=config.INPUT_RES)
print("Eval examples found:", len(dataset))
# Metrics
metrics = ['pve', 'pve-t', 'pve_pa', 'pve-t_pa', 'mpjpe', 'j3d_rec_err',
'pve_2d', 'pve_2d_pa', 'pve_2d_scale_corrected',
'pve_scale_corrected', 'pve-t_scale_corrected', 'mpjpe_scale_corrected']
save_path = '/data/cvfs/as2562/GraphCMR/evaluations/3dpw'
if not os.path.exists(save_path):
os.makedirs(save_path)
# Run evaluation
evaluate_3dpw(model=model,
eval_dataset=dataset,
metrics=metrics,
device=device,
vis_save_path=save_path,
num_workers=4,
pin_memory=True,
vis_every_n_batches=1000)
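# Example invocation (illustrative; the checkpoint path depends on the local setup):
#   python evaluate_3dpw_mine.py --checkpoint /path/to/checkpoint.pt --gpu 0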
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
src/prefect/environments/storage/docker.py | import filecmp
import json
import os
import re
import shutil
import sys
import tempfile
import textwrap
import uuid
import warnings
from pathlib import PurePosixPath
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Union
import cloudpickle
import pendulum
from slugify import slugify
import prefect
from prefect.environments.storage import Storage
from prefect.utilities.storage import extract_flow_from_file
if TYPE_CHECKING:
import docker
class Docker(Storage):
"""
Docker storage provides a mechanism for storing Prefect flows in Docker
images and optionally pushing them to a registry.
A user specifies a `registry_url`, `base_image` and other optional
dependencies (e.g., `python_dependencies`) and `build()` will create a
temporary Dockerfile that is used to build the image.
Note that the `base_image` must be capable of `pip` installing. Note that
registry behavior with respect to image names can differ between providers -
for example, Google's GCR registry allows for registry URLs of the form
`gcr.io/my-registry/subdir/my-image-name` whereas DockerHub requires the
registry URL to be separate from the image name.
Custom modules can be packaged up during build by attaching the files and
setting the `PYTHONPATH` to the location of those files. Otherwise the
modules can be set independently when using a custom base image prior to the
build here.
```python
Docker(
files={
# absolute path source -> destination in image
"/Users/me/code/mod1.py": "/modules/mod1.py",
"/Users/me/code/mod2.py": "/modules/mod2.py",
},
env_vars={
# append modules directory to PYTHONPATH
"PYTHONPATH": "$PYTHONPATH:modules/"
},
)
```
Args:
- registry_url (str, optional): URL of a registry to push the image to;
image will not be pushed if not provided
- base_image (str, optional): the base image for this environment (e.g.
`python:3.6`), defaults to the `prefecthq/prefect` image matching your
python version and prefect core library version used at runtime.
- dockerfile (str, optional): a path to a Dockerfile to use in building
this storage; note that, if provided, your present working directory
will be used as the build context
- python_dependencies (List[str], optional): list of pip installable
dependencies for the image
- image_name (str, optional): name of the image to use when building,
populated with a UUID after build
- image_tag (str, optional): tag of the image to use when building,
populated with a UUID after build
- env_vars (dict, optional): a dictionary of environment variables to
use when building
- files (dict, optional): a dictionary of files or directories to copy into
the image when building. Takes the format of `{'src': 'dest'}`
- prefect_version (str, optional): an optional branch, tag, or commit
specifying the version of prefect you want installed into the container;
defaults to the version you are currently using or `"master"` if your
version is ahead of the latest tag
- local_image (bool, optional): an optional flag whether or not to use a
local docker image, if True then a pull will not be attempted
- ignore_healthchecks (bool, optional): if True, the Docker healthchecks
are not added to the Dockerfile. If False (default), healthchecks
are included.
        - base_url (str, optional): a URL of a Docker daemon to use for
            Docker related functionality. Defaults to DOCKER_HOST env var if not set
- tls_config (Union[bool, docker.tls.TLSConfig], optional): a TLS configuration to pass
to the Docker client.
[Documentation](https://docker-py.readthedocs.io/en/stable/tls.html#docker.tls.TLSConfig)
- build_kwargs (dict, optional): Additional keyword arguments to pass to Docker's build
step. [Documentation](https://docker-py.readthedocs.io/en/stable/
api.html#docker.api.build.BuildApiMixin.build)
- prefect_directory (str, optional): Path to the directory where prefect configuration/flows
should be stored inside the Docker image. Defaults to `/opt/prefect`.
- path (str, optional): a direct path to the location of the flow file in the Docker image
if `stored_as_script=True`.
- stored_as_script (bool, optional): boolean for specifying if the flow has been stored
as a `.py` file. Defaults to `False`
- extra_dockerfile_commands (list[str], optional): list of Docker build commands
which are injected at the end of generated DockerFile (before the health checks).
Defaults to `None`
- **kwargs (Any, optional): any additional `Storage` initialization options
Raises:
- ValueError: if both `base_image` and `dockerfile` are provided
"""
def __init__(
self,
registry_url: str = None,
base_image: str = None,
dockerfile: str = None,
python_dependencies: List[str] = None,
image_name: str = None,
image_tag: str = None,
env_vars: dict = None,
files: dict = None,
prefect_version: str = None,
local_image: bool = False,
ignore_healthchecks: bool = False,
base_url: str = None,
tls_config: Union[bool, "docker.tls.TLSConfig"] = False,
build_kwargs: dict = None,
prefect_directory: str = "/opt/prefect",
path: str = None,
stored_as_script: bool = False,
extra_dockerfile_commands: List[str] = None,
**kwargs: Any,
) -> None:
self.registry_url = registry_url
if sys.platform == "win32":
default_url = "npipe:////./pipe/docker_engine"
else:
default_url = "unix://var/run/docker.sock"
self.image_name = image_name
self.image_tag = image_tag
self.python_dependencies = python_dependencies or []
self.python_dependencies.append("wheel")
self.prefect_directory = prefect_directory
self.path = path
self.env_vars = env_vars or {}
self.env_vars.setdefault(
"PREFECT__USER_CONFIG_PATH", "{}/config.toml".format(self.prefect_directory)
)
self.files = files or {}
self.flows = dict() # type: Dict[str, str]
self._flows = dict() # type: Dict[str, "prefect.core.flow.Flow"]
self.local_image = local_image
self.installation_commands = [] # type: List[str]
self.ignore_healthchecks = ignore_healthchecks
self.base_url = base_url or os.environ.get("DOCKER_HOST", default_url)
self.tls_config = tls_config
self.build_kwargs = build_kwargs or {}
self.extra_dockerfile_commands = extra_dockerfile_commands
version = prefect.__version__.split("+")
if prefect_version is None:
self.prefect_version = "master" if len(version) > 1 else version[0]
else:
self.prefect_version = prefect_version
if base_image is None and dockerfile is None:
python_version = "{}.{}".format(
sys.version_info.major, sys.version_info.minor
)
if re.match(r"^[0-9]+\.[0-9]+\.[0-9]+$", self.prefect_version) is not None:
self.base_image = "prefecthq/prefect:{}-python{}".format(
self.prefect_version, python_version
)
else:
# create an image from python:*-slim directly
self.base_image = "python:{}-slim".format(python_version)
self.installation_commands.append(
"apt update && apt install -y gcc git && rm -rf /var/lib/apt/lists/*"
)
elif base_image and dockerfile:
raise ValueError(
"Only one of `base_image` and `dockerfile` can be provided."
)
else:
self.base_image = base_image # type: ignore
self.dockerfile = dockerfile
# we should always try to install prefect, unless it is already installed. We can't
# determine this until image build time.
self.installation_commands.append(
f"pip show prefect || "
f"pip install git+https://github.com/PrefectHQ/prefect.git"
f"@{self.prefect_version}#egg=prefect[kubernetes]"
)
not_absolute = [
file_path for file_path in self.files if not os.path.isabs(file_path)
]
if not_absolute:
raise ValueError(
(
"Provided paths {} are not absolute file paths, please provide "
"absolute paths only."
).format(", ".join(not_absolute))
)
super().__init__(stored_as_script=stored_as_script, **kwargs)
def get_env_runner(self, flow_location: str) -> Callable[[Dict[str, str]], None]:
"""
Given a flow_location within this Storage object, returns something with a
`run()` method which accepts the standard runner kwargs and can run the flow.
Args:
- flow_location (str): the location of a flow within this Storage
Returns:
- a runner interface (something with a `run()` method for running the flow)
"""
def runner(env: dict) -> None:
"""
Given a dictionary of environment variables, calls `flow.run()` with these
environment variables set.
"""
image = "{}:{}".format(self.image_name, self.image_tag)
client = self._get_client()
container = client.create_container(image, command="tail -f /dev/null")
client.start(container=container.get("Id"))
python_script = (
f"import cloudpickle; f = open('{flow_location}', 'rb'); "
f"flow = cloudpickle.load(f); f.close(); flow.run()"
)
try:
ee = client.exec_create(
container.get("Id"),
'python -c "{}"'.format(python_script),
environment=env,
)
output = client.exec_start(exec_id=ee, stream=True)
for item in output:
for line in item.decode("utf-8").split("\n"):
if line:
print(line)
finally:
client.stop(container=container.get("Id"))
return runner
def add_flow(self, flow: "prefect.core.flow.Flow") -> str:
"""
Method for adding a new flow to this Storage object.
Args:
- flow (Flow): a Prefect Flow to add
Returns:
- str: the location of the newly added flow in this Storage object
"""
if flow.name in self:
raise ValueError(
'Name conflict: Flow with the name "{}" is already present in this storage.'.format(
flow.name
)
)
flow_path = self.path or "{}/flows/{}.prefect".format(
self.prefect_directory, slugify(flow.name)
)
self.flows[flow.name] = flow_path
self._flows[flow.name] = flow # needed prior to build
return flow_path
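    # Illustrative usage (not executed; the registry URL, dependency list and `flow`
    # object are placeholders, not values defined in this module):
    #
    #   storage = Docker(registry_url="registry.example.com", python_dependencies=["pandas"])
    #   storage.add_flow(flow)
    #   storage = storage.build()  # populates image_name and image_tag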
def get_flow(self, flow_location: str = None) -> "prefect.core.flow.Flow":
"""
Given a file path within this Docker container, returns the underlying Flow.
Note that this method should only be run _within_ the container itself.
Args:
- flow_location (str, optional): the file path of a flow within this container. Will use
`path` if not provided.
Returns:
- Flow: the requested flow
Raises:
- ValueError: if the flow is not contained in this storage
"""
if flow_location:
if flow_location not in self.flows.values():
raise ValueError("Flow is not contained in this Storage")
elif self.path:
flow_location = self.path
else:
raise ValueError("No flow location provided")
if self.stored_as_script:
return extract_flow_from_file(file_path=flow_location)
with open(flow_location, "rb") as f:
return cloudpickle.load(f)
@property
def name(self) -> str:
"""
Full name of the Docker image.
"""
if None in [self.image_name, self.image_tag]:
raise ValueError(
"Docker storage is missing required fields image_name and image_tag"
)
return "{}:{}".format(
PurePosixPath(self.registry_url or "", self.image_name), # type: ignore
self.image_tag, # type: ignore
)
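    # Illustrative result (not executed):
    #   Docker(registry_url="gcr.io/my-project", image_name="my-flow", image_tag="latest").name
    #   # -> "gcr.io/my-project/my-flow:latest"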
def __contains__(self, obj: Any) -> bool:
"""
Method for determining whether an object is contained within this storage.
"""
if not isinstance(obj, str):
return False
return obj in self.flows
def build(self, push: bool = True) -> "Storage":
"""
Build the Docker storage object. If image name and tag are not set,
they will be autogenerated.
Args:
- push (bool, optional): Whether or not to push the built Docker image, this
requires the `registry_url` to be set
Returns:
- Docker: a new Docker storage object that contains information about how and
where the flow is stored. Image name and tag are generated during the
build process.
Raises:
- InterruptedError: if either pushing or pulling the image fails
"""
if len(self.flows) != 1:
self.image_name = self.image_name or str(uuid.uuid4())
else:
self.image_name = self.image_name or slugify(list(self.flows.keys())[0])
self.image_tag = self.image_tag or slugify(pendulum.now("utc").isoformat())
self._build_image(push=push)
return self
def _build_image(self, push: bool = True) -> tuple:
"""
Build a Docker image using the docker python library.
Args:
- push (bool, optional): Whether or not to push the built Docker image; this
requires the `registry_url` to be set
Returns:
- tuple: generated UUID strings `image_name`, `image_tag`
Raises:
- ValueError: if the image fails to build
- InterruptedError: if either pushing or pulling the image fails
"""
assert isinstance(self.image_name, str), "Image name must be provided"
assert isinstance(self.image_tag, str), "An image tag must be provided"
# Make temporary directory to hold serialized flow, healthcheck script, and dockerfile
# note that if the user provides a custom dockerfile, we create the temporary directory
# within the current working directory to preserve their build context
with tempfile.TemporaryDirectory(
dir="." if self.dockerfile else None
) as tempdir:
# Build the dockerfile
if self.base_image and not self.local_image:
self.pull_image()
dockerfile_path = self.create_dockerfile_object(directory=tempdir)
client = self._get_client()
# Verify that a registry url has been provided for images that should be pushed
if self.registry_url:
full_name = str(PurePosixPath(self.registry_url, self.image_name))
elif push is True:
warnings.warn(
"This Docker storage object has no `registry_url`, and "
"will not be pushed.",
UserWarning,
stacklevel=2,
)
full_name = self.image_name
else:
full_name = self.image_name
# Use the docker client to build the image
self.logger.info("Building the flow's Docker storage...")
if sys.platform == "win32":
# problem with docker and relative paths only on windows
dockerfile_path = os.path.abspath(dockerfile_path)
output = client.build(
path="." if self.dockerfile else tempdir,
dockerfile=dockerfile_path,
tag="{}:{}".format(full_name, self.image_tag),
forcerm=True,
**self.build_kwargs,
)
self._parse_generator_output(output)
if len(client.images(name="{}:{}".format(full_name, self.image_tag))) == 0:
raise ValueError(
"Your docker image failed to build! Your flow might have "
"failed one of its deployment health checks - please ensure "
"that all necessary files and dependencies have been included."
)
# Push the image if requested
if push and self.registry_url:
self.push_image(full_name, self.image_tag)
# Remove the image locally after being pushed
client.remove_image(
image="{}:{}".format(full_name, self.image_tag), force=True
)
return self.image_name, self.image_tag
########################
# Dockerfile Creation
########################
def create_dockerfile_object(self, directory: str) -> str:
"""
Writes a dockerfile to the provided directory using the specified
arguments on this Docker storage object.
In order for the docker python library to build a container it needs a
Dockerfile that it can use to define the container. This function takes the
specified arguments then writes them to a temporary file called Dockerfile.
*Note*: if `files` are added to this container, they will be copied to this
directory as well.
Args:
- directory (str): A directory where the Dockerfile will be created
Returns:
- str: the absolute file path to the Dockerfile
"""
# Generate single pip install command for python dependencies
pip_installs = "RUN pip install "
if self.python_dependencies:
for dependency in self.python_dependencies:
pip_installs += "{} ".format(dependency)
# Generate ENV variables to load into the image
env_vars = ""
if self.env_vars:
white_space = " " * 20
env_vars = "ENV " + " \\ \n{}".format(white_space).join(
"{k}={v}".format(k=k, v=v) for k, v in self.env_vars.items()
)
# Copy user specified files into the image
copy_files = ""
if self.files:
for src, dest in self.files.items():
fname = os.path.basename(src)
full_fname = os.path.join(directory, fname)
if os.path.exists(full_fname) and filecmp.cmp(src, full_fname) is False:
raise ValueError(
"File {fname} already exists in {directory}".format(
fname=full_fname, directory=directory
)
)
else:
if os.path.isdir(src):
shutil.copytree(
src=src, dst=full_fname, symlinks=False, ignore=None
)
else:
shutil.copy2(src=src, dst=full_fname)
copy_files += "COPY {fname} {dest}\n".format(
fname=full_fname.replace("\\", "/") if self.dockerfile else fname,
dest=dest,
)
# Write all flows to file and load into the image
copy_flows = ""
if not self.stored_as_script:
for flow_name, flow_location in self.flows.items():
clean_name = slugify(flow_name)
flow_path = os.path.join(directory, "{}.flow".format(clean_name))
with open(flow_path, "wb") as f:
cloudpickle.dump(self._flows[flow_name], f)
copy_flows += "COPY {source} {dest}\n".format(
source=flow_path.replace("\\", "/")
if self.dockerfile
else "{}.flow".format(clean_name),
dest=flow_location,
)
else:
if not self.path:
raise ValueError(
"A `path` must be provided to show where flow `.py` file is stored in the image."
)
# Write all extra commands that should be run in the image
installation_commands = ""
for cmd in self.installation_commands:
installation_commands += "RUN {}\n".format(cmd)
# Write final user commands that should be run in the image
final_commands = (
""
if self.extra_dockerfile_commands is None
else str.join("\n", self.extra_dockerfile_commands)
)
# Write a healthcheck script into the image
with open(
os.path.join(os.path.dirname(__file__), "_healthcheck.py"), "r"
) as healthscript:
healthcheck = healthscript.read()
healthcheck_loc = os.path.join(directory, "healthcheck.py")
with open(healthcheck_loc, "w") as health_file:
health_file.write(healthcheck)
if self.dockerfile:
with open(self.dockerfile, "r") as contents:
base_commands = textwrap.indent("\n" + contents.read(), prefix=" " * 16)
else:
base_commands = "FROM {base_image}".format(base_image=self.base_image)
file_contents = textwrap.dedent(
"""\
{base_commands}
RUN pip install pip --upgrade
{installation_commands}
{pip_installs}
RUN mkdir -p {prefect_dir}/
{copy_flows}
COPY {healthcheck_loc} {prefect_dir}/healthcheck.py
{copy_files}
{env_vars}
{final_commands}
""".format(
base_commands=base_commands,
installation_commands=installation_commands,
pip_installs=pip_installs,
copy_flows=copy_flows,
healthcheck_loc=healthcheck_loc.replace("\\", "/")
if self.dockerfile
else "healthcheck.py",
copy_files=copy_files,
env_vars=env_vars,
prefect_dir=self.prefect_directory,
final_commands=final_commands,
)
)
# append the line that runs the healthchecks
# skip over for now if storing flow as file
if not self.ignore_healthchecks:
file_contents += textwrap.dedent(
"""
RUN python {prefect_dir}/healthcheck.py '[{flow_file_paths}]' '{python_version}'
""".format(
flow_file_paths=", ".join(
['"{}"'.format(k) for k in self.flows.values()]
),
python_version=(sys.version_info.major, sys.version_info.minor),
prefect_dir=self.prefect_directory,
)
)
file_contents = "\n".join(line.lstrip() for line in file_contents.split("\n"))
dockerfile_path = os.path.join(directory, "Dockerfile")
with open(dockerfile_path, "w+") as dockerfile:
dockerfile.write(file_contents)
return dockerfile_path
########################
# Docker Utilities
########################
def _get_client(self) -> "docker.APIClient":
# 'import docker' is expensive time-wise, we should do this just-in-time to keep
# the 'import prefect' time low
import docker
return docker.APIClient(
base_url=self.base_url, version="auto", tls=self.tls_config
)
def pull_image(self) -> None:
"""Pull the image specified so it can be built.
In order for the docker python library to use a base image it must be pulled
from either the main docker registry or a separate registry that must be set as
`registry_url` on this class.
Raises:
- InterruptedError: if pulling the image fails
"""
client = self._get_client()
output = client.pull(self.base_image, stream=True, decode=True)
for line in output:
if line.get("error"):
raise InterruptedError(line.get("error"))
if line.get("progress"):
print(line.get("status"), line.get("progress"), end="\r")
print("")
def push_image(self, image_name: str, image_tag: str) -> None:
"""Push this environment to a registry
Args:
- image_name (str): Name for the image
- image_tag (str): Tag for the image
Raises:
- InterruptedError: if pushing the image fails
"""
client = self._get_client()
self.logger.info("Pushing image to the registry...")
output = client.push(image_name, tag=image_tag, stream=True, decode=True)
for line in output:
if line.get("error"):
raise InterruptedError(line.get("error"))
if line.get("progress"):
print(line.get("status"), line.get("progress"), end="\r")
print("")
def _parse_generator_output(self, generator: Iterable) -> None:
"""
Parses and writes a Docker command's output to stdout
"""
for item in generator:
item = item.decode("utf-8")
for line in item.split("\n"):
if line:
output = json.loads(line).get("stream") or json.loads(line).get(
"errorDetail", {}
).get("message")
if output and output != "\n":
print(output.strip("\n"))
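# A minimal usage sketch of the storage class above. This is an assumption-laden example:
# the flow object, registry URL, and constructor keyword arguments are hypothetical and
# only illustrate how `add_flow`, `build`, `name`, and `get_flow` fit together.
#
# storage = Docker(registry_url="registry.example.com", python_dependencies=["pandas"])
# path_in_image = storage.add_flow(flow)  # records where the pickled flow will live
# storage = storage.build(push=True)      # builds (and optionally pushes) the image
# print(storage.name)                     # e.g. "registry.example.com/<image_name>:<image_tag>"
# # Inside the running container the flow can be recovered with:
# # flow = storage.get_flow(path_in_image)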
| [] | [] | ["DOCKER_HOST"] | [] | ["DOCKER_HOST"] | python | 1 | 0 | |
dbConnection/coon2.go | package main
import (
"database/sql"
"fmt"
"log"
"os"
"github.com/go-sql-driver/mysql"
)
var db *sql.DB
type Album struct {
ID int64
Title string
Artist string
Price float32
}
func main() {
// Capture connection properties.
cfg := mysql.Config{
User: os.Getenv("DBUSER"),
Passwd: os.Getenv("DBPASS"),
Net: "tcp",
Addr: "127.0.0.1:3306",
DBName: "latihanGo",
}
// Get a database handle.
var err error
db, err = sql.Open("mysql", cfg.FormatDSN())
if err != nil {
log.Fatal(err)
}
pingErr := db.Ping()
if pingErr != nil {
log.Fatal(pingErr)
}
fmt.Println("Connected!")
albums, err := albumsByArtist("Jon Coltrane")
if err != nil {
log.Fatal(err)
}
fmt.Printf("Albums found: %v\n", albums)
}
func albumsByArtist(name string) ([]Album, error) {
// An albums slice to hold data from returned rows.
var albums []Album
rows, err := db.Query("SELECT * FROM album WHERE artist = ?", name)
if err != nil {
return nil, fmt.Errorf("albumsByArtist %q: %v", name, err)
}
defer rows.Close()
// Loop through rows, using Scan to assign column data to struct fields.
for rows.Next() {
var alb Album
if err := rows.Scan(&alb.ID, &alb.Title, &alb.Artist, &alb.Price); err != nil {
return nil, fmt.Errorf("albumsByArtist %q: %v", name, err)
}
albums = append(albums, alb)
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("albumsByArtist %q: %v", name, err)
}
return albums, nil
}
| ["\"DBUSER\"", "\"DBPASS\""] | [] | ["DBUSER", "DBPASS"] | [] | ["DBUSER", "DBPASS"] | go | 2 | 0 | |
shared/version/api.go | package version
import (
"os"
"strconv"
)
// APIVersion contains the API base version. Only bumped for backward incompatible changes.
var APIVersion = "1.0"
// APIExtensions is the list of all API extensions in the order they were added.
//
// The following kind of changes come with a new extensions:
//
// - New configuration key
// - New valid values for a configuration key
// - New REST API endpoint
// - New argument inside an existing REST API call
// - New HTTPs authentication mechanisms or protocols
//
// This list is used mainly by the LXD server code, but it's in the shared
// package as well for reference.
var APIExtensions = []string{
"storage_zfs_remove_snapshots",
"container_host_shutdown_timeout",
"container_stop_priority",
"container_syscall_filtering",
"auth_pki",
"container_last_used_at",
"etag",
"patch",
"usb_devices",
"https_allowed_credentials",
"image_compression_algorithm",
"directory_manipulation",
"container_cpu_time",
"storage_zfs_use_refquota",
"storage_lvm_mount_options",
"network",
"profile_usedby",
"container_push",
"container_exec_recording",
"certificate_update",
"container_exec_signal_handling",
"gpu_devices",
"container_image_properties",
"migration_progress",
"id_map",
"network_firewall_filtering",
"network_routes",
"storage",
"file_delete",
"file_append",
"network_dhcp_expiry",
"storage_lvm_vg_rename",
"storage_lvm_thinpool_rename",
"network_vlan",
"image_create_aliases",
"container_stateless_copy",
"container_only_migration",
"storage_zfs_clone_copy",
"unix_device_rename",
"storage_lvm_use_thinpool",
"storage_rsync_bwlimit",
"network_vxlan_interface",
"storage_btrfs_mount_options",
"entity_description",
"image_force_refresh",
"storage_lvm_lv_resizing",
"id_map_base",
"file_symlinks",
"container_push_target",
"network_vlan_physical",
"storage_images_delete",
"container_edit_metadata",
"container_snapshot_stateful_migration",
"storage_driver_ceph",
"storage_ceph_user_name",
"resource_limits",
"storage_volatile_initial_source",
"storage_ceph_force_osd_reuse",
"storage_block_filesystem_btrfs",
"resources",
"kernel_limits",
"storage_api_volume_rename",
"macaroon_authentication",
"network_sriov",
"console",
"restrict_devlxd",
"migration_pre_copy",
"infiniband",
"maas_network",
"devlxd_events",
"proxy",
"network_dhcp_gateway",
"file_get_symlink",
"network_leases",
"unix_device_hotplug",
"storage_api_local_volume_handling",
"operation_description",
"clustering",
"event_lifecycle",
"storage_api_remote_volume_handling",
"nvidia_runtime",
"container_mount_propagation",
"container_backup",
"devlxd_images",
"container_local_cross_pool_handling",
"proxy_unix",
"proxy_udp",
"clustering_join",
"proxy_tcp_udp_multi_port_handling",
"network_state",
"proxy_unix_dac_properties",
"container_protection_delete",
"unix_priv_drop",
"pprof_http",
"proxy_haproxy_protocol",
"network_hwaddr",
"proxy_nat",
"network_nat_order",
"container_full",
"candid_authentication",
"backup_compression",
"candid_config",
"nvidia_runtime_config",
"storage_api_volume_snapshots",
"storage_unmapped",
"projects",
"candid_config_key",
"network_vxlan_ttl",
"container_incremental_copy",
"usb_optional_vendorid",
"snapshot_scheduling",
"container_copy_project",
"clustering_server_address",
"clustering_image_replication",
"container_protection_shift",
"snapshot_expiry",
"container_backup_override_pool",
"snapshot_expiry_creation",
"network_leases_location",
"resources_cpu_socket",
"resources_gpu",
"resources_numa",
"kernel_features",
"id_map_current",
"event_location",
"storage_api_remote_volume_snapshots",
"network_nat_address",
"container_nic_routes",
"rbac",
"cluster_internal_copy",
"seccomp_notify",
"lxc_features",
"container_nic_ipvlan",
"network_vlan_sriov",
"storage_cephfs",
"container_nic_ipfilter",
"resources_v2",
"container_exec_user_group_cwd",
"container_syscall_intercept",
"container_disk_shift",
"storage_shifted",
"resources_infiniband",
"daemon_storage",
"instances",
"image_types",
"resources_disk_sata",
"clustering_roles",
"images_expiry",
"resources_network_firmware",
"backup_compression_algorithm",
"ceph_data_pool_name",
"container_syscall_intercept_mount",
"compression_squashfs",
"container_raw_mount",
"container_nic_routed",
"container_syscall_intercept_mount_fuse",
"container_disk_ceph",
"virtual-machines",
}
// APIExtensionsCount returns the number of available API extensions.
func APIExtensionsCount() int {
count := len(APIExtensions)
// This environment variable is an internal one to force the code
// to believe that we have an API extensions version greater than we
// actually have. It's used by integration tests to exercise the
// cluster upgrade process.
artificialBump := os.Getenv("LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS")
if artificialBump != "" {
n, err := strconv.Atoi(artificialBump)
if err == nil {
count += n
}
}
return count
}
| ["\"LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS\""] | [] | ["LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS"] | [] | ["LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS"] | go | 1 | 0 | |
trinity/utils/chains.py | import argparse
import os
from pathlib import Path
from typing import (
Iterable,
Tuple,
Union,
)
from eth_utils import (
decode_hex,
to_dict,
)
from eth_keys import keys
from eth_keys.datatypes import PrivateKey
from eth.chains.mainnet import (
MAINNET_NETWORK_ID,
)
from eth.chains.ropsten import (
ROPSTEN_NETWORK_ID,
)
from p2p.constants import DEFAULT_MAX_PEERS
from trinity.constants import SYNC_LIGHT
from .xdg import (
get_xdg_trinity_root,
)
DEFAULT_DATA_DIRS = {
ROPSTEN_NETWORK_ID: 'ropsten',
MAINNET_NETWORK_ID: 'mainnet',
}
#
# Filesystem path utils
#
def get_local_data_dir(chain_name: str) -> Path:
"""
Returns the base directory path where data for a given chain will be stored.
"""
try:
return Path(os.environ['TRINITY_DATA_DIR'])
except KeyError:
return Path(os.path.join(get_xdg_trinity_root(), chain_name))
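# For illustration (paths are hypothetical): with the environment override above,
#
# os.environ['TRINITY_DATA_DIR'] = '/tmp/trinity-data'
# get_local_data_dir('ropsten')  # -> Path('/tmp/trinity-data')
#
# and without the variable set, the directory falls back to <xdg trinity root>/ropsten.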
def get_data_dir_for_network_id(network_id: int) -> Path:
"""
Returns the data directory for the chain associated with the given network
id. If the network id is unknown, raises a KeyError.
"""
try:
return get_local_data_dir(DEFAULT_DATA_DIRS[network_id])
except KeyError:
raise KeyError("Unknown network id: `{0}`".format(network_id))
LOG_DIRNAME = 'logs'
LOG_FILENAME = 'trinity.log'
def get_logfile_path(data_dir: Path) -> Path:
"""
Return the path to the log file.
"""
return data_dir / LOG_DIRNAME / LOG_FILENAME
NODEKEY_FILENAME = 'nodekey'
def get_nodekey_path(data_dir: Path) -> Path:
"""
Returns the path to the private key used for devp2p connections.
"""
return Path(os.environ.get(
'TRINITY_NODEKEY',
str(data_dir / NODEKEY_FILENAME),
))
DATABASE_SOCKET_FILENAME = 'db.ipc'
def get_database_socket_path(data_dir: Path) -> Path:
"""
Returns the path to the ipc socket used by the database process. The
`TRINITY_DATABASE_IPC` environment variable takes precedence when it is set.
"""
return Path(os.environ.get(
'TRINITY_DATABASE_IPC',
data_dir / DATABASE_SOCKET_FILENAME,
))
JSONRPC_SOCKET_FILENAME = 'jsonrpc.ipc'
def get_jsonrpc_socket_path(data_dir: Path) -> Path:
"""
Returns the path to the ipc socket for the JSON-RPC server.
The `TRINITY_JSONRPC_IPC` environment variable takes precedence when it is set.
"""
return Path(os.environ.get(
'TRINITY_JSONRPC_IPC',
data_dir / JSONRPC_SOCKET_FILENAME,
))
#
# Nodekey loading
#
def load_nodekey(nodekey_path: Path) -> PrivateKey:
with nodekey_path.open('rb') as nodekey_file:
nodekey_raw = nodekey_file.read()
nodekey = keys.PrivateKey(nodekey_raw)
return nodekey
@to_dict
def construct_chain_config_params(
args: argparse.Namespace) -> Iterable[Tuple[str, Union[int, str, Tuple[str, ...]]]]:
"""
Helper function for constructing the kwargs to initialize a ChainConfig object.
"""
yield 'network_id', args.network_id
if args.data_dir is not None:
yield 'data_dir', args.data_dir
if args.nodekey_path and args.nodekey:
raise ValueError("Cannot provide both nodekey_path and nodekey")
elif args.nodekey_path is not None:
yield 'nodekey_path', args.nodekey_path
elif args.nodekey is not None:
yield 'nodekey', decode_hex(args.nodekey)
if args.sync_mode is not None:
yield 'sync_mode', args.sync_mode
if args.max_peers is not None:
yield 'max_peers', args.max_peers
else:
yield 'max_peers', _default_max_peers(args.sync_mode)
if args.port is not None:
yield 'port', args.port
if args.preferred_nodes is None:
yield 'preferred_nodes', tuple()
else:
yield 'preferred_nodes', tuple(args.preferred_nodes)
def _default_max_peers(sync_mode: str) -> int:
if sync_mode == SYNC_LIGHT:
return DEFAULT_MAX_PEERS // 2
else:
return DEFAULT_MAX_PEERS
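# A small sketch of how these helpers compose. The attribute values below are hypothetical
# and assume an argparse.Namespace shaped like the one trinity's CLI produces:
#
# args = argparse.Namespace(
#     network_id=ROPSTEN_NETWORK_ID, data_dir=None, nodekey=None, nodekey_path=None,
#     sync_mode=SYNC_LIGHT, max_peers=None, port=30303, preferred_nodes=None,
# )
# params = construct_chain_config_params(args)
# # -> a dict containing 'network_id', 'sync_mode', 'port', 'preferred_nodes' == () and
# #    'max_peers' == DEFAULT_MAX_PEERS // 2 (halved because sync_mode is light)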
| [] | [] | ["TRINITY_JSONRPC_IPC", "TRINITY_DATA_DIR", "TRINITY_NODEKEY", "TRINITY_DATABASE_IPC"] | [] | ["TRINITY_JSONRPC_IPC", "TRINITY_DATA_DIR", "TRINITY_NODEKEY", "TRINITY_DATABASE_IPC"] | python | 4 | 0 | |
test/RDFDatabank/rdflib/plugins/parsers/notation3.py | #!/usr/bin/env python
u"""
notation3.py - Standalone Notation3 Parser
Derived from CWM, the Closed World Machine
Authors of the original suite:
* Dan Connolly <@@>
* Tim Berners-Lee <@@>
* Yosi Scharf <@@>
* Joseph M. Reagle Jr. <[email protected]>
* Rich Salz <[email protected]>
http://www.w3.org/2000/10/swap/notation3.py
Copyright 2000-2007, World Wide Web Consortium.
Copyright 2001, MIT.
Copyright 2001, Zolera Systems Inc.
License: W3C Software License
http://www.w3.org/Consortium/Legal/copyright-software
Modified by Sean B. Palmer
Copyright 2007, Sean B. Palmer. \u32E1
Modified to work with rdflib by Gunnar Aastrand Grimnes
Copyright 2010, Gunnar A. Grimnes
"""
# Python standard libraries
import types
import sys
import os
import string
import re
import time
import StringIO
from string import find, rfind
from decimal import Decimal
from rdflib.term import URIRef, BNode, Literal, Variable, _XSD_PFX
from rdflib.graph import QuotedGraph, ConjunctiveGraph
from rdflib.parser import Parser
# Incestuous.. would be nice to separate N3 and XML
# from sax2rdf import XMLtoDOM
def XMLtoDOM(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
# SWAP http://www.w3.org/2000/10/swap
# from diag import verbosity, setVerbosity, progress
def verbosity(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
def setVerbosity(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
def progress(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
def splitFrag(uriref):
"""split a URI reference between the fragment and the rest.
Punctuation is thrown away.
e.g.
>>> splitFrag("abc#def")
('abc', 'def')
>>> splitFrag("abcdef")
('abcdef', None)
"""
i = rfind(uriref, "#")
if i>= 0: return uriref[:i], uriref[i+1:]
else: return uriref, None
def splitFragP(uriref, punct=0):
"""split a URI reference before the fragment
Punctuation is kept.
e.g.
>>> splitFragP("abc#def")
('abc', '#def')
>>> splitFragP("abcdef")
('abcdef', '')
"""
i = rfind(uriref, "#")
if i>= 0: return uriref[:i], uriref[i:]
else: return uriref, ''
def join(here, there):
"""join an absolute URI and URI reference
(non-ascii characters are supported/doctested;
haven't checked the details of the IRI spec though)
here is assumed to be absolute.
there is URI reference.
>>> join('http://example/x/y/z', '../abc')
'http://example/x/abc'
Raise ValueError if there uses relative path
syntax but here has no hierarchical path.
>>> join('mid:foo@example', '../foo')
Traceback (most recent call last):
raise ValueError, here
ValueError: Base <mid:foo@example> has no slash after colon - with relative '../foo'.
>>> join('http://example/x/y/z', '')
'http://example/x/y/z'
>>> join('mid:foo@example', '#foo')
'mid:foo@example#foo'
We grok IRIs
>>> len(u'Andr\\xe9')
5
>>> join('http://example.org/', u'#Andr\\xe9')
u'http://example.org/#Andr\\xe9'
"""
assert(find(here, "#") < 0), "Base may not contain hash: '%s'"% here # caller must splitFrag (why?)
slashl = find(there, '/')
colonl = find(there, ':')
# join(base, 'foo:/') -- absolute
if colonl >= 0 and (slashl < 0 or colonl < slashl):
return there
bcolonl = find(here, ':')
assert(bcolonl >= 0), "Base uri '%s' is not absolute" % here # else it's not absolute
path, frag = splitFragP(there)
if not path: return here + frag
# join('mid:foo@example', '../foo') bzzt
if here[bcolonl+1:bcolonl+2] <> '/':
raise ValueError ("Base <%s> has no slash after colon - with relative '%s'." %(here, there))
if here[bcolonl+1:bcolonl+3] == '//':
bpath = find(here, '/', bcolonl+3)
else:
bpath = bcolonl+1
# join('http://xyz', 'foo')
if bpath < 0:
bpath = len(here)
here = here + '/'
# join('http://xyz/', '//abc') => 'http://abc'
if there[:2] == '//':
return here[:bcolonl+1] + there
# join('http://xyz/', '/abc') => 'http://xyz/abc'
if there[:1] == '/':
return here[:bpath] + there
slashr = rfind(here, '/')
while 1:
if path[:2] == './':
path = path[2:]
if path == '.':
path = ''
elif path[:3] == '../' or path == '..':
path = path[3:]
i = rfind(here, '/', bpath, slashr)
if i >= 0:
here = here[:i+1]
slashr = i
else:
break
return here[:slashr+1] + path + frag
commonHost = re.compile(r'^[-_a-zA-Z0-9.]+:(//[^/]*)?/[^/]*$')
def refTo(base, uri):
"""figure out a relative URI reference from base to uri
>>> refTo('http://example/x/y/z', 'http://example/x/abc')
'../abc'
>>> refTo('file:/ex/x/y', 'file:/ex/x/q/r#s')
'q/r#s'
>>> refTo(None, 'http://ex/x/y')
'http://ex/x/y'
>>> refTo('http://ex/x/y', 'http://ex/x/y')
''
Note the relationship between refTo and join:
join(x, refTo(x, y)) == y
which points out certain strings which cannot be URIs. e.g.
>>> x='http://ex/x/y';y='http://ex/x/q:r';join(x, refTo(x, y)) == y
0
So 'http://ex/x/q:r' is not a URI. Use 'http://ex/x/q%3ar' instead:
>>> x='http://ex/x/y';y='http://ex/x/q%3ar';join(x, refTo(x, y)) == y
1
This one checks that it uses a root-realtive one where that is
all they share. Now uses root-relative where no path is shared.
This is a matter of taste but tends to give more resilience IMHO
-- and shorter paths
Note that base may be None, meaning no base. In some situations, there
just ain't a base. Slife. In these cases, relTo returns the absolute value.
The axiom abs(,rel(b,x))=x still holds.
This saves people having to set the base to "bogus:".
>>> refTo('http://ex/x/y/z', 'http://ex/r')
'/r'
"""
# assert base # don't mask bugs -danc # not a bug. -tim
if not base: return uri
if base == uri: return ""
# Find how many path segments in common
i=0
while i<len(uri) and i<len(base):
if uri[i] == base[i]: i = i + 1
else: break
# print "# relative", base, uri, " same up to ", i
# i point to end of shortest one or first difference
m = commonHost.match(base[:i])
if m:
k=uri.find("//")
if k<0: k=-2 # no host
l=uri.find("/", k+2)
if uri[l+1:l+2] != "/" and base[l+1:l+2] != "/" and uri[:l]==base[:l]:
return uri[l:]
if uri[i:i+1] =="#" and len(base) == i: return uri[i:] # fragment of base
while i>0 and uri[i-1] != '/' : i=i-1 # scan for slash
if i < 3: return uri # No way.
if string.find(base, "//", i-2)>0 \
or string.find(uri, "//", i-2)>0: return uri # An unshared "//"
if string.find(base, ":", i)>0: return uri # An unshared ":"
n = string.count(base, "/", i)
if n == 0 and i<len(uri) and uri[i] == '#':
return "./" + uri[i:]
elif n == 0 and i == len(uri):
return "./"
else:
return ("../" * n) + uri[i:]
def base():
"""The base URI for this process - the Web equiv of cwd
Relative or absolute unix-standard filenames parsed relative to
this yield the URI of the file.
If we had a reliable way of getting a computer name,
we should put it in the hostname just to prevent ambiguity
"""
# return "file://" + hostname + os.getcwd() + "/"
return "file://" + _fixslash(os.getcwd()) + "/"
def _fixslash(str):
""" Fix windowslike filename to unixlike - (#ifdef WINDOWS)"""
s = str
for i in range(len(s)):
if s[i] == "\\": s = s[:i] + "/" + s[i+1:]
if s[0] != "/" and s[1] == ":": s = s[2:] # @@@ Hack when drive letter present
return s
URI_unreserved = "ABCDEFGHIJJLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~"
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
def canonical(str_in):
"""Convert equivalent URIs (or parts) to the same string
There are many different levels of URI canonicalization
which are possible. See http://www.ietf.org/rfc/rfc3986.txt
Done:
- Converting unicode IRI to utf-8
- Escaping all non-ASCII
- De-escaping, if escaped, ALPHA (%41-%5A and %61-%7A), DIGIT (%30-%39),
hyphen (%2D), period (%2E), underscore (%5F), or tilde (%7E) (Sect 2.4)
- Making all escapes uppercase hexadecimal
Not done:
- Making URI scheme lowercase
- changing /./ or /foo/../ to / with care not to change host part
>>> canonical("foo bar")
'foo%20bar'
>>> canonical(u'http:')
'http:'
>>> canonical('fran%c3%83%c2%a7ois')
'fran%C3%83%C2%A7ois'
>>> canonical('a')
'a'
>>> canonical('%4e')
'N'
>>> canonical('%9d')
'%9D'
>>> canonical('%2f')
'%2F'
>>> canonical('%2F')
'%2F'
"""
if type(str_in) == type(u''):
s8 = str_in.encode('utf-8')
else:
s8 = str_in
s = ''
i = 0
while i < len(s8):
ch = s8[i]; n = ord(ch)
if (n > 126) or (n < 33) : # %-encode controls, SP, DEL, and utf-8
s += "%%%02X" % ord(ch)
elif ch == '%' and i+2 < len(s8):
ch2 = s8[i+1:i+3].decode('hex')
if ch2 in URI_unreserved: s += ch2
else: s += "%%%02X" % ord(ch2)
i = i+3
continue
else:
s += ch
i = i +1
return s
CONTEXT = 0
PRED = 1
SUBJ = 2
OBJ = 3
PARTS = PRED, SUBJ, OBJ
ALL4 = CONTEXT, PRED, SUBJ, OBJ
SYMBOL = 0
FORMULA = 1
LITERAL = 2
LITERAL_DT = 21
LITERAL_LANG = 22
ANONYMOUS = 3
XMLLITERAL = 25
Logic_NS = "http://www.w3.org/2000/10/swap/log#"
NODE_MERGE_URI = Logic_NS + "is" # Pseudo-property indicating node merging
forSomeSym = Logic_NS + "forSome"
forAllSym = Logic_NS + "forAll"
RDF_type_URI = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
RDF_NS_URI = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
OWL_NS = "http://www.w3.org/2002/07/owl#"
DAML_sameAs_URI = OWL_NS+"sameAs"
parsesTo_URI = Logic_NS + "parsesTo"
RDF_spec = "http://www.w3.org/TR/REC-rdf-syntax/"
List_NS = RDF_NS_URI # From 20030808
_Old_Logic_NS = "http://www.w3.org/2000/10/swap/log.n3#"
N3_first = (SYMBOL, List_NS + "first")
N3_rest = (SYMBOL, List_NS + "rest")
N3_li = (SYMBOL, List_NS + "li")
N3_nil = (SYMBOL, List_NS + "nil")
N3_List = (SYMBOL, List_NS + "List")
N3_Empty = (SYMBOL, List_NS + "Empty")
runNamespaceValue = None
def runNamespace():
"Return a URI suitable as a namespace for run-local objects"
# @@@ include hostname (privacy?) (hash it?)
global runNamespaceValue
if runNamespaceValue == None:
try:
runNamespaceValue = os.environ["CWM_RUN_NS"]
except KeyError:
runNamespaceValue = join(
base(), ".run-" + `time.time()` + "p"+ `os.getpid()` +"#")
# was uripath.join, and uripath.base
runNamespaceValue = join(base(), runNamespaceValue) # absolutize
return runNamespaceValue
nextu = 0
def uniqueURI():
"A unique URI"
global nextu
nextu += 1
return runNamespace() + "u_" + `nextu`
class URISyntaxError(ValueError):
"""A parameter is passed to a routine that requires a URI reference"""
pass
tracking = False
chatty_flag = 50
from xml.dom import Node
try:
from xml.ns import XMLNS
except:
class XMLNS:
BASE = "http://www.w3.org/2000/xmlns/"
XML = "http://www.w3.org/XML/1998/namespace"
_attrs = lambda E: (E.attributes and E.attributes.values()) or []
_children = lambda E: E.childNodes or []
_IN_XML_NS = lambda n: n.namespaceURI == XMLNS.XML
_inclusive = lambda n: n.unsuppressedPrefixes == None
# Does a document/PI has lesser/greater document order than the
# first element?
_LesserElement, _Element, _GreaterElement = range(3)
def _sorter(n1,n2):
'''_sorter(n1,n2) -> int
Sorting predicate for non-NS attributes.'''
i = cmp(n1.namespaceURI, n2.namespaceURI)
if i: return i
return cmp(n1.localName, n2.localName)
def _sorter_ns(n1,n2):
'''_sorter_ns((n,v),(n,v)) -> int
"(an empty namespace URI is lexicographically least)."'''
if n1[0] == 'xmlns': return -1
if n2[0] == 'xmlns': return 1
return cmp(n1[0], n2[0])
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
'''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
Return true if that namespace is utilized within the node'''
if n.startswith('xmlns:'):
n = n[6:]
elif n.startswith('xmlns'):
n = n[5:]
if (n=="" and node.prefix in ["#default", None]) or \
n == node.prefix or n in unsuppressedPrefixes:
return 1
for attr in other_attrs:
if n == attr.prefix: return 1
return 0
#_in_subset = lambda subset, node: not subset or node in subset
_in_subset = lambda subset, node: subset is None or node in subset # rich's tweak
class _implementation:
'''Implementation class for C14N. This accompanies a node during its
processing and includes the parameters and processing state.'''
# Handler for each node type; populated during module instantiation.
handlers = {}
def __init__(self, node, write, **kw):
'''Create and run the implementation.'''
self.write = write
self.subset = kw.get('subset')
self.comments = kw.get('comments', 0)
self.unsuppressedPrefixes = kw.get('unsuppressedPrefixes')
nsdict = kw.get('nsdict', { 'xml': XMLNS.XML, 'xmlns': XMLNS.BASE })
# Processing state.
self.state = (nsdict, {'xml':''}, {}) #0422
if node.nodeType == Node.DOCUMENT_NODE:
self._do_document(node)
elif node.nodeType == Node.ELEMENT_NODE:
self.documentOrder = _Element # At document element
if not _inclusive(self):
self._do_element(node)
else:
inherited = self._inherit_context(node)
self._do_element(node, inherited)
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
pass
elif node.nodeType == Node.TEXT_NODE:
self._do_text(node)
else:
raise TypeError, str(node)
def _inherit_context(self, node):
'''_inherit_context(self, node) -> list
Scan ancestors of attribute and namespace context. Used only
for single element node canonicalization, not for subset
canonicalization.'''
# Collect the initial list of xml:foo attributes.
xmlattrs = filter(_IN_XML_NS, _attrs(node))
# Walk up and get all xml:XXX attributes we inherit.
inherited, parent = [], node.parentNode
while parent and parent.nodeType == Node.ELEMENT_NODE:
for a in filter(_IN_XML_NS, _attrs(parent)):
n = a.localName
if n not in xmlattrs:
xmlattrs.append(n)
inherited.append(a)
parent = parent.parentNode
return inherited
def _do_document(self, node):
'''_do_document(self, node) -> None
Process a document node. documentOrder holds whether the document
element has been encountered such that PIs/comments can be written
as specified.'''
self.documentOrder = _LesserElement
for child in node.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.documentOrder = _Element # At document element
self._do_element(child)
self.documentOrder = _GreaterElement # After document element
elif child.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
self._do_pi(child)
elif child.nodeType == Node.COMMENT_NODE:
self._do_comment(child)
elif child.nodeType == Node.DOCUMENT_TYPE_NODE:
pass
else:
raise TypeError, str(child)
handlers[Node.DOCUMENT_NODE] = _do_document
def _do_text(self, node):
'''_do_text(self, node) -> None
Process a text or CDATA node. Render various special characters
as their C14N entity representations.'''
if not _in_subset(self.subset, node): return
s = string.replace(node.data, "&", "&amp;")
s = string.replace(s, "<", "&lt;")
s = string.replace(s, ">", "&gt;")
s = string.replace(s, "\015", "&#xD;")
if s: self.write(s)
handlers[Node.TEXT_NODE] = _do_text
handlers[Node.CDATA_SECTION_NODE] = _do_text
def _do_pi(self, node):
'''_do_pi(self, node) -> None
Process a PI node. Render a leading or trailing #xA if the
document order of the PI is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node): return
W = self.write
if self.documentOrder == _GreaterElement: W('\n')
W('<?')
W(node.nodeName)
s = node.data
if s:
W(' ')
W(s)
W('?>')
if self.documentOrder == _LesserElement: W('\n')
handlers[Node.PROCESSING_INSTRUCTION_NODE] = _do_pi
def _do_comment(self, node):
'''_do_comment(self, node) -> None
Process a comment node. Render a leading or trailing #xA if the
document order of the comment is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node): return
if self.comments:
W = self.write
if self.documentOrder == _GreaterElement: W('\n')
W('<!--')
W(node.data)
W('-->')
if self.documentOrder == _LesserElement: W('\n')
handlers[Node.COMMENT_NODE] = _do_comment
def _do_attr(self, n, value):
''''_do_attr(self, node) -> None
Process an attribute.'''
W = self.write
W(' ')
W(n)
W('="')
s = string.replace(value, "&", "&amp;")
s = string.replace(s, "<", "&lt;")
s = string.replace(s, '"', '&quot;')
s = string.replace(s, '\011', '&#x9;')
s = string.replace(s, '\012', '&#xA;')
s = string.replace(s, '\015', '&#xD;')
W(s)
W('"')
def _do_element(self, node, initial_other_attrs = []):
'''_do_element(self, node, initial_other_attrs = []) -> None
Process an element (and its children).'''
# Get state (from the stack) make local copies.
# ns_parent -- NS declarations in parent
# ns_rendered -- NS nodes rendered by ancestors
# ns_local -- NS declarations relevant to this element
# xml_attrs -- Attributes in XML namespace from parent
# xml_attrs_local -- Local attributes in XML namespace.
ns_parent, ns_rendered, xml_attrs = \
self.state[0], self.state[1].copy(), self.state[2].copy() #0422
ns_local = ns_parent.copy()
xml_attrs_local = {}
# progress("_do_element node.nodeName=", node.nodeName)
# progress("_do_element node.namespaceURI", node.namespaceURI)
# progress("_do_element node.tocml()", node.toxml())
# Divide attributes into NS, XML, and others.
other_attrs = initial_other_attrs[:]
in_subset = _in_subset(self.subset, node)
for a in _attrs(node):
# progress("\t_do_element a.nodeName=", a.nodeName)
if a.namespaceURI == XMLNS.BASE:
n = a.nodeName
if n == "xmlns:": n = "xmlns" # DOM bug workaround
ns_local[n] = a.nodeValue
elif a.namespaceURI == XMLNS.XML:
if _inclusive(self) or in_subset:
xml_attrs_local[a.nodeName] = a #0426
else:
other_attrs.append(a)
#add local xml:foo attributes to ancestor's xml:foo attributes
xml_attrs.update(xml_attrs_local)
# Render the node
W, name = self.write, None
if in_subset:
name = node.nodeName
W('<')
W(name)
# Create list of NS attributes to render.
ns_to_render = []
for n,v in ns_local.items():
# If default namespace is XMLNS.BASE or empty,
# and if an ancestor was the same
if n == "xmlns" and v in [ XMLNS.BASE, '' ] \
and ns_rendered.get('xmlns') in [ XMLNS.BASE, '', None ]:
continue
# "omit namespace node with local name xml, which defines
# the xml prefix, if its string value is
# http://www.w3.org/XML/1998/namespace."
if n in ["xmlns:xml", "xml"] \
and v in [ 'http://www.w3.org/XML/1998/namespace' ]:
continue
# If not previously rendered
# and it's inclusive or utilized
if (n,v) not in ns_rendered.items() \
and (_inclusive(self) or \
_utilized(n, node, other_attrs, self.unsuppressedPrefixes)):
ns_to_render.append((n, v))
# Sort and render the ns, marking what was rendered.
ns_to_render.sort(_sorter_ns)
for n,v in ns_to_render:
self._do_attr(n, v)
ns_rendered[n]=v #0417
# If exclusive or the parent is in the subset, add the local xml attributes
# Else, add all local and ancestor xml attributes
# Sort and render the attributes.
if not _inclusive(self) or _in_subset(self.subset,node.parentNode): #0426
other_attrs.extend(xml_attrs_local.values())
else:
other_attrs.extend(xml_attrs.values())
other_attrs.sort(_sorter)
for a in other_attrs:
self._do_attr(a.nodeName, a.value)
W('>')
# Push state, recurse, pop state.
state, self.state = self.state, (ns_local, ns_rendered, xml_attrs)
for c in _children(node):
_implementation.handlers[c.nodeType](self, c)
self.state = state
if name: W('</%s>' % name)
handlers[Node.ELEMENT_NODE] = _do_element
def Canonicalize(node, output=None, **kw):
'''Canonicalize(node, output=None, **kw) -> UTF-8
Canonicalize a DOM document/element node and all descendents.
Return the text; if output is specified then output.write will
be called to output the text and None will be returned
Keyword parameters:
nsdict: a dictionary of prefix:uri namespace entries
assumed to exist in the surrounding context
comments: keep comments if non-zero (default is 0)
subset: Canonical XML subsetting resulting from XPath
(default is [])
unsuppressedPrefixes: do exclusive C14N, and this specifies the
prefixes that should be inherited.
'''
if output:
apply(_implementation, (node, output.write), kw)
else:
s = StringIO.StringIO()
apply(_implementation, (node, s.write), kw)
return s.getvalue()
# end of xmlC14n.py
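# A minimal illustration of the canonicalizer above (the input document is made up, and
# the exact output shown is only indicative):
#
# import xml.dom.minidom
# doc = xml.dom.minidom.parseString('<a xmlns="http://example.org/"><b>x &amp; y</b></a>')
# print Canonicalize(doc, comments=0)
# # -> '<a xmlns="http://example.org/"><b>x &amp; y</b></a>'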
# from why import BecauseOfData, becauseSubexpression
def BecauseOfData(*args, **kargs):
# print args, kargs
pass
def becauseSubexpression(*args, **kargs):
# print args, kargs
pass
N3_forSome_URI = forSomeSym
N3_forAll_URI = forAllSym
# Magic resources we know about
ADDED_HASH = "#" # Stop where we use this in case we want to remove it!
# This is the hash on namespace URIs
RDF_type = ( SYMBOL , RDF_type_URI )
DAML_sameAs = ( SYMBOL, DAML_sameAs_URI )
LOG_implies_URI = "http://www.w3.org/2000/10/swap/log#implies"
BOOLEAN_DATATYPE = _XSD_PFX + "boolean"
DECIMAL_DATATYPE = _XSD_PFX + "decimal"
DOUBLE_DATATYPE = _XSD_PFX + "double"
FLOAT_DATATYPE = _XSD_PFX + "float"
INTEGER_DATATYPE = _XSD_PFX + "integer"
option_noregen = 0 # If set, do not regenerate genids on output
# @@ I18n - the notname chars need extending for well known unicode non-text
# characters. The XML spec switched to assuming unknown things were name
# characters.
# _namechars = string.lowercase + string.uppercase + string.digits + '_-'
_notQNameChars = "\t\r\n !\"#$%&'()*.,+/;<=>?@[\\]^`{|}~" # else valid qname :-/
_notNameChars = _notQNameChars + ":" # Assume anything else valid name :-/
_rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
N3CommentCharacter = "#" # For unix script #! compatabilty
########################################## Parse string to sink
#
# Regular expressions:
eol = re.compile(r'[ \t]*(#[^\n]*)?\r?\n') # end of line, poss. w/comment
eof = re.compile(r'[ \t]*(#[^\n]*)?$') # end of file, poss. w/comment
ws = re.compile(r'[ \t]*') # Whitespace not including NL
signed_integer = re.compile(r'[-+]?[0-9]+') # integer
number_syntax = re.compile(r'(?P<integer>[-+]?[0-9]+)(?P<decimal>\.[0-9]+)?(?P<exponent>e[-+]?[0-9]+)?')
digitstring = re.compile(r'[0-9]+') # Unsigned integer
interesting = re.compile(r'[\\\r\n\"]')
langcode = re.compile(r'[a-zA-Z0-9]+(-[a-zA-Z0-9]+)?')
#"
class SinkParser:
def __init__(self, store, openFormula=None, thisDoc="", baseURI=None,
genPrefix = "", flags="",
why=None):
""" note: namespace names should *not* end in #;
the # will get added during qname processing """
self._bindings = {}
self._flags = flags
if thisDoc != "":
assert ':' in thisDoc, "Document URI not absolute: <%s>" % thisDoc
self._bindings[""] = thisDoc + "#" # default
self._store = store
if genPrefix: store.setGenPrefix(genPrefix) # pass it on
self._thisDoc = thisDoc
self.lines = 0 # for error handling
self.startOfLine = 0 # For calculating character number
self._genPrefix = genPrefix
self.keywords = ['a', 'this', 'bind', 'has', 'is', 'of', 'true', 'false' ]
self.keywordsSet = 0 # Then only can others be considered qnames
self._anonymousNodes = {} # Dict of anon nodes already declared ln: Term
self._variables = {}
self._parentVariables = {}
self._reason = why # Why the parser was asked to parse this
self._reason2 = None # Why these triples
# was: diag.tracking
if tracking: self._reason2 = BecauseOfData(
store.newSymbol(thisDoc), because=self._reason)
if baseURI: self._baseURI = baseURI
else:
if thisDoc:
self._baseURI = thisDoc
else:
self._baseURI = None
assert not self._baseURI or ':' in self._baseURI
if not self._genPrefix:
if self._thisDoc: self._genPrefix = self._thisDoc + "#_g"
else: self._genPrefix = uniqueURI()
if openFormula ==None:
if self._thisDoc:
self._formula = store.newFormula(thisDoc + "#_formula")
else:
self._formula = store.newFormula()
else:
self._formula = openFormula
self._context = self._formula
self._parentContext = None
def here(self, i):
"""String generated from position in file
This is for repeatability when referring people to bnodes in a document.
This has diagnostic uses less formally, as it should point one to which
bnode the arbitrary identifier actually is. It gives the
line and character number of the '[' character or path character
which introduced the blank node. The first blank node is boringly _L1C1.
It used to be used only for tracking, but for tests in general
it makes the canonical ordering of bnodes repeatable."""
return "%s_L%iC%i" % (self._genPrefix , self.lines,
i - self.startOfLine + 1)
def formula(self):
return self._formula
def loadStream(self, stream):
return self.loadBuf(stream.read()) # Not ideal
def loadBuf(self, buf):
"""Parses a buffer and returns its top level formula"""
self.startDoc()
self.feed(buf)
return self.endDoc() # self._formula
def feed(self, octets):
"""Feed an octet stream tothe parser
if BadSyntax is raised, the string
passed in the exception object is the
remainder after any statements have been parsed.
So if there is more data to feed to the
parser, it should be straightforward to recover."""
if not isinstance(octets, unicode):
str = octets.decode('utf-8')
else:
str=octets
i = 0
while i >= 0:
j = self.skipSpace(str, i)
if j<0: return
i = self.directiveOrStatement(str,j)
if i<0:
print "# next char: ", `str[j]`
raise BadSyntax(self._thisDoc, self.lines, str, j,
"expected directive or statement")
def directiveOrStatement(self, str,h):
i = self.skipSpace(str, h)
if i<0: return i # EOF
j = self.directive(str, i)
if j>=0: return self.checkDot(str,j)
j = self.statement(str, i)
if j>=0: return self.checkDot(str,j)
return j
#@@I18N
global _notNameChars
#_namechars = string.lowercase + string.uppercase + string.digits + '_-'
def tok(self, tok, str, i):
"""Check for keyword. Space must have been stripped on entry and
we must not be at end of file."""
assert tok[0] not in _notNameChars # not for punctuation
if str[i:i+1] == "@":
i = i+1
else:
if tok not in self.keywords:
return -1 # No, this has neither keywords declaration nor "@"
if (str[i:i+len(tok)] == tok
and (str[i+len(tok)] in _notQNameChars )):
i = i + len(tok)
return i
else:
return -1
def directive(self, str, i):
j = self.skipSpace(str, i)
if j<0: return j # eof
res = []
j = self.tok('bind', str, i) # implied "#". Obsolete.
if j>0: raise BadSyntax(self._thisDoc, self.lines, str, i,
"keyword bind is obsolete: use @prefix")
j = self.tok('keywords', str, i)
if j>0:
i = self.commaSeparatedList(str, j, res, self.bareWord)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, str, i,
"'@keywords' needs comma separated list of words")
self.setKeywords(res[:])
# was: diag.chatty_flag
if chatty_flag > 80: progress("Keywords ", self.keywords)
return i
j = self.tok('forAll', str, i)
if j > 0:
i = self.commaSeparatedList(str, j, res, self.uri_ref2)
if i <0: raise BadSyntax(self._thisDoc, self.lines, str, i,
"Bad variable list after @forAll")
for x in res:
#self._context.declareUniversal(x)
if x not in self._variables or x in self._parentVariables:
self._variables[x] = self._context.newUniversal(x)
return i
j = self.tok('forSome', str, i)
if j > 0:
i = self. commaSeparatedList(str, j, res, self.uri_ref2)
if i <0: raise BadSyntax(self._thisDoc, self.lines, str, i,
"Bad variable list after @forSome")
for x in res:
self._context.declareExistential(x)
return i
j=self.tok('prefix', str, i) # no implied "#"
if j>=0:
t = []
i = self.qname(str, j, t)
if i<0: raise BadSyntax(self._thisDoc, self.lines, str, j,
"expected qname after @prefix")
j = self.uri_ref2(str, i, t)
if j<0: raise BadSyntax(self._thisDoc, self.lines, str, i,
"expected <uriref> after @prefix _qname_")
ns = self.uriOf(t[1])
if self._baseURI:
ns = join(self._baseURI, ns)
elif ":" not in ns:
raise BadSyntax(self._thisDoc, self.lines, str, j,
"With no base URI, cannot use relative URI in @prefix <"+ns+">")
assert ':' in ns # must be absolute
self._bindings[t[0][0]] = ns
self.bind(t[0][0], hexify(ns))
return j
j=self.tok('base', str, i) # Added 2007/7/7
if j >= 0:
t = []
i = self.uri_ref2(str, j, t)
if i<0: raise BadSyntax(self._thisDoc, self.lines, str, j,
"expected <uri> after @base ")
ns = self.uriOf(t[0])
if self._baseURI:
ns = join(self._baseURI, ns)
else:
raise BadSyntax(self._thisDoc, self.lines, str, j,
"With no previous base URI, cannot use relative URI in @base <"+ns+">")
assert ':' in ns # must be absolute
self._baseURI = ns
return i
return -1 # Not a directive, could be something else.
def bind(self, qn, uri):
assert isinstance(uri,
types.StringType), "Any unicode must be %x-encoded already"
if qn == "":
self._store.setDefaultNamespace(uri)
else:
self._store.bind(qn, uri)
def setKeywords(self, k):
"Takes a list of strings"
if k == None:
self.keywordsSet = 0
else:
self.keywords = k
self.keywordsSet = 1
def startDoc(self):
# was: self._store.startDoc()
self._store.startDoc(self._formula)
def endDoc(self):
"""Signal end of document and stop parsing. returns formula"""
self._store.endDoc(self._formula) # don't canonicalize yet
return self._formula
def makeStatement(self, quadruple):
#$$$$$$$$$$$$$$$$$$$$$
# print "# Parser output: ", `quadruple`
self._store.makeStatement(quadruple, why=self._reason2)
def statement(self, str, i):
r = []
i = self.object(str, i, r) # Allow literal for subject - extends RDF
if i<0: return i
j = self.property_list(str, i, r[0])
if j<0: raise BadSyntax(self._thisDoc, self.lines,
str, i, "expected propertylist")
return j
def subject(self, str, i, res):
return self.item(str, i, res)
def verb(self, str, i, res):
""" has _prop_
is _prop_ of
a
=
_prop_
>- prop ->
<- prop -<
_operator_"""
j = self.skipSpace(str, i)
if j<0:return j # eof
r = []
j = self.tok('has', str, i)
if j>=0:
i = self.prop(str, j, r)
if i < 0: raise BadSyntax(self._thisDoc, self.lines,
str, j, "expected property after 'has'")
res.append(('->', r[0]))
return i
j = self.tok('is', str, i)
if j>=0:
i = self.prop(str, j, r)
if i < 0: raise BadSyntax(self._thisDoc, self.lines, str, j,
"expected <property> after 'is'")
j = self.skipSpace(str, i)
if j<0:
raise BadSyntax(self._thisDoc, self.lines, str, i,
"End of file found, expected property after 'is'")
return j # eof
i=j
j = self.tok('of', str, i)
if j<0: raise BadSyntax(self._thisDoc, self.lines, str, i,
"expected 'of' after 'is' <prop>")
res.append(('<-', r[0]))
return j
j = self.tok('a', str, i)
if j>=0:
res.append(('->', RDF_type))
return j
if str[i:i+2] == "<=":
res.append(('<-', self._store.newSymbol(Logic_NS+"implies")))
return i+2
if str[i:i+1] == "=":
if str[i+1:i+2] == ">":
res.append(('->', self._store.newSymbol(Logic_NS+"implies")))
return i+2
res.append(('->', DAML_sameAs))
return i+1
if str[i:i+2] == ":=":
# patch file relates two formulae, uses this @@ really?
res.append(('->', Logic_NS+"becomes"))
return i+2
j = self.prop(str, i, r)
if j >= 0:
res.append(('->', r[0]))
return j
if str[i:i+2] == ">-" or str[i:i+2] == "<-":
raise BadSyntax(self._thisDoc, self.lines, str, j,
">- ... -> syntax is obsolete.")
return -1
def prop(self, str, i, res):
return self.item(str, i, res)
def item(self, str, i, res):
return self.path(str, i, res)
def blankNode(self, uri=None):
if "B" not in self._flags:
return self._context.newBlankNode(uri, why=self._reason2)
x = self._context.newSymbol(uri)
self._context.declareExistential(x)
return x
def path(self, str, i, res):
"""Parse the path production.
"""
j = self.nodeOrLiteral(str, i, res)
if j<0: return j # nope
while str[j:j+1] in "!^.": # no spaces, must follow exactly (?)
ch = str[j:j+1] # @@ Allow "." followed IMMEDIATELY by a node.
if ch == ".":
ahead = str[j+1:j+2]
if not ahead or (ahead in _notNameChars
and ahead not in ":?<[{("): break
subj = res.pop()
obj = self.blankNode(uri=self.here(j))
j = self.node(str, j+1, res)
if j<0: raise BadSyntax(self._thisDoc, self.lines, str, j,
"EOF found in middle of path syntax")
pred = res.pop()
if ch == "^": # Reverse traverse
self.makeStatement((self._context, pred, obj, subj))
else:
self.makeStatement((self._context, pred, subj, obj))
res.append(obj)
return j
def anonymousNode(self, ln):
"""Remember or generate a term for one of these _: anonymous nodes"""
term = self._anonymousNodes.get(ln, None)
if term != None: return term
term = self._store.newBlankNode(self._context, why=self._reason2)
self._anonymousNodes[ln] = term
return term
def node(self, str, i, res, subjectAlready=None):
"""Parse the <node> production.
Space is now skipped once at the beginning
instead of in multiple calls to self.skipSpace().
"""
subj = subjectAlready
j = self.skipSpace(str,i)
if j<0: return j #eof
i=j
ch = str[i:i+1] # Quick 1-character checks first:
if ch == "[":
bnodeID = self.here(i)
j=self.skipSpace(str,i+1)
if j<0: raise BadSyntax(self._thisDoc,
self.lines, str, i, "EOF after '['")
if str[j:j+1] == "=": # Hack for "is" binding name to anon node
i = j+1
objs = []
j = self.objectList(str, i, objs);
if j>=0:
subj = objs[0]
if len(objs)>1:
for obj in objs:
self.makeStatement((self._context,
DAML_sameAs, subj, obj))
j = self.skipSpace(str, j)
if j<0: raise BadSyntax(self._thisDoc, self.lines, str, i,
"EOF when objectList expected after [ = ")
if str[j:j+1] == ";":
j=j+1
else:
raise BadSyntax(self._thisDoc, self.lines, str, i,
"objectList expected after [= ")
if subj is None:
subj=self.blankNode(uri= bnodeID)
i = self.property_list(str, j, subj)
if i<0: raise BadSyntax(self._thisDoc, self.lines, str, j,
"property_list expected")
j = self.skipSpace(str, i)
if j<0: raise BadSyntax(self._thisDoc, self.lines, str, i,
"EOF when ']' expected after [ <propertyList>")
if str[j:j+1] != "]":
raise BadSyntax(self._thisDoc,
self.lines, str, j, "']' expected")
res.append(subj)
return j+1
if ch == "{":
ch2 = str[i+1:i+2]
if ch2 == '$':
i += 1
j = i + 1
List = []
first_run = True
while 1:
i = self.skipSpace(str, j)
if i<0: raise BadSyntax(self._thisDoc, self.lines, str, i,
"needed '$}', found end.")
if str[i:i+2] == '$}':
j = i+2
break
if not first_run:
if str[i:i+1] == ',':
i+=1
else:
raise BadSyntax(self._thisDoc, self.lines,
str, i, "expected: ','")
else: first_run = False
item = []
j = self.item(str,i, item) #@@@@@ should be path, was object
if j<0: raise BadSyntax(self._thisDoc, self.lines, str, i,
"expected item in set or '$}'")
List.append(self._store.intern(item[0]))
res.append(self._store.newSet(List, self._context))
return j
else:
j=i+1
oldParentContext = self._parentContext
self._parentContext = self._context
parentAnonymousNodes = self._anonymousNodes
grandParentVariables = self._parentVariables
self._parentVariables = self._variables
self._anonymousNodes = {}
self._variables = self._variables.copy()
reason2 = self._reason2
self._reason2 = becauseSubexpression
if subj is None: subj = self._store.newFormula()
self._context = subj
while 1:
i = self.skipSpace(str, j)
if i<0: raise BadSyntax(self._thisDoc, self.lines,
str, i, "needed '}', found end.")
if str[i:i+1] == "}":
j = i+1
break
j = self.directiveOrStatement(str,i)
if j<0: raise BadSyntax(self._thisDoc, self.lines,
str, i, "expected statement or '}'")
self._anonymousNodes = parentAnonymousNodes
self._variables = self._parentVariables
self._parentVariables = grandParentVariables
self._context = self._parentContext
self._reason2 = reason2
self._parentContext = oldParentContext
res.append(subj.close()) # No use until closed
return j
if ch == "(":
thing_type = self._store.newList
ch2 = str[i+1:i+2]
if ch2 == '$':
thing_type = self._store.newSet
i += 1
j=i+1
List = []
while 1:
i = self.skipSpace(str, j)
if i<0: raise BadSyntax(self._thisDoc, self.lines,
str, i, "needed ')', found end.")
if str[i:i+1] == ')':
j = i+1
break
item = []
j = self.item(str,i, item) #@@@@@ should be path, was object
if j<0: raise BadSyntax(self._thisDoc, self.lines, str, i,
"expected item in list or ')'")
List.append(self._store.intern(item[0]))
res.append(thing_type(List, self._context))
return j
j = self.tok('this', str, i) # This context
if j>=0:
raise BadSyntax(self._thisDoc, self.lines, str, i,
"Keyword 'this' was ancient N3. Now use @forSome and @forAll keywords.")
res.append(self._context)
return j
#booleans
j = self.tok('true', str, i)
if j>=0:
res.append(True)
return j
j = self.tok('false', str, i)
if j>=0:
res.append(False)
return j
if subj is None: # If this can be a named node, then check for a name.
j = self.uri_ref2(str, i, res)
if j >= 0:
return j
return -1
def property_list(self, str, i, subj):
"""Parse property list
Leaves the terminating punctuation in the buffer
"""
while 1:
j = self.skipSpace(str, i)
if j<0:
raise BadSyntax(self._thisDoc, self.lines, str, i,
"EOF found when expected verb in property list")
return j #eof
if str[j:j+2] ==":-":
i = j + 2
res = []
j = self.node(str, i, res, subj)
if j<0: raise BadSyntax(self._thisDoc, self.lines, str, i,
"bad {} or () or [] node after :- ")
i=j
continue
i=j
v = []
j = self.verb(str, i, v)
if j<=0:
return i # void but valid
objs = []
i = self.objectList(str, j, objs)
if i<0: raise BadSyntax(self._thisDoc, self.lines, str, j,
"objectList expected")
for obj in objs:
dir, sym = v[0]
if dir == '->':
self.makeStatement((self._context, sym, subj, obj))
else:
self.makeStatement((self._context, sym, obj, subj))
j = self.skipSpace(str, i)
if j<0:
raise BadSyntax(self._thisDoc, self.lines, str, j,
"EOF found in list of objects")
return j #eof
if str[i:i+1] != ";":
return i
i = i+1 # skip semicolon and continue
def commaSeparatedList(self, str, j, res, what):
"""return value: -1 bad syntax; >1 new position in str
res has things found appended
"""
i = self.skipSpace(str, j)
if i<0:
raise BadSyntax(self._thisDoc, self.lines, str, i,
"EOF found expecting comma sep list")
return i
if str[i] == ".": return j # empty list is OK
i = what(str, i, res)
if i<0: return -1
while 1:
j = self.skipSpace(str, i)
if j<0: return j # eof
ch = str[j:j+1]
if ch != ",":
if ch != ".":
return -1
return j # Found but not swallowed "."
i = what(str, j+1, res)
if i<0:
raise BadSyntax(self._thisDoc, self.lines, str, i,
"bad list content")
return i
def objectList(self, str, i, res):
i = self.object(str, i, res)
if i<0: return -1
while 1:
j = self.skipSpace(str, i)
if j<0:
raise BadSyntax(self._thisDoc, self.lines, str, j,
"EOF found after object")
return j #eof
if str[j:j+1] != ",":
return j # Found something else!
i = self.object(str, j+1, res)
if i<0: return i
def checkDot(self, str, i):
j = self.skipSpace(str, i)
if j<0: return j #eof
if str[j:j+1] == ".":
return j+1 # skip
if str[j:j+1] == "}":
return j # don't skip it
if str[j:j+1] == "]":
return j
raise BadSyntax(self._thisDoc, self.lines,
str, j, "expected '.' or '}' or ']' at end of statement")
return i
def uri_ref2(self, str, i, res):
"""Generate uri from n3 representation.
Note that the RDF convention of directly concatenating
NS and local name is now used though I prefer inserting a '#'
        to make the namespaces look more like what XML folks expect.
"""
qn = []
j = self.qname(str, i, qn)
if j>=0:
pfx, ln = qn[0]
if pfx is None:
assert 0, "not used?"
ns = self._baseURI + ADDED_HASH
else:
try:
ns = self._bindings[pfx]
except KeyError:
if pfx == "_": # Magic prefix 2001/05/30, can be overridden
res.append(self.anonymousNode(ln))
return j
raise BadSyntax(self._thisDoc, self.lines, str, i,
"Prefix \"%s:\" not bound" % (pfx))
symb = self._store.newSymbol(ns + ln)
if symb in self._variables:
res.append(self._variables[symb])
else:
res.append(symb) # @@@ "#" CONVENTION
                if string.find(ns, "#") < 0:
                    progress("Warning: no # on namespace %s," % ns)
return j
i = self.skipSpace(str, i)
if i<0: return -1
if str[i] == "?":
v = []
j = self.variable(str,i,v)
            if j>0: #Forget variables as a class, only in context.
res.append(v[0])
return j
return -1
elif str[i]=="<":
i = i + 1
st = i
while i < len(str):
if str[i] == ">":
                    uref = str[st:i] # the join should deal with "":
if self._baseURI:
uref = join(self._baseURI, uref) # was: uripath.join
else:
assert ":" in uref, \
"With no base URI, cannot deal with relative URIs"
if str[i-1:i]=="#" and not uref[-1:]=="#":
uref = uref + "#" # She meant it! Weirdness in urlparse?
symb = self._store.newSymbol(uref)
if symb in self._variables:
res.append(self._variables[symb])
else:
res.append(symb)
return i+1
i = i + 1
raise BadSyntax(self._thisDoc, self.lines, str, j,
"unterminated URI reference")
elif self.keywordsSet:
v = []
j = self.bareWord(str,i,v)
            if j<0: return -1 #Forget variables as a class, only in context.
if v[0] in self.keywords:
raise BadSyntax(self._thisDoc, self.lines, str, i,
'Keyword "%s" not allowed here.' % v[0])
res.append(self._store.newSymbol(self._bindings[""]+v[0]))
return j
else:
return -1
def skipSpace(self, str, i):
"""Skip white space, newlines and comments.
return -1 if EOF, else position of first non-ws character"""
while 1:
m = eol.match(str, i)
if m == None: break
self.lines = self.lines + 1
i = m.end() # Point to first character unmatched
self.startOfLine = i
m = ws.match(str, i)
if m != None:
i = m.end()
m = eof.match(str, i)
if m != None: return -1
return i
def variable(self, str, i, res):
""" ?abc -> variable(:abc)
"""
j = self.skipSpace(str, i)
if j<0: return -1
if str[j:j+1] != "?": return -1
j=j+1
i = j
if str[j] in "0123456789-":
raise BadSyntax(self._thisDoc, self.lines, str, j,
"Varible name can't start with '%s'" % str[j])
return -1
while i <len(str) and str[i] not in _notNameChars:
i = i+1
if self._parentContext == None:
varURI = self._store.newSymbol(self._baseURI + "#" +str[j:i])
if varURI not in self._variables:
self._variables[varURI] = self._context.newUniversal(varURI
, why=self._reason2)
res.append(self._variables[varURI])
return i
# @@ was:
# raise BadSyntax(self._thisDoc, self.lines, str, j,
# "Can't use ?xxx syntax for variable in outermost level: %s"
# % str[j-1:i])
varURI = self._store.newSymbol(self._baseURI + "#" +str[j:i])
if varURI not in self._parentVariables:
self._parentVariables[varURI] = self._parentContext.newUniversal(varURI
, why=self._reason2)
res.append(self._parentVariables[varURI])
return i
def bareWord(self, str, i, res):
""" abc -> :abc
"""
j = self.skipSpace(str, i)
if j<0: return -1
if str[j] in "0123456789-" or str[j] in _notNameChars: return -1
i = j
while i <len(str) and str[i] not in _notNameChars:
i = i+1
res.append(str[j:i])
return i
def qname(self, str, i, res):
"""
xyz:def -> ('xyz', 'def')
If not in keywords and keywordsSet: def -> ('', 'def')
:def -> ('', 'def')
"""
i = self.skipSpace(str, i)
if i<0: return -1
c = str[i]
if c in "0123456789-+": return -1
if c not in _notNameChars:
ln = c
i = i + 1
while i < len(str):
c = str[i]
if c not in _notNameChars:
ln = ln + c
i = i + 1
else: break
else: # First character is non-alpha
ln = '' # Was: None - TBL (why? useful?)
if i<len(str) and str[i] == ':':
pfx = ln
i = i + 1
ln = ''
while i < len(str):
c = str[i]
if c not in _notNameChars:
ln = ln + c
i = i + 1
else: break
res.append((pfx, ln))
return i
else: # delimiter was not ":"
if ln and self.keywordsSet and ln not in self.keywords:
res.append(('', ln))
return i
return -1
def object(self, str, i, res):
j = self.subject(str, i, res)
if j>= 0:
return j
else:
j = self.skipSpace(str, i)
if j<0: return -1
else: i=j
if str[i]=='"':
if str[i:i+3] == '"""': delim = '"""'
else: delim = '"'
i = i + len(delim)
j, s = self.strconst(str, i, delim)
res.append(self._store.newLiteral(s))
progress("New string const ", s, j)
return j
else:
return -1
def nodeOrLiteral(self, str, i, res):
j = self.node(str, i, res)
startline = self.lines # Remember where for error messages
if j>= 0:
return j
else:
j = self.skipSpace(str, i)
if j<0: return -1
else: i=j
ch = str[i]
if ch in "-+0987654321":
m = number_syntax.match(str, i)
if m == None:
raise BadSyntax(self._thisDoc, self.lines, str, i,
"Bad number syntax")
j = m.end()
if m.group('exponent') != None: # includes decimal exponent
res.append(float(str[i:j]))
# res.append(self._store.newLiteral(str[i:j],
# self._store.newSymbol(FLOAT_DATATYPE)))
elif m.group('decimal') != None:
res.append(Decimal(str[i:j]))
else:
res.append(long(str[i:j]))
# res.append(self._store.newLiteral(str[i:j],
# self._store.newSymbol(INTEGER_DATATYPE)))
return j
if str[i]=='"':
if str[i:i+3] == '"""': delim = '"""'
else: delim = '"'
i = i + len(delim)
dt = None
j, s = self.strconst(str, i, delim)
lang = None
if str[j:j+1] == "@": # Language?
m = langcode.match(str, j+1)
if m == None:
raise BadSyntax(self._thisDoc, startline, str, i,
"Bad language code syntax on string literal, after @")
i = m.end()
lang = str[j+1:i]
j = i
if str[j:j+2] == "^^":
res2 = []
j = self.uri_ref2(str, j+2, res2) # Read datatype URI
dt = res2[0]
# if dt.uriref() == "http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral":
if dt == "http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral":
try:
dom = XMLtoDOM('<rdf:envelope xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns">'
+ s
+ '</rdf:envelope>').firstChild
except:
raise ValueError('s="%s"' % s)
res.append(self._store.newXMLLiteral(dom))
return j
res.append(self._store.newLiteral(s, dt, lang))
return j
else:
return -1
def uriOf(self, sym):
if isinstance(sym, types.TupleType):
return sym[1] # old system for --pipe
# return sym.uriref() # cwm api
return sym
def strconst(self, str, i, delim):
"""parse an N3 string constant delimited by delim.
return index, val
"""
j = i
ustr = u"" # Empty unicode string
startline = self.lines # Remember where for error messages
while j<len(str):
i = j + len(delim)
if str[j:i] == delim: # done.
return i, ustr
if str[j] == '"':
ustr = ustr + '"'
j = j + 1
continue
m = interesting.search(str, j) # was str[j:].
# Note for pos param to work, MUST be compiled ... re bug?
assert m , "Quote expected in string at ^ in %s^%s" %(
str[j-20:j], str[j:j+20]) # we at least have to find a quote
i = m.start()
try:
ustr = ustr + str[j:i]
except UnicodeError:
err = ""
for c in str[j:i]:
err = err + (" %02x" % ord(c))
streason = sys.exc_info()[1].__str__()
raise BadSyntax(self._thisDoc, startline, str, j,
"Unicode error appending characters %s to string, because\n\t%s"
% (err, streason))
# print "@@@ i = ",i, " j=",j, "m.end=", m.end()
ch = str[i]
if ch == '"':
j = i
continue
elif ch == "\r": # Strip carriage returns
j = i+1
continue
elif ch == "\n":
if delim == '"':
raise BadSyntax(self._thisDoc, startline, str, i,
"newline found in string literal")
self.lines = self.lines + 1
ustr = ustr + ch
j = i + 1
self.startOfLine = j
elif ch == "\\":
j = i + 1
ch = str[j:j+1] # Will be empty if string ends
if not ch:
raise BadSyntax(self._thisDoc, startline, str, i,
"unterminated string literal (2)")
k = string.find('abfrtvn\\"', ch)
if k >= 0:
uch = '\a\b\f\r\t\v\n\\"'[k]
ustr = ustr + uch
j = j + 1
elif ch == "u":
j, ch = self.uEscape(str, j+1, startline)
ustr = ustr + ch
elif ch == "U":
j, ch = self.UEscape(str, j+1, startline)
ustr = ustr + ch
else:
raise BadSyntax(self._thisDoc, self.lines, str, i,
"bad escape")
raise BadSyntax(self._thisDoc, self.lines, str, i,
"unterminated string literal")
def uEscape(self, str, i, startline):
j = i
count = 0
value = 0
while count < 4: # Get 4 more characters
ch = str[j:j+1].lower()
# sbp http://ilrt.org/discovery/chatlogs/rdfig/2002-07-05
j = j + 1
if ch == "":
raise BadSyntax(self._thisDoc, startline, str, i,
"unterminated string literal(3)")
k = string.find("0123456789abcdef", ch)
if k < 0:
raise BadSyntax(self._thisDoc, startline, str, i,
"bad string literal hex escape")
value = value * 16 + k
count = count + 1
uch = unichr(value)
return j, uch
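    # Illustrative sketch (positions are assumptions): strconst calls uEscape with
    # the index of the first hex digit after a backslash-u escape. For text
    # containing \u00e9 it consumes the four digits "00e9" and returns the new
    # position together with unichr(0xE9), i.e. u'\xe9'.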
def UEscape(self, str, i, startline):
stringType = type('')
j = i
count = 0
value = '\\U'
while count < 8: # Get 8 more characters
ch = str[j:j+1].lower()
# sbp http://ilrt.org/discovery/chatlogs/rdfig/2002-07-05
j = j + 1
if ch == "":
raise BadSyntax(self._thisDoc, startline, str, i,
"unterminated string literal(3)")
k = string.find("0123456789abcdef", ch)
if k < 0:
raise BadSyntax(self._thisDoc, startline, str, i,
"bad string literal hex escape")
value = value + ch
count = count + 1
uch = stringType(value).decode('unicode-escape')
return j, uch
wide_build = True
try:
unichr(0x10000)
except ValueError:
wide_build = False
# If we are going to do operators then they should generate
# [ is operator:plus of ( \1 \2 ) ]
class BadSyntax(SyntaxError):
def __init__(self, uri, lines, str, i, why):
self._str = str.encode('utf-8') # Better go back to strings for errors
self._i = i
self._why = why
self.lines = lines
self._uri = uri
def __str__(self):
str = self._str
i = self._i
st = 0
if i>60:
pre="..."
st = i - 60
else: pre=""
if len(str)-i > 60: post="..."
else: post=""
return 'at line %i of <%s>:\nBad syntax (%s) at ^ in:\n"%s%s^%s%s"' \
% (self.lines +1, self._uri, self._why, pre,
str[st:i], str[i:i+60], post)
def stripCR(str):
res = ""
for ch in str:
if ch != "\r":
res = res + ch
return res
def dummyWrite(x):
pass
################################################################################
def toBool(s):
if s == 'true' or s == 'True' or s == '1':
return True
if s == 'false' or s == 'False' or s == '0':
return False
raise ValueError(s)
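# Illustrative examples (inputs are assumptions): toBool('true') and toBool('1')
# return True, toBool('False') and toBool('0') return False, and any other value
# (e.g. 'yes') raises ValueError.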
class Formula(object):
number = 0
def __init__(self, parent):
self.counter = 0
Formula.number += 1
self.number = Formula.number
self.existentials = {}
self.universals = {}
self.quotedgraph=QuotedGraph(store=parent.store, identifier=self.id())
def __str__(self):
return '_:Formula%s' % self.number
def id(self):
return BNode('_:Formula%s' % self.number)
def newBlankNode(self, uri=None, why=None):
if uri is None:
self.counter += 1
b = BNode('f%sb%s' % (id(self), self.counter))
else: b = BNode(uri.split('#').pop().replace('_', 'b'))
return b
def newUniversal(self, uri, why=None):
return Variable(uri.split('#').pop())
def declareExistential(self, x):
self.existentials[x] = self.newBlankNode()
def close(self):
return self.quotedgraph
r_hibyte = re.compile(r'([\x80-\xff])')
def iri(uri):
return uri.decode('utf-8')
# return unicode(r_hibyte.sub(lambda m: '%%%02X' % ord(m.group(1)), uri))
class RDFSink(object):
def __init__(self, graph):
self.rootFormula = None
self.counter = 0
self.graph=graph
def newFormula(self):
assert self.graph.store.formula_aware
f = Formula(self.graph)
return f
def newSymbol(self, *args):
uri = args[0].encode('utf-8')
return URIRef(iri(uri))
def newBlankNode(self, arg=None, **kargs):
if isinstance(arg, Formula):
return arg.newBlankNode()
elif arg is None:
self.counter += 1
b = BNode('n' + str(self.counter))
else: b = BNode(str(arg[0]).split('#').pop().replace('_', 'b'))
return b
def newLiteral(self, s, dt, lang):
if dt: return Literal(s, datatype=dt)
else: return Literal(s, lang=lang)
def newList(self, n, f):
if not n:
return self.newSymbol(
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil'
)
a = self.newBlankNode(f)
first = self.newSymbol(
'http://www.w3.org/1999/02/22-rdf-syntax-ns#first'
)
rest = self.newSymbol('http://www.w3.org/1999/02/22-rdf-syntax-ns#rest')
self.makeStatement((f, first, a, n[0]))
self.makeStatement((f, rest, a, self.newList(n[1:], f)))
return a
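    # Illustrative sketch (blank-node labels are made up): newList([a, b], f)
    # emits the usual rdf:first/rdf:rest chain, roughly
    #   _:l1 rdf:first a ; rdf:rest _:l2 .
    #   _:l2 rdf:first b ; rdf:rest rdf:nil .
    # and returns the head node _:l1.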
def newSet(self, *args):
return set(args)
def setDefaultNamespace(self, *args):
return ':'.join(repr(n) for n in args)
def makeStatement(self, quadruple, why=None):
f, p, s, o = quadruple
if hasattr(p, 'formula'):
raise Exception("Formula used as predicate")
s = self.normalise(f, s)
p = self.normalise(f, p)
o = self.normalise(f, o)
if f == self.rootFormula:
# print s, p, o, '.'
self.graph.add((s, p, o))
else:
f.quotedgraph.add((s,p,o))
#return str(quadruple)
def normalise(self, f, n):
if isinstance(n, tuple):
return URIRef(unicode(n[1]))
# if isinstance(n, list):
# rdflist, f = n
# name = self.newBlankNode()
# if f == self.rootFormula:
# sublist = name
# for i in xrange(0, len(rdflist) - 1):
# print sublist, 'first', rdflist[i]
# rest = self.newBlankNode()
# print sublist, 'rest', rest
# sublist = rest
# print sublist, 'first', rdflist[-1]
# print sublist, 'rest', 'nil'
# return name
if isinstance(n, bool):
s = Literal(str(n).lower(), datatype=BOOLEAN_DATATYPE)
return s
if isinstance(n, int) or isinstance(n, long):
s = Literal(unicode(n), datatype=INTEGER_DATATYPE)
return s
if isinstance(n, Decimal):
value = str(n.normalize())
if value == '-0':
value = '0'
s = Literal(value, datatype=DECIMAL_DATATYPE )
return s
if isinstance(n, float):
s = Literal(str(n), datatype=DOUBLE_DATATYPE )
return s
if f.existentials.has_key(n):
return f.existentials[n]
# if isinstance(n, Var):
# if f.universals.has_key(n):
# return f.universals[n]
# f.universals[n] = f.newBlankNode()
# return f.universals[n]
return n
def intern(self, something):
return something
def bind(self, pfx, uri):
pass # print pfx, ':', uri
def startDoc(self, formula):
self.rootFormula = formula
def endDoc(self, formula):
pass
###################################################
#
# Utilities
#
Escapes = {'a': '\a',
'b': '\b',
'f': '\f',
'r': '\r',
't': '\t',
'v': '\v',
'n': '\n',
'\\': '\\',
'"': '"'}
forbidden1 = re.compile(ur'[\\\"\a\b\f\r\v\u0080-\U0000ffff]')
forbidden2 = re.compile(ur'[\\\"\a\b\f\r\v\t\n\u0080-\U0000ffff]')
#"
def stringToN3(str, singleLine=0, flags=""):
res = ''
if (len(str) > 20 and
str[-1] <> '"' and
not singleLine and
(string.find(str, "\n") >=0
or string.find(str, '"') >=0)):
delim= '"""'
forbidden = forbidden1 # (allow tabs too now)
else:
delim = '"'
forbidden = forbidden2
i = 0
while i < len(str):
m = forbidden.search(str, i)
if not m:
break
j = m.start()
res = res + str[i:j]
ch = m.group(0)
if ch == '"' and delim == '"""' and str[j:j+3] != '"""': #"
res = res + ch
else:
k = string.find('\a\b\f\r\t\v\n\\"', ch)
if k >= 0: res = res + "\\" + 'abfrtvn\\"'[k]
else:
if 'e' in flags:
# res = res + ('\\u%04x' % ord(ch))
res = res + ('\\u%04X' % ord(ch))
# http://www.w3.org/TR/rdf-testcases/#ntriples
else:
res = res + ch
i = j + 1
# The following code fixes things for really high range Unicode
newstr = ""
for ch in res + str[i:]:
if ord(ch)>65535:
newstr = newstr + ('\\U%08X' % ord(ch))
# http://www.w3.org/TR/rdf-testcases/#ntriples
else:
newstr = newstr + ch
#
return delim + newstr + delim
def backslashUify(ustr):
"""Use URL encoding to return an ASCII string corresponding
to the given unicode"""
# progress("String is "+`ustr`)
# s1=ustr.encode('utf-8')
str = ""
for ch in ustr: # .encode('utf-8'):
if ord(ch) > 65535:
ch = "\\U%08X" % ord(ch)
elif ord(ch) > 126:
ch = "\\u%04X" % ord(ch)
else:
ch = "%c" % ord(ch)
str = str + ch
return str
def hexify(ustr):
"""Use URL encoding to return an ASCII string
corresponding to the given UTF8 string
>>> hexify("http://example/a b")
'http://example/a%20b'
""" #"
# progress("String is "+`ustr`)
# s1=ustr.encode('utf-8')
str = ""
for ch in ustr: # .encode('utf-8'):
if ord(ch) > 126 or ord(ch) < 33 :
ch = "%%%02X" % ord(ch)
else:
ch = "%c" % ord(ch)
str = str + ch
return str
def dummy():
res = ""
if len(str) > 20 and (string.find(str, "\n") >=0
or string.find(str, '"') >=0):
delim= '"""'
forbidden = "\\\"\a\b\f\r\v" # (allow tabs too now)
else:
delim = '"'
forbidden = "\\\"\a\b\f\r\v\t\n"
for i in range(len(str)):
ch = str[i]
j = string.find(forbidden, ch)
if ch == '"' and delim == '"""' \
and i+1 < len(str) and str[i+1] != '"':
j=-1 # Single quotes don't need escaping in long format
if j>=0: ch = "\\" + '\\"abfrvtn'[j]
elif ch not in "\n\t" and (ch < " " or ch > "}"):
ch = "[[" + `ch` + "]]" #[2:-1] # Use python
res = res + ch
return delim + res + delim
class N3Parser(Parser):
def __init__(self):
pass
def parse(self, source, graph):
# we're currently being handed a Graph, not a ConjunctiveGraph
assert graph.store.context_aware # is this implied by formula_aware
assert graph.store.formula_aware
conj_graph = ConjunctiveGraph(store=graph.store)
conj_graph.default_context = graph # TODO: CG __init__ should have a default_context arg
# TODO: update N3Processor so that it can use conj_graph as the sink
conj_graph.namespace_manager = graph.namespace_manager
sink = RDFSink(conj_graph)
baseURI = graph.absolutize(source.getPublicId() or source.getSystemId() or "")
p = SinkParser(sink, baseURI=baseURI)
p.loadStream(source.getByteStream())
for prefix, namespace in p._bindings.items():
conj_graph.bind(prefix, namespace)
def _test():
import doctest
doctest.testmod()
# if __name__ == '__main__':
# _test()
def main():
g=ConjunctiveGraph()
sink = RDFSink(g)
base = 'file://' + os.path.join(os.getcwd(), sys.argv[1])
p = SinkParser(sink, baseURI=base)
p._bindings[''] = p._baseURI + '#'
p.startDoc()
f = open(sys.argv[1], 'rb')
bytes = f.read()
f.close()
p.feed(bytes)
p.endDoc()
for t in g.quads((None,None,None)):
print t
if __name__ == '__main__':
main()
#ends
| [] | [] | ["CWM_RUN_NS"] | [] | ["CWM_RUN_NS"] | python | 1 | 0 | |
pkg/system/phase2_creating.go | package system
import (
"encoding/json"
"fmt"
"os"
"reflect"
"strings"
"time"
nbv1 "github.com/noobaa/noobaa-operator/v2/pkg/apis/noobaa/v1alpha1"
"github.com/noobaa/noobaa-operator/v2/pkg/options"
"github.com/noobaa/noobaa-operator/v2/pkg/util"
cloudcredsv1 "github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// ReconcilePhaseCreating runs the reconcile phase
func (r *Reconciler) ReconcilePhaseCreating() error {
r.SetPhase(
nbv1.SystemPhaseCreating,
"SystemPhaseCreating",
"noobaa operator started phase 2/4 - \"Creating\"",
)
if err := r.ReconcileObject(r.ServiceAccount, r.SetDesiredServiceAccount); err != nil {
return err
}
if err := r.ReconcilePhaseCreatingForMainClusters(); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceS3, r.SetDesiredServiceS3); err != nil {
return err
}
if err := r.ReconcileObjectOptional(r.RouteS3, nil); err != nil {
return err
}
	// the credentials created by the cloud-credentials-operator sometimes take time
	// to become valid (requests sometimes return InvalidAccessKeyId for 1-2 minutes),
	// so create the credential request as early as possible to try and avoid it
if err := r.ReconcileBackingStoreCredentials(); err != nil {
r.Logger.Errorf("failed to create CredentialsRequest. will retry in phase 4. error: %v", err)
return err
}
return nil
}
// ReconcilePhaseCreatingForMainClusters reconcile all object for full deployment clusters
func (r *Reconciler) ReconcilePhaseCreatingForMainClusters() error {
// Skip if joining another NooBaa
if r.JoinSecret != nil {
return nil
}
// A failure to discover OAuth endpoints should not fail the entire reconcile phase.
oAuthEndpoints, err := util.DiscoverOAuthEndpoints()
if err != nil {
r.Logger.Warnf("Discovery of OAuth endpoints failed, got: %v", err)
}
r.OAuthEndpoints = oAuthEndpoints
if err := r.ReconcileObject(r.SecretServer, nil); err != nil {
return err
}
if err := r.UpgradeSplitDB(); err != nil {
return err
}
if err := r.ReconcileObject(r.CoreApp, r.SetDesiredCoreApp); err != nil {
return err
}
if err := r.ReconcileObject(r.NooBaaDB, r.SetDesiredNooBaaDB); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceMgmt, r.SetDesiredServiceMgmt); err != nil {
return err
}
if err := r.ReconcileObject(r.ServiceDb, r.SetDesiredServiceDB); err != nil {
return err
}
if err := r.ReconcileObjectOptional(r.RouteMgmt, nil); err != nil {
return err
}
return nil
}
// SetDesiredServiceAccount updates the ServiceAccount as desired for reconciling
func (r *Reconciler) SetDesiredServiceAccount() error {
if r.ServiceAccount.Annotations == nil {
r.ServiceAccount.Annotations = map[string]string{}
}
r.ServiceAccount.Annotations["serviceaccounts.openshift.io/oauth-redirectreference.noobaa-mgmt"] =
`{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"` + r.RouteMgmt.Name + `"}}`
return nil
}
// SetDesiredServiceMgmt updates the ServiceMgmt as desired for reconciling
func (r *Reconciler) SetDesiredServiceMgmt() error {
r.ServiceMgmt.Spec.Selector["noobaa-mgmt"] = r.Request.Name
return nil
}
// SetDesiredServiceS3 updates the ServiceS3 as desired for reconciling
func (r *Reconciler) SetDesiredServiceS3() error {
r.ServiceS3.Spec.Selector["noobaa-s3"] = r.Request.Name
return nil
}
// SetDesiredServiceDB updates the ServiceS3 as desired for reconciling
func (r *Reconciler) SetDesiredServiceDB() error {
r.ServiceDb.Spec.Selector["noobaa-db"] = r.Request.Name
return nil
}
// SetDesiredNooBaaDB updates the NooBaaDB as desired for reconciling
func (r *Reconciler) SetDesiredNooBaaDB() error {
r.NooBaaDB.Spec.Template.Labels["noobaa-db"] = r.Request.Name
r.NooBaaDB.Spec.Selector.MatchLabels["noobaa-db"] = r.Request.Name
r.NooBaaDB.Spec.ServiceName = r.ServiceDb.Name
podSpec := &r.NooBaaDB.Spec.Template.Spec
podSpec.ServiceAccountName = "noobaa"
for i := range podSpec.InitContainers {
c := &podSpec.InitContainers[i]
if c.Name == "init" {
c.Image = r.NooBaa.Status.ActualImage
}
}
for i := range podSpec.Containers {
c := &podSpec.Containers[i]
if c.Name == "db" {
c.Image = options.DBImage
if os.Getenv("NOOBAA_DB_IMAGE") != "" {
c.Image = os.Getenv("NOOBAA_DB_IMAGE")
}
if r.NooBaa.Spec.DBImage != nil {
c.Image = *r.NooBaa.Spec.DBImage
}
if r.NooBaa.Spec.DBResources != nil {
c.Resources = *r.NooBaa.Spec.DBResources
}
}
}
if r.NooBaa.Spec.ImagePullSecret == nil {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{}
} else {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{*r.NooBaa.Spec.ImagePullSecret}
}
if r.NooBaa.Spec.Tolerations != nil {
podSpec.Tolerations = r.NooBaa.Spec.Tolerations
}
if r.NooBaa.Spec.Affinity != nil {
podSpec.Affinity = r.NooBaa.Spec.Affinity
}
if r.NooBaaDB.UID == "" {
for i := range r.NooBaaDB.Spec.VolumeClaimTemplates {
pvc := &r.NooBaaDB.Spec.VolumeClaimTemplates[i]
pvc.Namespace = r.NooBaaDB.Namespace
r.Own(pvc)
			// unsetting BlockOwnerDeletion to avoid an error when trying to own the pvc:
			// "cannot set blockOwnerDeletion if an ownerReference refers to a resource you can't set finalizers on"
pvc.OwnerReferences[0].BlockOwnerDeletion = nil
switch pvc.Name {
case "db":
if r.NooBaa.Spec.DBStorageClass != nil {
pvc.Spec.StorageClassName = r.NooBaa.Spec.DBStorageClass
}
if r.NooBaa.Spec.DBVolumeResources != nil {
pvc.Spec.Resources = *r.NooBaa.Spec.DBVolumeResources
}
}
}
} else {
// when already exists we check that there is no update requested to the volumes
// otherwise we report that volume update is unsupported
for i := range r.NooBaaDB.Spec.VolumeClaimTemplates {
pvc := &r.NooBaaDB.Spec.VolumeClaimTemplates[i]
switch pvc.Name {
case "db":
currentClass := ""
desiredClass := ""
if pvc.Spec.StorageClassName != nil {
currentClass = *pvc.Spec.StorageClassName
}
if r.NooBaa.Spec.DBStorageClass != nil {
desiredClass = *r.NooBaa.Spec.DBStorageClass
}
if desiredClass != currentClass {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "DBStorageClassIsImmutable",
"spec.dbStorageClass is immutable and cannot be updated for volume %q in existing %s %q"+
" since it requires volume recreate and migrate which is unsupported by the operator",
pvc.Name, r.CoreApp.TypeMeta.Kind, r.CoreApp.Name)
}
if r.NooBaa.Spec.DBVolumeResources != nil &&
!reflect.DeepEqual(pvc.Spec.Resources, *r.NooBaa.Spec.DBVolumeResources) {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeWarning, "DBVolumeResourcesIsImmutable",
"spec.dbVolumeResources is immutable and cannot be updated for volume %q in existing %s %q"+
" since it requires volume recreate and migrate which is unsupported by the operator",
pvc.Name, r.CoreApp.TypeMeta.Kind, r.CoreApp.Name)
}
}
}
}
return nil
}
// SetDesiredCoreApp updates the CoreApp as desired for reconciling
func (r *Reconciler) SetDesiredCoreApp() error {
r.CoreApp.Spec.Template.Labels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.Template.Labels["noobaa-mgmt"] = r.Request.Name
r.CoreApp.Spec.Selector.MatchLabels["noobaa-core"] = r.Request.Name
r.CoreApp.Spec.ServiceName = r.ServiceMgmt.Name
podSpec := &r.CoreApp.Spec.Template.Spec
podSpec.ServiceAccountName = "noobaa"
coreImageChanged := false
for i := range podSpec.Containers {
c := &podSpec.Containers[i]
switch c.Name {
case "core":
if c.Image != r.NooBaa.Status.ActualImage {
coreImageChanged = true
c.Image = r.NooBaa.Status.ActualImage
}
for j := range c.Env {
switch c.Env[j].Name {
case "AGENT_PROFILE":
c.Env[j].Value = r.SetDesiredAgentProfile(c.Env[j].Value)
case "MONGODB_URL":
c.Env[j].Value = "mongodb://" + r.NooBaaDB.Name + "-0." + r.NooBaaDB.Spec.ServiceName + "/nbcore"
case "OAUTH_AUTHORIZATION_ENDPOINT":
if r.OAuthEndpoints != nil {
c.Env[j].Value = r.OAuthEndpoints.AuthorizationEndpoint
}
case "OAUTH_TOKEN_ENDPOINT":
if r.OAuthEndpoints != nil {
c.Env[j].Value = r.OAuthEndpoints.TokenEndpoint
}
}
}
util.ReflectEnvVariable(&c.Env, "HTTP_PROXY")
util.ReflectEnvVariable(&c.Env, "HTTPS_PROXY")
util.ReflectEnvVariable(&c.Env, "NO_PROXY")
if r.NooBaa.Spec.CoreResources != nil {
c.Resources = *r.NooBaa.Spec.CoreResources
}
}
}
if r.NooBaa.Spec.ImagePullSecret == nil {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{}
} else {
podSpec.ImagePullSecrets =
[]corev1.LocalObjectReference{*r.NooBaa.Spec.ImagePullSecret}
}
if r.NooBaa.Spec.Tolerations != nil {
podSpec.Tolerations = r.NooBaa.Spec.Tolerations
}
if r.CoreApp.UID == "" {
// generate info event for the first creation of noobaa
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"NooBaaImage", `Using NooBaa image %q for the creation of %q`, r.NooBaa.Status.ActualImage, r.NooBaa.Name)
}
} else {
if coreImageChanged {
// generate info event for the first creation of noobaa
if r.Recorder != nil {
r.Recorder.Eventf(r.NooBaa, corev1.EventTypeNormal,
"NooBaaImage", `Updating NooBaa image to %q for %q`, r.NooBaa.Status.ActualImage, r.NooBaa.Name)
}
}
}
return nil
}
// ReconcileBackingStoreCredentials creates a CredentialsRequest resource if necessary and returns
// the bucket name allowed for the credentials. nil is returned if cloud credentials are not supported
func (r *Reconciler) ReconcileBackingStoreCredentials() error {
// Skip if joining another NooBaa
if r.JoinSecret != nil {
return nil
}
if util.IsAWSPlatform() {
return r.ReconcileAWSCredentials()
}
if util.IsAzurePlatform() {
return r.ReconcileAzureCredentials()
}
return r.ReconcileRGWCredentials()
}
// ReconcileRGWCredentials creates a ceph objectstore user if a ceph objectstore exists in the same namespace
func (r *Reconciler) ReconcileRGWCredentials() error {
r.Logger.Info("Not running in AWS. will attempt to create a ceph objectstore user")
util.KubeCheck(r.CephObjectstoreUser)
if r.CephObjectstoreUser.UID != "" {
return nil
}
// create user if not already exists
// list ceph objectstores and pick the first one
r.CephObjectstoreUser.Spec.Store = ""
cephObjectStoresList := &cephv1.CephObjectStoreList{}
if util.KubeList(cephObjectStoresList, &client.ListOptions{Namespace: options.Namespace}) {
if len(cephObjectStoresList.Items) > 0 {
r.Logger.Infof("found %d ceph objectstores: %v", len(cephObjectStoresList.Items), cephObjectStoresList.Items)
// for now take the first one. need to decide what to do if multiple objectstores in one namespace
storeName := cephObjectStoresList.Items[0].ObjectMeta.Name
r.Logger.Infof("using objectstore %q as a default backing store", storeName)
r.CephObjectstoreUser.Spec.Store = storeName
} else {
r.Logger.Info("did not find any ceph objectstore to use as backing store, assuming independent mode")
}
} else {
r.Logger.Info("failed to list ceph objectstore to use as backing store, assuming independent mode")
}
if r.CephObjectstoreUser.Spec.Store == "" {
if r.NooBaa.Labels == nil || r.NooBaa.Labels["rgw-endpoint"] == "" {
r.Logger.Warn("did not find an rgw-endpoint label on the noobaa CR")
return nil
}
}
r.Own(r.CephObjectstoreUser)
// create ceph objectstore user
err := r.Client.Create(r.Ctx, r.CephObjectstoreUser)
if err != nil {
r.Logger.Errorf("got error on CephObjectstoreUser creation. error: %v", err)
return err
}
return nil
}
// ReconcileAWSCredentials creates a CredentialsRequest resource if cloud credentials operator is available
func (r *Reconciler) ReconcileAWSCredentials() error {
r.Logger.Info("Running in AWS. will create a CredentialsRequest resource")
var bucketName string
err := r.Client.Get(r.Ctx, util.ObjectKey(r.AWSCloudCreds), r.AWSCloudCreds)
if err == nil {
		// credential request already exists. get the bucket name
codec, err := cloudcredsv1.NewCodec()
if err != nil {
r.Logger.Error("error creating codec for cloud credentials providerSpec")
return err
}
awsProviderSpec := &cloudcredsv1.AWSProviderSpec{}
err = codec.DecodeProviderSpec(r.AWSCloudCreds.Spec.ProviderSpec, awsProviderSpec)
if err != nil {
r.Logger.Error("error decoding providerSpec from cloud credentials request")
return err
}
bucketName = strings.TrimPrefix(awsProviderSpec.StatementEntries[0].Resource, "arn:aws:s3:::")
r.Logger.Infof("found existing credential request for bucket %s", bucketName)
r.DefaultBackingStore.Spec.AWSS3 = &nbv1.AWSS3Spec{
TargetBucket: bucketName,
}
return nil
}
if meta.IsNoMatchError(err) || runtime.IsNotRegisteredError(err) {
// cloud credentials crd is missing. skip this stage
return nil
}
if errors.IsNotFound(err) {
// credential request does not exist. create one
r.Logger.Info("Creating CredentialsRequest resource")
bucketName = r.generateBackingStoreTargetName()
codec, err := cloudcredsv1.NewCodec()
if err != nil {
r.Logger.Error("error creating codec for cloud credentials providerSpec")
return err
}
awsProviderSpec := &cloudcredsv1.AWSProviderSpec{}
err = codec.DecodeProviderSpec(r.AWSCloudCreds.Spec.ProviderSpec, awsProviderSpec)
if err != nil {
r.Logger.Error("error decoding providerSpec from cloud credentials request")
return err
}
// fix creds request according to bucket name
awsProviderSpec.StatementEntries[0].Resource = "arn:aws:s3:::" + bucketName
awsProviderSpec.StatementEntries[1].Resource = "arn:aws:s3:::" + bucketName + "/*"
updatedProviderSpec, err := codec.EncodeProviderSpec(awsProviderSpec)
if err != nil {
r.Logger.Error("error encoding providerSpec for cloud credentials request")
return err
}
r.AWSCloudCreds.Spec.ProviderSpec = updatedProviderSpec
r.Own(r.AWSCloudCreds)
err = r.Client.Create(r.Ctx, r.AWSCloudCreds)
if err != nil {
r.Logger.Errorf("got error when trying to create credentials request for bucket %s. %v", bucketName, err)
return err
}
r.DefaultBackingStore.Spec.AWSS3 = &nbv1.AWSS3Spec{
TargetBucket: bucketName,
}
return nil
}
return err
}
// ReconcileAzureCredentials creates a CredentialsRequest resource if cloud credentials operator is available
func (r *Reconciler) ReconcileAzureCredentials() error {
r.Logger.Info("Running in Azure. will create a CredentialsRequest resource")
err := r.Client.Get(r.Ctx, util.ObjectKey(r.AzureCloudCreds), r.AzureCloudCreds)
if err == nil || meta.IsNoMatchError(err) || runtime.IsNotRegisteredError(err) {
return nil
}
if errors.IsNotFound(err) {
// credential request does not exist. create one
r.Logger.Info("Creating CredentialsRequest resource")
r.Own(r.AzureCloudCreds)
err = r.Client.Create(r.Ctx, r.AzureCloudCreds)
if err != nil {
r.Logger.Errorf("got error when trying to create credentials request for azure. %v", err)
return err
}
return nil
}
return err
}
// SetDesiredAgentProfile updates the value of the AGENT_PROFILE env
func (r *Reconciler) SetDesiredAgentProfile(profileString string) string {
agentProfile := map[string]interface{}{}
err := json.Unmarshal([]byte(profileString), &agentProfile)
if err != nil {
r.Logger.Infof("SetDesiredAgentProfile: ignore non-json AGENT_PROFILE value %q: %v", profileString, err)
}
agentProfile["image"] = r.NooBaa.Status.ActualImage
if r.NooBaa.Spec.PVPoolDefaultStorageClass != nil {
agentProfile["storage_class"] = *r.NooBaa.Spec.PVPoolDefaultStorageClass
} else {
delete(agentProfile, "storage_class")
}
profileBytes, err := json.Marshal(agentProfile)
util.Panic(err)
return string(profileBytes)
}
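// Illustrative sketch (the JSON values are assumptions, not taken from a real
// cluster): given AGENT_PROFILE `{"image":"noobaa/noobaa-core:4"}` and an
// ActualImage of "noobaa/noobaa-core:5", the returned profile is
// `{"image":"noobaa/noobaa-core:5"}`; a "storage_class" key is added only when
// Spec.PVPoolDefaultStorageClass is set and is deleted otherwise.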
// UpgradeSplitDB removes the old pvc and creates a new one with the same PV
func (r *Reconciler) UpgradeSplitDB() error {
oldPvc := &corev1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{Kind: "PersistentVolumeClaim"},
ObjectMeta: metav1.ObjectMeta{
Name: "db-noobaa-core-0",
Namespace: options.Namespace,
},
}
if util.KubeCheckQuiet(oldPvc) {
		r.Logger.Infof("UpgradeSplitDB: Old PVC found, upgrading...")
if err := r.UpgradeSplitDBSetReclaimPolicy(oldPvc, corev1.PersistentVolumeReclaimRetain); err != nil {
return err
}
if err := r.UpgradeSplitDBCreateNewPVC(oldPvc); err != nil {
return err
}
if err := r.UpgradeSplitDBSetReclaimPolicy(oldPvc, corev1.PersistentVolumeReclaimDelete); err != nil {
return err
}
if err := r.UpgradeSplitDBDeleteOldSTS(); err != nil {
return err
}
if err := r.UpgradeSplitDBDeleteOldPVC(oldPvc); err != nil {
return err
}
}
return nil
}
// UpgradeSplitDBSetReclaimPolicy sets the reclaim policy to reclaim parameter and checks it
func (r *Reconciler) UpgradeSplitDBSetReclaimPolicy(oldPvc *corev1.PersistentVolumeClaim, reclaim corev1.PersistentVolumeReclaimPolicy) error {
pv := &corev1.PersistentVolume{
TypeMeta: metav1.TypeMeta{Kind: "PersistentVolume"},
ObjectMeta: metav1.ObjectMeta{Name: oldPvc.Spec.VolumeName},
}
if !util.KubeCheck(pv) {
return fmt.Errorf("UpgradeSplitDBSetReclaimPolicy(%s): PV not found", reclaim)
}
if pv.Spec.PersistentVolumeReclaimPolicy != reclaim {
pv.Spec.PersistentVolumeReclaimPolicy = reclaim
if pv.Spec.ClaimRef != nil &&
pv.Spec.ClaimRef.Name == oldPvc.Name &&
pv.Spec.ClaimRef.Namespace == oldPvc.Namespace {
pv.Spec.ClaimRef = nil
}
util.KubeUpdate(pv)
if !util.KubeCheck(pv) {
return fmt.Errorf("UpgradeSplitDBSetReclaimPolicy(%s): PV not found after update", reclaim)
}
if pv.Spec.PersistentVolumeReclaimPolicy != reclaim {
return fmt.Errorf("UpgradeSplitDBSetReclaimPolicy(%s): PV reclaim policy could not be updated", reclaim)
}
}
return nil
}
// UpgradeSplitDBCreateNewPVC creates new pvc and checks it
func (r *Reconciler) UpgradeSplitDBCreateNewPVC(oldPvc *corev1.PersistentVolumeClaim) error {
newPvc := &corev1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{Kind: "PersistentVolumeClaim"},
ObjectMeta: metav1.ObjectMeta{
Name: "db-" + r.NooBaaDB.Name + "-0",
Namespace: options.Namespace,
},
Spec: oldPvc.Spec,
}
util.KubeCreateSkipExisting(newPvc)
time.Sleep(2 * time.Second)
if !util.KubeCheck(newPvc) {
return fmt.Errorf("UpgradeSplitDBCreateNewPVC: New PVC not found")
}
if newPvc.Status.Phase != corev1.ClaimBound {
return fmt.Errorf("UpgradeSplitDBCreateNewPVC: New PVC not bound yet")
}
if newPvc.Spec.VolumeName != oldPvc.Spec.VolumeName {
// TODO how to recover?? since this is not expected maybe just return persistent error and wait for manual fix
return fmt.Errorf("UpgradeSplitDBCreateNewPVC: New PVC bound to another PV")
}
return nil
}
// UpgradeSplitDBDeleteOldSTS deletes old STS named noobaa-core and checks it
func (r *Reconciler) UpgradeSplitDBDeleteOldSTS() error {
oldSts := &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{Kind: "StatefulSet"},
ObjectMeta: metav1.ObjectMeta{
Name: "noobaa-core",
Namespace: options.Namespace,
},
}
util.KubeDelete(oldSts)
if util.KubeCheck(oldSts) {
return fmt.Errorf("UpgradeSplitDBDeleteOldSTS: Old STS still exists")
}
return nil
}
// UpgradeSplitDBDeleteOldPVC deletes the parameter oldPvc and checks it
func (r *Reconciler) UpgradeSplitDBDeleteOldPVC(oldPVC *corev1.PersistentVolumeClaim) error {
util.KubeDelete(oldPVC)
if util.KubeCheck(oldPVC) {
return fmt.Errorf("UpgradeSplitDBDeleteOldPVC: Old PVC still exists")
}
return nil
}
| ["\"NOOBAA_DB_IMAGE\"", "\"NOOBAA_DB_IMAGE\""] | [] | ["NOOBAA_DB_IMAGE"] | [] | ["NOOBAA_DB_IMAGE"] | go | 1 | 0 | |
lib/modules/configure.go | package modules
import (
"fmt"
"io/ioutil"
"os"
"os/signal"
"reflect"
"syscall"
"time"
"github.com/davidscholberg/go-i3barjson"
"gopkg.in/yaml.v2"
)
// Block contains all functions and objects necessary to configure and update
// a single status block.
type Block struct {
I3barBlock i3barjson.Block
Config BlockConfig
}
// PreConfig is the struct used to initially unmarshal the configuration. Once
// the configuration has been fully processed, it is stored in the Config
// struct.
type PreConfig struct {
Global GlobalConfig `yaml:"global"`
Blocks []map[string]interface{} `yaml:"blocks"`
}
// Config is the root configuration struct.
type Config struct {
Global GlobalConfig
Blocks []BlockConfig
}
// GlobalConfig represents global config options.
type GlobalConfig struct {
Debug bool `yaml:"debug"`
RefreshInterval float64 `yaml:"refresh_interval"`
}
// BlockConfig is an interface for Block configuration structs.
type BlockConfig interface {
GetUpdateInterval() float64
GetUpdateSignal() int
GetBlockType() string
UpdateBlock(b *i3barjson.Block)
}
// BlockConfigBase is a base struct for Block configuration structs. It
// implements all of the methods of the BlockConfig interface except the
// UpdateBlock method. That method should be implemented by each Block
// configuration struct, which should also embed the BlockConfigBase struct as
// an anonymous field. That way, each Block configuration struct will implement
// the full BlockConfig interface.
type BlockConfigBase struct {
Type string `yaml:"type"`
UpdateInterval float64 `yaml:"update_interval"`
Label string `yaml:"label"`
Color string `yaml:"color"`
UpdateSignal int `yaml:"update_signal"`
}
// GetUpdateInterval returns the block's update interval in seconds.
func (c BlockConfigBase) GetUpdateInterval() float64 {
return c.UpdateInterval
}
// GetUpdateSignal returns the block's update signal that forces an update and
// refresh.
func (c BlockConfigBase) GetUpdateSignal() int {
return c.UpdateSignal
}
// GetBlockType returns the block's type as a string.
func (c BlockConfigBase) GetBlockType() string {
return c.Type
}
// getBlockConfigInstance returns a BlockConfig object whose underlying type is
// determined from the passed-in config map.
func getBlockConfigInstance(m map[string]interface{}) (*BlockConfig, error) {
yamlStr, err := yaml.Marshal(m)
if err != nil {
return nil, err
}
t := m["type"].(string)
switch t {
case "battery":
c := Battery{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "disk":
c := Disk{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "interface":
c := Interface{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "key":
c := KeyIndicator{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "load":
c := Load{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "memory":
c := Memory{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "raid":
c := Raid{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "temperature":
c := Temperature{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "time":
c := Time{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "uptime":
c := Uptime{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "volume":
c := Volume{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "wifi":
c := Wifi{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "zfs":
c := Zfs{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
case "command":
c := Command{}
err := yaml.Unmarshal(yamlStr, &c)
b := BlockConfig(c)
return &b, err
}
return nil, fmt.Errorf("type %s not valid", t)
}
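// Illustrative sketch (assumed input; the Time struct is assumed to embed
// BlockConfigBase like the other blocks): a decoded YAML map such as
//
//	map[string]interface{}{"type": "time", "update_interval": 5.0}
//
// is re-marshalled to YAML above and unmarshalled into Time, so the returned
// BlockConfig reports GetBlockType() == "time" and GetUpdateInterval() == 5.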
const confPathFmt = "%s/.config/goblocks/goblocks.yml"
// GetConfig loads the Goblocks configuration object.
func GetConfig(cfg *Config) error {
// TODO: set up default values
confPath := fmt.Sprintf(confPathFmt, os.Getenv("HOME"))
confStr, err := ioutil.ReadFile(confPath)
if err != nil {
return err
}
preCfg := PreConfig{}
err = yaml.Unmarshal(confStr, &preCfg)
if err != nil {
return err
}
cfg.Global = preCfg.Global
for _, m := range preCfg.Blocks {
block, err := getBlockConfigInstance(m)
if err != nil {
return err
}
cfg.Blocks = append(cfg.Blocks, *block)
}
return nil
}
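// Illustrative goblocks.yml sketch (block types and field names come from the
// structs above; the concrete values are assumptions):
//
//	global:
//	  debug: false
//	  refresh_interval: 1
//	blocks:
//	  - type: time
//	    update_interval: 1
//	  - type: memory
//	    label: "mem: "
//	    update_signal: 1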
// GetBlocks initializes and returns a Block slice based on the
// given configuration.
func GetBlocks(blockConfigSlice []BlockConfig) ([]*Block, error) {
blocks := make([]*Block, len(blockConfigSlice))
for i, blockConfig := range blockConfigSlice {
blocks[i] = &Block{
i3barjson.Block{Separator: true, SeparatorBlockWidth: 20},
blockConfig,
}
}
return blocks, nil
}
// SelectCases represents the set of channels that Goblocks selects on in the
// main program loop, as well as the functions and data to run and operate on,
// respectively.
type SelectCases struct {
Cases []reflect.SelectCase
Actions []SelectAction
Blocks []*Block
}
const sigrtmin = syscall.Signal(34)
// AddSignalSelectCases loads the select cases related to OS signals.
func (s *SelectCases) AddSignalSelectCases(blocks []*Block) {
sigReloadChan := make(chan os.Signal, 1)
signal.Notify(sigReloadChan, syscall.SIGHUP)
s.addChanSelectCase(
sigReloadChan,
SelectActionReload,
)
sigEndChan := make(chan os.Signal, 1)
signal.Notify(sigEndChan, syscall.SIGINT, syscall.SIGTERM)
s.addChanSelectCase(
sigEndChan,
SelectActionExit,
)
for _, block := range blocks {
updateSignal := block.Config.GetUpdateSignal()
if updateSignal > 0 {
sigUpdateChan := make(chan os.Signal, 1)
signal.Notify(sigUpdateChan, sigrtmin+syscall.Signal(updateSignal))
s.add(
sigUpdateChan,
func(b *Block) SelectReturn {
b.Config.UpdateBlock(&b.I3barBlock)
return SelectActionForceRefresh(b)
},
block,
)
}
}
}
// add adds a channel, action, and Block to the SelectCases object.
func (s *SelectCases) add(c interface{}, a SelectAction, b *Block) {
selectCase := reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(c),
}
s.Cases = append(s.Cases, selectCase)
s.Actions = append(s.Actions, a)
s.Blocks = append(s.Blocks, b)
}
// addChanSelectCase is a helper function that adds a non-Block channel and
// action to SelectCases. This can be used for signal handling and other non-
// block specific operations.
func (s *SelectCases) addChanSelectCase(c interface{}, a SelectAction) {
s.add(
c,
a,
nil,
)
}
// addBlockToSelectCase is a helper function to add a Block to SelectCases.
// The channel used is a time.Ticker channel set to tick according to the
// block's configuration. The SelectAction function updates the block's status
// and tells Goblocks that a refresh should occur at the next refresh interval
// tick.
func addBlockToSelectCase(s *SelectCases, b *Block, c <-chan time.Time) {
s.add(
c,
func(b *Block) SelectReturn {
b.Config.UpdateBlock(&b.I3barBlock)
return SelectActionSignalRefresh(b)
},
b,
)
}
// SelectAction is a function type that performs an action when a channel is
// selected on in the main program loop. The return value indicates some action
// for the caller to take.
type SelectAction func(*Block) SelectReturn
// SelectReturn is returned by a SelectAction type function and tells the caller
// a certain action to take.
type SelectReturn struct {
Exit bool
ForceRefresh bool
Refresh bool
Reload bool
SignalRefresh bool
}
// SelectActionExit is a helper function of type SelectAction that tells
// Goblocks to exit.
func SelectActionExit(b *Block) SelectReturn {
return SelectReturn{Exit: true}
}
// SelectActionForceRefresh is a helper function of type SelectAction that tells
// Goblocks to immediately refresh the output. This differs from
// SelectActionRefresh in that a refresh is performed regardless of whether
// SelectActionSignalRefresh has been called.
func SelectActionForceRefresh(b *Block) SelectReturn {
return SelectReturn{ForceRefresh: true}
}
// SelectActionRefresh is a helper function of type SelectAction that tells
// Goblocks to refresh the output. Note that the output is only refreshed if
// SelectActionSignalRefresh was returned at least once since the last refresh
// interval tick. This prevents needlessly refreshing the output when nothing
// changed.
func SelectActionRefresh(b *Block) SelectReturn {
return SelectReturn{Refresh: true}
}
// SelectActionReload is a helper function of type SelectAction that tells
// Goblocks to reload the configuration.
func SelectActionReload(b *Block) SelectReturn {
return SelectReturn{Reload: true}
}
// SelectActionSignalRefresh is a helper function of type SelectAction that
// tells Goblocks to signal the refresher that a refresh should be performed.
// The actual refresh won't be performed until the refresh interval timer fires
// again.
func SelectActionSignalRefresh(b *Block) SelectReturn {
return SelectReturn{SignalRefresh: true}
}
// Goblocks contains all configuration and runtime data needed for the
// application.
type Goblocks struct {
Cfg Config
SelectCases SelectCases
Tickers []*time.Ticker
StatusLine i3barjson.StatusLine
}
// NewGoblocks returns a Goblocks instance with all configuration and runtime
// data loaded in.
func NewGoblocks() (*Goblocks, error) {
gb := Goblocks{}
err := GetConfig(&gb.Cfg)
if err != nil {
return nil, err
}
// set config defaults
if gb.Cfg.Global.RefreshInterval == 0 {
gb.Cfg.Global.RefreshInterval = 1
}
blocks, err := GetBlocks(gb.Cfg.Blocks)
if err != nil {
return nil, err
}
gb.SelectCases.AddSignalSelectCases(blocks)
gb.AddBlockSelectCases(blocks)
gb.AddUpdateTickerSelectCase()
for _, block := range blocks {
gb.StatusLine = append(gb.StatusLine, &block.I3barBlock)
// update block so it's ready for first run
block.Config.UpdateBlock(&block.I3barBlock)
}
return &gb, nil
}
// AddBlockSelectCases is a helper function to add all configured Block
// objects to Goblocks' SelectCases.
func (gb *Goblocks) AddBlockSelectCases(b []*Block) {
for _, block := range b {
blockUpdateInterval := block.Config.GetUpdateInterval()
if blockUpdateInterval == 0 {
blockUpdateInterval = gb.Cfg.Global.RefreshInterval
}
ticker := time.NewTicker(
time.Duration(
blockUpdateInterval * float64(time.Second),
),
)
gb.Tickers = append(gb.Tickers, ticker)
addBlockToSelectCase(&gb.SelectCases, block, ticker.C)
}
}
// AddUpdateTickerSelectCase adds the Goblocks update ticker that controls
// refreshing the Goblocks output.
func (gb *Goblocks) AddUpdateTickerSelectCase() {
updateTicker := time.NewTicker(
time.Duration(gb.Cfg.Global.RefreshInterval * float64(time.Second)),
)
gb.SelectCases.addChanSelectCase(
updateTicker.C,
SelectActionRefresh,
)
gb.Tickers = append(gb.Tickers, updateTicker)
}
// Reset stops all tickers and resets all signal handlers.
func (gb *Goblocks) Reset() {
for _, ticker := range gb.Tickers {
ticker.Stop()
}
signal.Reset()
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
Assets/Python/Plugins/Resources/Lib/wsgiref/handlers.py | """Base classes for server/gateway implementations"""
from types import StringType
from util import FileWrapper, guess_scheme, is_hop_by_hop
from headers import Headers
import sys, os, time
__all__ = ['BaseHandler', 'SimpleHandler', 'BaseCGIHandler', 'CGIHandler']
try:
dict
except NameError:
def dict(items):
d = {}
for k,v in items:
d[k] = v
return d
# Uncomment for 2.2 compatibility.
#try:
# True
# False
#except NameError:
# True = not None
# False = not True
# Weekday and month names for HTTP date/time formatting; always English!
_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
_monthname = [None, # Dummy so we can use 1-based month numbers
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
def format_date_time(timestamp):
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
_weekdayname[wd], day, _monthname[month], year, hh, mm, ss
)
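# Example (sketch): format_date_time(0) returns 'Thu, 01 Jan 1970 00:00:00 GMT',
# the HTTP-date form used for the Date header in send_preamble() below.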
class BaseHandler:
"""Manage the invocation of a WSGI application"""
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = True
wsgi_multiprocess = True
wsgi_run_once = False
origin_server = True # We are transmitting direct to client
http_version = "1.0" # Version that should be used for response
server_software = None # String name of server software, if any
# os_environ is used to supply configuration from the OS environment:
# by default it's a copy of 'os.environ' as of import time, but you can
# override this in e.g. your __init__ method.
os_environ = dict(os.environ.items())
# Collaborator classes
wsgi_file_wrapper = FileWrapper # set to None to disable
headers_class = Headers # must be a Headers-like class
# Error handling (also per-subclass or per-instance)
traceback_limit = None # Print entire traceback to self.get_stderr()
error_status = "500 Internal Server Error"
error_headers = [('Content-Type','text/plain')]
error_body = "A server error occurred. Please contact the administrator."
# State variables (don't mess with these)
status = result = None
headers_sent = False
headers = None
bytes_sent = 0
def run(self, application):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except:
try:
self.handle_error()
except:
# If we get an error handling an error, just give up already!
self.close()
raise # ...and let the actual server figure it out.
def setup_environ(self):
"""Set up the environment for one request"""
env = self.environ = self.os_environ.copy()
self.add_cgi_vars()
env['wsgi.input'] = self.get_stdin()
env['wsgi.errors'] = self.get_stderr()
env['wsgi.version'] = self.wsgi_version
env['wsgi.run_once'] = self.wsgi_run_once
env['wsgi.url_scheme'] = self.get_scheme()
env['wsgi.multithread'] = self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
if self.wsgi_file_wrapper is not None:
env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
if self.origin_server and self.server_software:
env.setdefault('SERVER_SOFTWARE',self.server_software)
def finish_response(self):
"""Send any iterable data, then close self and the iterable
Subclasses intended for use in asynchronous servers will
want to redefine this method, such that it sets up callbacks
in the event loop to iterate over the data, and to call
'self.close()' once the response is finished.
"""
try:
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
finally:
self.close()
def get_scheme(self):
"""Return the URL scheme being used"""
return guess_scheme(self.environ)
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError,AttributeError,NotImplementedError):
pass
else:
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
def start_response(self, status, headers,exc_info=None):
"""'start_response()' callable as specified by PEP 333"""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
assert type(status) is StringType,"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert type(name) is StringType,"Header names must be strings"
assert type(val) is StringType,"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
self.status = status
self.headers = self.headers_class(headers)
return self.write
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % format_date_time(time.time())
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status)
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert type(data) is StringType,"write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
self._write(data)
self._flush()
def sendfile(self):
"""Platform-specific file transmission
Override this method in subclasses to support platform-specific
file transmission. It is only called if the application's
return iterable ('self.result') is an instance of
'self.wsgi_file_wrapper'.
This method should return a true value if it was able to actually
transmit the wrapped file-like object using a platform-specific
approach. It should return a false value if normal iteration
should be used instead. An exception can be raised to indicate
that transmission was attempted, but failed.
NOTE: this method should call 'self.send_headers()' if
'self.headers_sent' is false and it is going to attempt direct
transmission of the file.
"""
return False # No platform-specific transmission by default
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
# Only zero Content-Length if not set by the application (so
# that HEAD requests can be satisfied properly, see #3839)
self.headers.setdefault('Content-Length', "0")
self.send_headers()
else:
pass # XXX check if content-length was too short?
def close(self):
"""Close the iterable (if needed) and reset all instance vars
Subclasses may want to also drop the client connection.
"""
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = self.environ = None
self.bytes_sent = 0; self.headers_sent = False
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(str(self.headers))
def result_is_file(self):
"""True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
wrapper = self.wsgi_file_wrapper
return wrapper is not None and isinstance(self.result,wrapper)
def client_is_modern(self):
"""True if client can accept status and headers"""
return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
def log_exception(self,exc_info):
"""Log the 'exc_info' tuple in the server log
Subclasses may override to retarget the output or change its format.
"""
try:
from traceback import print_exception
stderr = self.get_stderr()
print_exception(
exc_info[0], exc_info[1], exc_info[2],
self.traceback_limit, stderr
)
stderr.flush()
finally:
exc_info = None
def handle_error(self):
"""Log current error, and send error output to client if possible"""
self.log_exception(sys.exc_info())
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
def error_output(self, environ, start_response):
"""WSGI mini-app to create error output
By default, this just uses the 'error_status', 'error_headers',
and 'error_body' attributes to generate an output page. It can
be overridden in a subclass to dynamically generate diagnostics,
choose an appropriate message for the user's preferred language, etc.
Note, however, that it's not recommended from a security perspective to
spit out diagnostics to any old user; ideally, you should have to do
something special to enable diagnostic output, which is why we don't
include any here!
"""
start_response(self.error_status,self.error_headers[:],sys.exc_info())
return [self.error_body]
# Pure abstract methods; *must* be overridden in subclasses
def _write(self,data):
"""Override in subclass to buffer data for send to client
It's okay if this method actually transmits the data; BaseHandler
just separates write and flush operations for greater efficiency
when the underlying system actually has such a distinction.
"""
raise NotImplementedError
def _flush(self):
"""Override in subclass to force sending of recent '_write()' calls
It's okay if this method is a no-op (i.e., if '_write()' actually
 sends the data).
"""
raise NotImplementedError
def get_stdin(self):
"""Override in subclass to return suitable 'wsgi.input'"""
raise NotImplementedError
def get_stderr(self):
"""Override in subclass to return suitable 'wsgi.errors'"""
raise NotImplementedError
def add_cgi_vars(self):
"""Override in subclass to insert CGI variables in 'self.environ'"""
raise NotImplementedError
class SimpleHandler(BaseHandler):
"""Handler that's just initialized with streams, environment, etc.
This handler subclass is intended for synchronous HTTP/1.0 origin servers,
and handles sending the entire response output, given the correct inputs.
Usage::
handler = SimpleHandler(
inp,out,err,env, multithread=False, multiprocess=True
)
handler.run(app)"""
def __init__(self,stdin,stdout,stderr,environ,
multithread=True, multiprocess=False
):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
def get_stdin(self):
return self.stdin
def get_stderr(self):
return self.stderr
def add_cgi_vars(self):
self.environ.update(self.base_env)
def _write(self,data):
self.stdout.write(data)
self._write = self.stdout.write
def _flush(self):
self.stdout.flush()
self._flush = self.stdout.flush
class BaseCGIHandler(SimpleHandler):
"""CGI-like systems using input/output/error streams and environ mapping
Usage::
handler = BaseCGIHandler(inp,out,err,env)
handler.run(app)
This handler class is useful for gateway protocols like ReadyExec and
FastCGI, that have usable input/output/error streams and an environment
mapping. It's also the base class for CGIHandler, which just uses
sys.stdin, os.environ, and so on.
The constructor also takes keyword arguments 'multithread' and
'multiprocess' (defaulting to 'True' and 'False' respectively) to control
the configuration sent to the application. It sets 'origin_server' to
False (to enable CGI-like output), and assumes that 'wsgi.run_once' is
False.
"""
origin_server = False
class CGIHandler(BaseCGIHandler):
"""CGI-based invocation via sys.stdin/stdout/stderr and os.environ
Usage::
CGIHandler().run(app)
The difference between this class and BaseCGIHandler is that it always
uses 'wsgi.run_once' of 'True', 'wsgi.multithread' of 'False', and
'wsgi.multiprocess' of 'True'. It does not take any initialization
parameters, but always uses 'sys.stdin', 'os.environ', and friends.
If you need to override any of these parameters, use BaseCGIHandler
instead.
"""
wsgi_run_once = True
# Do not allow os.environ to leak between requests in Google App Engine
# and other multi-run CGI use cases. This is not easily testable.
# See http://bugs.python.org/issue7250
os_environ = {}
def __init__(self):
BaseCGIHandler.__init__(
self, sys.stdin, sys.stdout, sys.stderr, dict(os.environ.items()),
multithread=False, multiprocess=True
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
reframe/__init__.py | # Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import sys
VERSION = '3.11.0-dev.2'
INSTALL_PREFIX = os.path.normpath(
os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
)
MIN_PYTHON_VERSION = (3, 6, 0)
# Check python version
if sys.version_info[:3] < MIN_PYTHON_VERSION:
sys.stderr.write('Unsupported Python version: '
'Python >= %d.%d.%d is required\n' % MIN_PYTHON_VERSION)
sys.exit(1)
os.environ['RFM_INSTALL_PREFIX'] = INSTALL_PREFIX
# Import important names for user tests
from reframe.core.pipeline import * # noqa: F401, F403
from reframe.core.decorators import * # noqa: F401, F403
| []
| []
| [
"RFM_INSTALL_PREFIX"
]
| [] | ["RFM_INSTALL_PREFIX"] | python | 1 | 0 | |
fstest/test_all/run.go | // Run a test
package main
import (
"bytes"
"fmt"
"go/build"
"io"
"log"
"os"
"os/exec"
"path"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/testserver"
)
// Control concurrency per backend if required
var (
oneOnlyMu sync.Mutex
oneOnly = map[string]*sync.Mutex{}
)
// Run holds info about a running test
//
// A run just runs one command line, but it can be run multiple times
// if retries are needed.
type Run struct {
// Config
Remote string // name of the test remote
Backend string // name of the backend
Path string // path to the source directory
FastList bool // add -fast-list to tests
Short bool // add -short
NoRetries bool // don't retry if set
OneOnly bool // only run test for this backend at once
NoBinary bool // set to not build a binary
SizeLimit int64 // maximum test file size
Ignore map[string]struct{}
// Internals
cmdLine []string
cmdString string
try int
err error
output []byte
failedTests []string
runFlag string
logDir string // directory to place the logs
trialName string // name/log file name of current trial
trialNames []string // list of all the trials
}
// Runs records multiple Run objects
type Runs []*Run
// Sort interface
func (rs Runs) Len() int { return len(rs) }
func (rs Runs) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }
func (rs Runs) Less(i, j int) bool {
a, b := rs[i], rs[j]
if a.Backend < b.Backend {
return true
} else if a.Backend > b.Backend {
return false
}
if a.Remote < b.Remote {
return true
} else if a.Remote > b.Remote {
return false
}
if a.Path < b.Path {
return true
} else if a.Path > b.Path {
return false
}
if !a.FastList && b.FastList {
return true
} else if a.FastList && !b.FastList {
return false
}
return false
}
// dumpOutput prints the error output
func (r *Run) dumpOutput() {
log.Println("------------------------------------------------------------")
log.Printf("---- %q ----", r.cmdString)
log.Println(string(r.output))
log.Println("------------------------------------------------------------")
}
// This converts a slice of test names into a regexp which matches
// them.
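// For example (illustrative, not taken from a real run):
//
//	testsToRegexp([]string{"TestCopy/Sub1", "TestCopy/Sub2", "TestMove"})
//
// returns "^(TestCopy|TestMove)$/^(Sub1|Sub2)$".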
func testsToRegexp(tests []string) string {
var split []map[string]struct{}
// Make a slice with maps of the used parts at each level
for _, test := range tests {
for i, name := range strings.Split(test, "/") {
if i >= len(split) {
split = append(split, make(map[string]struct{}))
}
split[i][name] = struct{}{}
}
}
var out []string
for _, level := range split {
var testsInLevel = []string{}
for name := range level {
testsInLevel = append(testsInLevel, name)
}
sort.Strings(testsInLevel)
if len(testsInLevel) > 1 {
out = append(out, "^("+strings.Join(testsInLevel, "|")+")$")
} else {
out = append(out, "^"+testsInLevel[0]+"$")
}
}
return strings.Join(out, "/")
}
var failRe = regexp.MustCompile(`(?m)^\s*--- FAIL: (Test.*?) \(`)
// findFailures looks for all the tests which failed
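// It scans the captured output for go test failure markers of the form
// "--- FAIL: TestName (1.23s)" (the name and duration are illustrative).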
func (r *Run) findFailures() {
oldFailedTests := r.failedTests
r.failedTests = nil
excludeParents := map[string]struct{}{}
ignored := 0
for _, matches := range failRe.FindAllSubmatch(r.output, -1) {
failedTest := string(matches[1])
// Skip any ignored failures
if _, found := r.Ignore[failedTest]; found {
ignored++
} else {
r.failedTests = append(r.failedTests, failedTest)
}
// Find all the parents of this test
parts := strings.Split(failedTest, "/")
for i := len(parts) - 1; i >= 1; i-- {
excludeParents[strings.Join(parts[:i], "/")] = struct{}{}
}
}
// Exclude the parents
var newTests = r.failedTests[:0]
for _, failedTest := range r.failedTests {
if _, excluded := excludeParents[failedTest]; !excluded {
newTests = append(newTests, failedTest)
}
}
r.failedTests = newTests
if len(r.failedTests) == 0 && ignored > 0 {
log.Printf("%q - Found %d ignored errors only - marking as good", r.cmdString, ignored)
r.err = nil
r.dumpOutput()
return
}
if len(r.failedTests) != 0 {
r.runFlag = testsToRegexp(r.failedTests)
} else {
r.runFlag = ""
}
if r.passed() && len(r.failedTests) != 0 {
log.Printf("%q - Expecting no errors but got: %v", r.cmdString, r.failedTests)
r.dumpOutput()
} else if !r.passed() && len(r.failedTests) == 0 {
log.Printf("%q - Expecting errors but got none: %v", r.cmdString, r.failedTests)
r.dumpOutput()
r.failedTests = oldFailedTests
}
}
// nextCmdLine returns the next command line
func (r *Run) nextCmdLine() []string {
cmdLine := r.cmdLine
if r.runFlag != "" {
cmdLine = append(cmdLine, "-test.run", r.runFlag)
}
return cmdLine
}
// trial runs a single test
func (r *Run) trial() {
cmdLine := r.nextCmdLine()
cmdString := toShell(cmdLine)
msg := fmt.Sprintf("%q - Starting (try %d/%d)", cmdString, r.try, *maxTries)
log.Println(msg)
logName := path.Join(r.logDir, r.trialName)
out, err := os.Create(logName)
if err != nil {
log.Fatalf("Couldn't create log file: %v", err)
}
defer func() {
err := out.Close()
if err != nil {
log.Fatalf("Failed to close log file: %v", err)
}
}()
_, _ = fmt.Fprintln(out, msg)
	// Early exit if --dry-run
if *dryRun {
log.Printf("Not executing as --dry-run: %v", cmdLine)
_, _ = fmt.Fprintln(out, "--dry-run is set - not running")
return
}
// Start the test server if required
finish, err := testserver.Start(r.Remote)
if err != nil {
log.Printf("%s: Failed to start test server: %v", r.Remote, err)
_, _ = fmt.Fprintf(out, "%s: Failed to start test server: %v\n", r.Remote, err)
r.err = err
return
}
defer finish()
// Internal buffer
var b bytes.Buffer
multiOut := io.MultiWriter(out, &b)
cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
cmd.Stderr = multiOut
cmd.Stdout = multiOut
cmd.Dir = r.Path
start := time.Now()
r.err = cmd.Run()
r.output = b.Bytes()
duration := time.Since(start)
r.findFailures()
if r.passed() {
msg = fmt.Sprintf("%q - Finished OK in %v (try %d/%d)", cmdString, duration, r.try, *maxTries)
} else {
msg = fmt.Sprintf("%q - Finished ERROR in %v (try %d/%d): %v: Failed %v", cmdString, duration, r.try, *maxTries, r.err, r.failedTests)
}
log.Println(msg)
_, _ = fmt.Fprintln(out, msg)
}
// passed returns true if the test passed
func (r *Run) passed() bool {
return r.err == nil
}
// GOPATH returns the current GOPATH
func GOPATH() string {
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = build.Default.GOPATH
}
return gopath
}
// BinaryName turns a package name into a binary name
func (r *Run) BinaryName() string {
binary := path.Base(r.Path) + ".test"
if runtime.GOOS == "windows" {
binary += ".exe"
}
return binary
}
// BinaryPath turns a package name into a binary path
func (r *Run) BinaryPath() string {
return path.Join(r.Path, r.BinaryName())
}
// PackagePath returns the path to the package
func (r *Run) PackagePath() string {
return path.Join(GOPATH(), "src", r.Path)
}
// MakeTestBinary makes the binary we will run
func (r *Run) MakeTestBinary() {
binary := r.BinaryPath()
binaryName := r.BinaryName()
log.Printf("%s: Making test binary %q", r.Path, binaryName)
cmdLine := []string{"go", "test", "-c"}
if *dryRun {
log.Printf("Not executing: %v", cmdLine)
return
}
cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
cmd.Dir = r.Path
err := cmd.Run()
if err != nil {
log.Fatalf("Failed to make test binary: %v", err)
}
if _, err := os.Stat(binary); err != nil {
log.Fatalf("Couldn't find test binary %q", binary)
}
}
// RemoveTestBinary removes the binary made in makeTestBinary
func (r *Run) RemoveTestBinary() {
if *dryRun {
return
}
binary := r.BinaryPath()
err := os.Remove(binary) // Delete the binary when finished
if err != nil {
log.Printf("Error removing test binary %q: %v", binary, err)
}
}
// Name returns the run name as a file name friendly string
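// For example (hypothetical values), Backend "s3", Path "backend/s3",
// Remote "TestS3:", FastList true and try 1 produce
// "s3-backend.s3-TestS3-fastlist-1".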
func (r *Run) Name() string {
ns := []string{
r.Backend,
strings.Replace(r.Path, "/", ".", -1),
r.Remote,
}
if r.FastList {
ns = append(ns, "fastlist")
}
ns = append(ns, fmt.Sprintf("%d", r.try))
s := strings.Join(ns, "-")
s = strings.Replace(s, ":", "", -1)
return s
}
// Init the Run
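// For a binary-based run this builds a command line along the lines of
// "./s3.test -test.v -test.timeout 1h0m0s -remote TestS3:" (values are
// illustrative; further flags are appended depending on the options below).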
func (r *Run) Init() {
prefix := "-test."
if r.NoBinary {
prefix = "-"
r.cmdLine = []string{"go", "test"}
} else {
r.cmdLine = []string{"./" + r.BinaryName()}
}
r.cmdLine = append(r.cmdLine, prefix+"v", prefix+"timeout", timeout.String(), "-remote", r.Remote)
if *listRetries > 0 {
r.cmdLine = append(r.cmdLine, "-list-retries", fmt.Sprint(*listRetries))
}
r.try = 1
if *verbose {
r.cmdLine = append(r.cmdLine, "-verbose")
fs.Config.LogLevel = fs.LogLevelDebug
}
if *runOnly != "" {
r.cmdLine = append(r.cmdLine, prefix+"run", *runOnly)
}
if r.FastList {
r.cmdLine = append(r.cmdLine, "-fast-list")
}
if r.Short {
r.cmdLine = append(r.cmdLine, "-short")
}
if r.SizeLimit > 0 {
r.cmdLine = append(r.cmdLine, "-size-limit", strconv.FormatInt(r.SizeLimit, 10))
}
r.cmdString = toShell(r.cmdLine)
}
// Logs returns all the log names
func (r *Run) Logs() []string {
return r.trialNames
}
// FailedTests returns the failed tests as a comma separated string, limiting the number
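// With eight failures it would return something like
// "TestA, TestB, TestC, TestD, TestE, … (3 more)" (names are illustrative).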
func (r *Run) FailedTests() string {
const maxTests = 5
ts := r.failedTests
if len(ts) > maxTests {
ts = ts[:maxTests:maxTests]
ts = append(ts, fmt.Sprintf("… (%d more)", len(r.failedTests)-maxTests))
}
return strings.Join(ts, ", ")
}
// Run runs all the trials for this test
func (r *Run) Run(logDir string, result chan<- *Run) {
if r.OneOnly {
oneOnlyMu.Lock()
mu := oneOnly[r.Backend]
if mu == nil {
mu = new(sync.Mutex)
oneOnly[r.Backend] = mu
}
oneOnlyMu.Unlock()
mu.Lock()
defer mu.Unlock()
}
r.Init()
r.logDir = logDir
for r.try = 1; r.try <= *maxTries; r.try++ {
r.trialName = r.Name() + ".txt"
r.trialNames = append(r.trialNames, r.trialName)
log.Printf("Starting run with log %q", r.trialName)
r.trial()
if r.passed() || r.NoRetries {
break
}
}
if !r.passed() {
r.dumpOutput()
}
result <- r
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
vendor/github.com/containers/buildah/imagebuildah/stage_executor.go | package imagebuildah
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/containers/buildah"
"github.com/containers/buildah/copier"
"github.com/containers/buildah/define"
buildahdocker "github.com/containers/buildah/docker"
"github.com/containers/buildah/internal"
"github.com/containers/buildah/pkg/rusage"
"github.com/containers/buildah/util"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/chrootarchive"
docker "github.com/fsouza/go-dockerclient"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/openshift/imagebuilder"
"github.com/openshift/imagebuilder/dockerfile/parser"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// StageExecutor bundles up what we need to know when executing one stage of a
// (possibly multi-stage) build.
// Each stage may need to produce an image to be used as the base in a later
// stage (with the last stage's image being the end product of the build), and
// it may need to leave its working container in place so that the container's
// root filesystem's contents can be used as the source for a COPY instruction
// in a later stage.
// Each stage has its own base image, so it starts with its own configuration
// and set of volumes.
// If we're naming the result of the build, only the last stage will apply that
// name to the image that it produces.
type StageExecutor struct {
ctx context.Context
executor *Executor
log func(format string, args ...interface{})
index int
stages imagebuilder.Stages
name string
builder *buildah.Builder
preserved int
volumes imagebuilder.VolumeSet
volumeCache map[string]string
volumeCacheInfo map[string]os.FileInfo
mountPoint string
output string
containerIDs []string
stage *imagebuilder.Stage
}
// Preserve informs the stage executor that from this point on, it needs to
// ensure that only COPY and ADD instructions can modify the contents of this
// directory or anything below it.
// The StageExecutor handles this by caching the contents of directories which
// have been marked this way before executing a RUN instruction, invalidating
// that cache when an ADD or COPY instruction sets any location under the
// directory as the destination, and using the cache to reset the contents of
// the directory tree after processing each RUN instruction.
// It would be simpler if we could just mark the directory as a read-only bind
// mount of itself during Run(), but the directory is expected to remain
// writeable while the RUN instruction is being handled, even if any changes
// made within the directory are ultimately discarded.
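// For example (illustrative): after a "VOLUME /var/lib/data" instruction, a
// later RUN step may still write to /var/lib/data, but the directory's
// contents are restored from the cached archive once the step completes.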
func (s *StageExecutor) Preserve(path string) error {
logrus.Debugf("PRESERVE %q", path)
if s.volumes.Covers(path) {
// This path is already a subdirectory of a volume path that
// we're already preserving, so there's nothing new to be done
// except ensure that it exists.
createdDirPerms := os.FileMode(0755)
if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return errors.Wrapf(err, "error ensuring volume path exists")
}
if err := s.volumeCacheInvalidate(path); err != nil {
return errors.Wrapf(err, "error ensuring volume path %q is preserved", filepath.Join(s.mountPoint, path))
}
return nil
}
// Figure out where the cache for this volume would be stored.
s.preserved++
cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
if err != nil {
return errors.Errorf("unable to locate temporary directory for container")
}
cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
// Save info about the top level of the location that we'll be archiving.
var archivedPath string
// Try and resolve the symlink (if one exists)
// Set archivedPath and path based on whether a symlink is found or not
if evaluated, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, path), copier.EvalOptions{}); err == nil {
symLink, err := filepath.Rel(s.mountPoint, evaluated)
if err != nil {
return errors.Wrapf(err, "making evaluated path %q relative to %q", evaluated, s.mountPoint)
}
if strings.HasPrefix(symLink, ".."+string(os.PathSeparator)) {
return errors.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint)
}
archivedPath = evaluated
path = string(os.PathSeparator) + symLink
} else {
return errors.Wrapf(err, "error evaluating path %q", path)
}
st, err := os.Stat(archivedPath)
if os.IsNotExist(err) {
createdDirPerms := os.FileMode(0755)
if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return errors.Wrapf(err, "error ensuring volume path exists")
}
st, err = os.Stat(archivedPath)
}
if err != nil {
logrus.Debugf("error reading info about %q: %v", archivedPath, err)
return err
}
s.volumeCacheInfo[path] = st
if !s.volumes.Add(path) {
// This path is not a subdirectory of a volume path that we're
// already preserving, so adding it to the list should work.
return errors.Errorf("error adding %q to the volume cache", path)
}
s.volumeCache[path] = cacheFile
// Now prune cache files for volumes that are now supplanted by this one.
removed := []string{}
for cachedPath := range s.volumeCache {
// Walk our list of cached volumes, and check that they're
// still in the list of locations that we need to cache.
found := false
for _, volume := range s.volumes {
if volume == cachedPath {
// We need to keep this volume's cache.
found = true
break
}
}
if !found {
// We don't need to keep this volume's cache. Make a
// note to remove it.
removed = append(removed, cachedPath)
}
}
// Actually remove the caches that we decided to remove.
for _, cachedPath := range removed {
archivedPath := filepath.Join(s.mountPoint, cachedPath)
logrus.Debugf("no longer need cache of %q in %q", archivedPath, s.volumeCache[cachedPath])
if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
if os.IsNotExist(err) {
continue
}
return err
}
delete(s.volumeCache, cachedPath)
}
return nil
}
// Remove any volume cache item which will need to be re-saved because we're
// writing to part of it.
func (s *StageExecutor) volumeCacheInvalidate(path string) error {
invalidated := []string{}
for cachedPath := range s.volumeCache {
if strings.HasPrefix(path, cachedPath+string(os.PathSeparator)) {
invalidated = append(invalidated, cachedPath)
}
}
for _, cachedPath := range invalidated {
if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
if os.IsNotExist(err) {
continue
}
return err
}
archivedPath := filepath.Join(s.mountPoint, cachedPath)
logrus.Debugf("invalidated volume cache %q for %q from %q", archivedPath, path, s.volumeCache[cachedPath])
}
return nil
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
if err != nil {
return nil, errors.Wrapf(err, "error evaluating volume path")
}
relativePath, err := filepath.Rel(s.mountPoint, archivedPath)
if err != nil {
return nil, errors.Wrapf(err, "error converting %q into a path relative to %q", archivedPath, s.mountPoint)
}
if strings.HasPrefix(relativePath, ".."+string(os.PathSeparator)) {
return nil, errors.Errorf("error converting %q into a path relative to %q", archivedPath, s.mountPoint)
}
_, err = os.Stat(cacheFile)
if err == nil {
logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile)
continue
}
if !os.IsNotExist(err) {
return nil, err
}
createdDirPerms := os.FileMode(0755)
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return nil, errors.Wrapf(err, "error ensuring volume path exists")
}
logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
cache, err := os.Create(cacheFile)
if err != nil {
return nil, err
}
defer cache.Close()
rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint)
if err != nil {
return nil, errors.Wrapf(err, "error archiving %q", archivedPath)
}
defer rc.Close()
_, err = io.Copy(cache, rc)
if err != nil {
return nil, errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile)
}
mount := specs.Mount{
Source: archivedPath,
Destination: string(os.PathSeparator) + relativePath,
Type: "bind",
Options: []string{"private"},
}
mounts = append(mounts, mount)
}
return nil, nil
}
// Restore the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
if err != nil {
return errors.Wrapf(err, "error evaluating volume path")
}
logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
cache, err := os.Open(cacheFile)
if err != nil {
return err
}
defer cache.Close()
if err := copier.Remove(s.mountPoint, archivedPath, copier.RemoveOptions{All: true}); err != nil {
return err
}
createdDirPerms := os.FileMode(0755)
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return err
}
err = chrootarchive.Untar(cache, archivedPath, nil)
if err != nil {
return errors.Wrapf(err, "error extracting archive at %q", archivedPath)
}
if st, ok := s.volumeCacheInfo[cachedPath]; ok {
if err := os.Chmod(archivedPath, st.Mode()); err != nil {
return err
}
uid := 0
gid := 0
if st.Sys() != nil {
uid = util.UID(st)
gid = util.GID(st)
}
if err := os.Chown(archivedPath, uid, gid); err != nil {
return err
}
if err := os.Chtimes(archivedPath, st.ModTime(), st.ModTime()); err != nil {
return err
}
}
}
return nil
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err error) {
for cachedPath := range s.volumeCache {
err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{})
if err != nil {
return nil, errors.Wrapf(err, "ensuring volume exists")
}
volumePath := filepath.Join(s.mountPoint, cachedPath)
mount := specs.Mount{
Source: volumePath,
Destination: cachedPath,
Options: []string{"O", "private"},
}
mounts = append(mounts, mount)
}
return mounts, nil
}
// Reset the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestoreOverlay() error {
return nil
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
func (s *StageExecutor) volumeCacheSave() (mounts []specs.Mount, err error) {
switch s.executor.store.GraphDriverName() {
case "overlay":
return s.volumeCacheSaveOverlay()
}
return s.volumeCacheSaveVFS()
}
// Reset the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestore() error {
switch s.executor.store.GraphDriverName() {
case "overlay":
return s.volumeCacheRestoreOverlay()
}
return s.volumeCacheRestoreVFS()
}
// Copy copies data into the working tree. The "Download" field is how
// imagebuilder tells us the instruction was "ADD" and not "COPY".
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
s.builder.ContentDigester.Restart()
for _, copy := range copies {
if copy.Download {
logrus.Debugf("ADD %#v, %#v", excludes, copy)
} else {
logrus.Debugf("COPY %#v, %#v", excludes, copy)
}
if err := s.volumeCacheInvalidate(copy.Dest); err != nil {
return err
}
var sources []string
// The From field says to read the content from another
// container. Update the ID mappings and
// all-content-comes-from-below-this-directory value.
var idMappingOptions *define.IDMappingOptions
var copyExcludes []string
stripSetuid := false
stripSetgid := false
preserveOwnership := false
contextDir := s.executor.contextDir
if len(copy.From) > 0 {
// If from has an argument within it, resolve it to its
// value. Otherwise just return the value found.
from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
if fromErr != nil {
return errors.Wrapf(fromErr, "unable to resolve argument %q", copy.From)
}
if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
return err
}
if other, ok := s.executor.stages[from]; ok && other.index < s.index {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[copy.From]; ok {
contextDir = builder.MountPoint
idMappingOptions = &builder.IDMappingOptions
} else {
return errors.Errorf("the stage %q has not been built", copy.From)
}
preserveOwnership = true
copyExcludes = excludes
} else {
copyExcludes = append(s.executor.excludes, excludes...)
stripSetuid = true // did this change between 18.06 and 19.03?
stripSetgid = true // did this change between 18.06 and 19.03?
}
for _, src := range copy.Src {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
// Source is a URL, allowed for ADD but not COPY.
if copy.Download {
sources = append(sources, src)
} else {
// returns an error to be compatible with docker
return errors.Errorf("source can't be a URL for COPY")
}
} else {
sources = append(sources, filepath.Join(contextDir, src))
}
}
options := buildah.AddAndCopyOptions{
Chmod: copy.Chmod,
Chown: copy.Chown,
PreserveOwnership: preserveOwnership,
ContextDir: contextDir,
Excludes: copyExcludes,
IgnoreFile: s.executor.ignoreFile,
IDMappingOptions: idMappingOptions,
StripSetuidBit: stripSetuid,
StripSetgidBit: stripSetgid,
}
if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
return err
}
}
return nil
}
// Returns a map of StageName/ImageName:internal.StageMountDetails for RunOpts if any --mount with from is provided
// The stage can automatically clean up these mounts when it is removed.
// Check whether the RUN contains `--mount` with `from`. If so, pre-mount images or stages from the executor for Run.
// Stages mounted here will be used by Run().
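// An illustrative instruction that triggers this (names are hypothetical):
//
//	RUN --mount=type=bind,from=builder,source=/build/out,target=/in make install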
func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) {
stageMountPoints := make(map[string]internal.StageMountDetails)
for _, flag := range mountList {
if strings.Contains(flag, "from") {
arr := strings.SplitN(flag, ",", 2)
if len(arr) < 2 {
return nil, errors.Errorf("Invalid --mount command: %s", flag)
}
tokens := strings.Split(arr[1], ",")
for _, val := range tokens {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "from":
if len(kv) == 1 {
return nil, errors.Errorf("unable to resolve argument for `from=`: bad argument")
}
if kv[1] == "" {
return nil, errors.Errorf("unable to resolve argument for `from=`: from points to an empty value")
}
from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments())
if fromErr != nil {
return nil, errors.Wrapf(fromErr, "unable to resolve argument %q", kv[1])
}
// If the source's name corresponds to the
// result of an earlier stage, wait for that
// stage to finish being built.
if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
return nil, err
}
if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
stageMountPoints[from] = internal.StageMountDetails{IsStage: true, MountPoint: otherStage.mountPoint}
break
} else {
mountPoint, err := s.getImageRootfs(s.ctx, from)
if err != nil {
return nil, errors.Errorf("%s from=%s: no stage or image found with that name", flag, from)
}
stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint}
break
}
default:
continue
}
}
}
}
return stageMountPoints, nil
}
// Run executes a RUN instruction using the stage's current working container
// as a root directory.
func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
logrus.Debugf("RUN %#v, %#v", run, config)
stageMountPoints, err := s.runStageMountPoints(run.Mounts)
if err != nil {
return err
}
if s.builder == nil {
return errors.Errorf("no build container available")
}
stdin := s.executor.in
if stdin == nil {
devNull, err := os.Open(os.DevNull)
if err != nil {
return errors.Errorf("error opening %q for reading: %v", os.DevNull, err)
}
defer devNull.Close()
stdin = devNull
}
options := buildah.RunOptions{
Logger: s.executor.logger,
Hostname: config.Hostname,
Runtime: s.executor.runtime,
Args: s.executor.runtimeArgs,
NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "",
Mounts: append([]Mount{}, s.executor.transientMounts...),
Env: config.Env,
User: config.User,
WorkingDir: config.WorkingDir,
Entrypoint: config.Entrypoint,
ContextDir: s.executor.contextDir,
Cmd: config.Cmd,
Stdin: stdin,
Stdout: s.executor.out,
Stderr: s.executor.err,
Quiet: s.executor.quiet,
NamespaceOptions: s.executor.namespaceOptions,
Terminal: buildah.WithoutTerminal,
Secrets: s.executor.secrets,
SSHSources: s.executor.sshsources,
RunMounts: run.Mounts,
StageMountPoints: stageMountPoints,
SystemContext: s.executor.systemContext,
}
if config.NetworkDisabled {
options.ConfigureNetwork = buildah.NetworkDisabled
} else {
options.ConfigureNetwork = buildah.NetworkEnabled
}
args := run.Args
if run.Shell {
if len(config.Shell) > 0 && s.builder.Format == define.Dockerv2ImageManifest {
args = append(config.Shell, args...)
} else {
args = append([]string{"/bin/sh", "-c"}, args...)
}
}
mounts, err := s.volumeCacheSave()
if err != nil {
return err
}
options.Mounts = append(options.Mounts, mounts...)
err = s.builder.Run(args, options)
if err2 := s.volumeCacheRestore(); err2 != nil {
if err == nil {
return err2
}
}
return err
}
// UnrecognizedInstruction is called when we encounter an instruction that the
// imagebuilder parser didn't understand.
func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", strings.ToUpper(step.Command))
err := fmt.Sprintf(errStr+"%#v", step)
if s.executor.ignoreUnrecognizedInstructions {
logrus.Debugf(err)
return nil
}
switch logrus.GetLevel() {
case logrus.ErrorLevel:
s.executor.logger.Errorf(errStr)
case logrus.DebugLevel:
logrus.Debugf(err)
default:
s.executor.logger.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
}
return errors.Errorf(err)
}
// prepare creates a working container based on the specified image, or if one
// isn't specified, the first argument passed to the first FROM instruction we
// can find in the stage's parsed tree.
func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBConfig, rebase bool, pullPolicy define.PullPolicy) (builder *buildah.Builder, err error) {
stage := s.stage
ib := stage.Builder
node := stage.Node
if from == "" {
base, err := ib.From(node)
if err != nil {
logrus.Debugf("prepare(node.Children=%#v)", node.Children)
return nil, errors.Wrapf(err, "error determining starting point for build")
}
from = base
}
displayFrom := from
// stage.Name will be a numeric string for all stages without an "AS" clause
asImageName := stage.Name
if asImageName != "" {
if _, err := strconv.Atoi(asImageName); err != nil {
displayFrom = from + " AS " + asImageName
}
}
if initializeIBConfig && rebase {
logrus.Debugf("FROM %#v", displayFrom)
if !s.executor.quiet {
s.log("FROM %s", displayFrom)
}
}
builderOptions := buildah.BuilderOptions{
Args: ib.Args,
FromImage: from,
PullPolicy: pullPolicy,
ContainerSuffix: s.executor.containerSuffix,
Registry: s.executor.registry,
BlobDirectory: s.executor.blobDirectory,
SignaturePolicyPath: s.executor.signaturePolicyPath,
ReportWriter: s.executor.reportWriter,
SystemContext: s.executor.systemContext,
Isolation: s.executor.isolation,
NamespaceOptions: s.executor.namespaceOptions,
ConfigureNetwork: s.executor.configureNetwork,
CNIPluginPath: s.executor.cniPluginPath,
CNIConfigDir: s.executor.cniConfigDir,
NetworkInterface: s.executor.networkInterface,
IDMappingOptions: s.executor.idmappingOptions,
CommonBuildOpts: s.executor.commonBuildOptions,
DefaultMountsFilePath: s.executor.defaultMountsFilePath,
Format: s.executor.outputFormat,
Capabilities: s.executor.capabilities,
Devices: s.executor.devices,
MaxPullRetries: s.executor.maxPullPushRetries,
PullRetryDelay: s.executor.retryPullPushDelay,
OciDecryptConfig: s.executor.ociDecryptConfig,
Logger: s.executor.logger,
ProcessLabel: s.executor.processLabel,
MountLabel: s.executor.mountLabel,
}
builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
if err != nil {
return nil, errors.Wrapf(err, "error creating build container")
}
	// If the executor's ProcessLabel and MountLabel are empty, this is the first stage.
	// Make sure we share the first stage's ProcessLabel and MountLabel with all subsequent stages.
	// Doing this ensures that one stage in the same build can mount another stage even if `selinux`
// is enabled.
if s.executor.mountLabel == "" && s.executor.processLabel == "" {
s.executor.mountLabel = builder.MountLabel
s.executor.processLabel = builder.ProcessLabel
}
if initializeIBConfig {
volumes := map[string]struct{}{}
for _, v := range builder.Volumes() {
volumes[v] = struct{}{}
}
ports := map[docker.Port]struct{}{}
for _, p := range builder.Ports() {
ports[docker.Port(p)] = struct{}{}
}
dConfig := docker.Config{
Hostname: builder.Hostname(),
Domainname: builder.Domainname(),
User: builder.User(),
Env: builder.Env(),
Cmd: builder.Cmd(),
Image: from,
Volumes: volumes,
WorkingDir: builder.WorkDir(),
Entrypoint: builder.Entrypoint(),
Labels: builder.Labels(),
Shell: builder.Shell(),
StopSignal: builder.StopSignal(),
OnBuild: builder.OnBuild(),
ExposedPorts: ports,
}
var rootfs *docker.RootFS
if builder.Docker.RootFS != nil {
rootfs = &docker.RootFS{
Type: builder.Docker.RootFS.Type,
}
for _, id := range builder.Docker.RootFS.DiffIDs {
rootfs.Layers = append(rootfs.Layers, id.String())
}
}
dImage := docker.Image{
Parent: builder.FromImage,
ContainerConfig: dConfig,
Container: builder.Container,
Author: builder.Maintainer(),
Architecture: builder.Architecture(),
RootFS: rootfs,
}
dImage.Config = &dImage.ContainerConfig
err = ib.FromImage(&dImage, node)
if err != nil {
if err2 := builder.Delete(); err2 != nil {
logrus.Debugf("error deleting container which we failed to update: %v", err2)
}
return nil, errors.Wrapf(err, "error updating build context")
}
}
mountPoint, err := builder.Mount(builder.MountLabel)
if err != nil {
if err2 := builder.Delete(); err2 != nil {
logrus.Debugf("error deleting container which we failed to mount: %v", err2)
}
return nil, errors.Wrapf(err, "error mounting new container")
}
if rebase {
// Make this our "current" working container.
s.mountPoint = mountPoint
s.builder = builder
}
logrus.Debugln("Container ID:", builder.ContainerID)
return builder, nil
}
// Delete deletes the stage's working container, if we have one.
func (s *StageExecutor) Delete() (err error) {
if s.builder != nil {
err = s.builder.Delete()
s.builder = nil
}
return err
}
// stepRequiresLayer indicates whether or not the step should be followed by
// committing a layer container when creating an intermediate image.
func (*StageExecutor) stepRequiresLayer(step *imagebuilder.Step) bool {
switch strings.ToUpper(step.Command) {
case "ADD", "COPY", "RUN":
return true
}
return false
}
// getImageRootfs checks for an image matching the passed-in name in local
// storage. If it isn't found, it pulls down a copy. Then, if we don't have a
// working container root filesystem based on the image, it creates one. Then
// it returns that root filesystem's location.
func (s *StageExecutor) getImageRootfs(ctx context.Context, image string) (mountPoint string, err error) {
if builder, ok := s.executor.containerMap[image]; ok {
return builder.MountPoint, nil
}
builder, err := s.prepare(ctx, image, false, false, s.executor.pullPolicy)
if err != nil {
return "", err
}
s.executor.containerMap[image] = builder
return builder.MountPoint, nil
}
// Execute runs each of the steps in the stage's parsed tree, in turn.
func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, err error) {
var resourceUsage rusage.Rusage
stage := s.stage
ib := stage.Builder
checkForLayers := s.executor.layers && s.executor.useCache
moreStages := s.index < len(s.stages)-1
lastStage := !moreStages
imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)])
rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)])
// If the base image's name corresponds to the result of an earlier
// stage, make sure that stage has finished building an image, and
// substitute that image's ID for the base image's name here and force
// the pull policy to "never" to avoid triggering an error when it's
// set to "always", which doesn't make sense for image IDs.
// If not, then go on assuming that it's just a regular image that's
// either in local storage, or one that we have to pull from a
// registry, subject to the passed-in pull policy.
if isStage, err := s.executor.waitForStage(ctx, base, s.stages[:s.index]); isStage && err != nil {
return "", nil, err
}
pullPolicy := s.executor.pullPolicy
s.executor.stagesLock.Lock()
if stageImage, isPreviousStage := s.executor.imageMap[base]; isPreviousStage {
base = stageImage
pullPolicy = define.PullNever
}
s.executor.stagesLock.Unlock()
// Set things up so that we can log resource usage as we go.
logRusage := func() {
if rusage.Supported() {
usage, err := rusage.Get()
if err != nil {
fmt.Fprintf(s.executor.out, "error gathering resource usage information: %v\n", err)
return
}
if s.executor.rusageLogFile != nil {
fmt.Fprintf(s.executor.rusageLogFile, "%s\n", rusage.FormatDiff(usage.Subtract(resourceUsage)))
}
resourceUsage = usage
}
}
// Start counting resource usage before we potentially pull a base image.
if rusage.Supported() {
if resourceUsage, err = rusage.Get(); err != nil {
return "", nil, err
}
// Log the final incremental resource usage counter before we return.
defer logRusage()
}
// Create the (first) working container for this stage. Reinitializing
// the imagebuilder configuration may alter the list of steps we have,
// so take a snapshot of them *after* that.
if _, err := s.prepare(ctx, base, true, true, pullPolicy); err != nil {
return "", nil, err
}
children := stage.Node.Children
// A helper function to only log "COMMIT" as an explicit step if it's
// the very last step of a (possibly multi-stage) build.
logCommit := func(output string, instruction int) {
moreInstructions := instruction < len(children)-1
if moreInstructions || moreStages {
return
}
commitMessage := "COMMIT"
if output != "" {
commitMessage = fmt.Sprintf("%s %s", commitMessage, output)
}
logrus.Debugf(commitMessage)
if !s.executor.quiet {
s.log(commitMessage)
}
}
logCacheHit := func(cacheID string) {
if !s.executor.quiet {
cacheHitMessage := "--> Using cache"
fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, cacheID)
}
}
logImageID := func(imgID string) {
if len(imgID) > 11 {
imgID = imgID[0:11]
}
if s.executor.iidfile == "" {
fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
}
}
if len(children) == 0 {
// There are no steps.
if s.builder.FromImageID == "" || s.executor.squash {
// We either don't have a base image, or we need to
// squash the contents of the base image. Whichever is
// the case, we need to commit() to create a new image.
logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output); err != nil {
return "", nil, errors.Wrapf(err, "error committing base container")
}
} else if len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 {
// The image would be modified by the labels passed
// via the command line, so we need to commit.
logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output); err != nil {
return "", nil, err
}
} else {
// We don't need to squash the base image, and the
// image wouldn't be modified by the command line
// options, so just reuse the base image.
logCommit(s.output, -1)
if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil {
return "", nil, err
}
}
logImageID(imgID)
}
for i, node := range children {
logRusage()
moreInstructions := i < len(children)-1
lastInstruction := !moreInstructions
// Resolve any arguments in this instruction.
step := ib.Step()
if err := step.Resolve(node); err != nil {
return "", nil, errors.Wrapf(err, "error resolving step %+v", *node)
}
logrus.Debugf("Parsed Step: %+v", *step)
if !s.executor.quiet {
s.log("%s", step.Original)
}
// Check if there's a --from if the step command is COPY.
// Also check the chmod and the chown flags for validity.
for _, flag := range step.Flags {
command := strings.ToUpper(step.Command)
// chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from='
if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") {
return "", nil, errors.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
}
if command == "ADD" && (flag == "--chmod" || flag == "--chown") {
return "", nil, errors.Errorf("ADD only supports the --chmod=<permissions> and the --chown=<uid:gid> flags")
}
if strings.Contains(flag, "--from") && command == "COPY" {
arr := strings.Split(flag, "=")
if len(arr) != 2 {
return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
}
// If arr[1] has an argument within it, resolve it to its
// value. Otherwise just return the value found.
from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
if fromErr != nil {
return "", nil, errors.Wrapf(fromErr, "unable to resolve argument %q", arr[1])
}
// If the source's name corresponds to the
// result of an earlier stage, wait for that
// stage to finish being built.
if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
return "", nil, err
}
if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
break
} else if _, err = s.getImageRootfs(ctx, from); err != nil {
return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from)
}
break
}
}
// Determine if there are any RUN instructions to be run after
// this step. If not, we won't have to bother preserving the
// contents of any volumes declared between now and when we
// finish.
noRunsRemaining := false
if moreInstructions {
noRunsRemaining = !ib.RequiresStart(&parser.Node{Children: children[i+1:]})
}
// If we're doing a single-layer build, just process the
// instruction.
if !s.executor.layers {
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
// In case we added content, retrieve its digest.
addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
addedContentSummary := addedContentType
if addedContentDigest != "" {
if addedContentSummary != "" {
addedContentSummary = addedContentSummary + ":"
}
addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
logrus.Debugf("added content %s", addedContentSummary)
}
if moreInstructions {
// There are still more instructions to process
// for this stage. Make a note of the
// instruction in the history that we'll write
// for the image when we eventually commit it.
timestamp := time.Now().UTC()
if s.executor.timestamp != nil {
timestamp = *s.executor.timestamp
}
				s.builder.AddPrependedEmptyLayer(&timestamp, s.getCreatedBy(node, addedContentSummary), "", "")
continue
} else {
// This is the last instruction for this stage,
// so we should commit this container to create
// an image, but only if it's the last stage,
// or if it's used as the basis for a later
// stage.
if lastStage || imageIsUsedLater {
logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
logImageID(imgID)
} else {
imgID = ""
}
break
}
}
// We're in a multi-layered build.
var (
commitName string
cacheID string
err error
rebase bool
addedContentSummary string
)
// If we have to commit for this instruction, only assign the
// stage's configured output name to the last layer.
if lastInstruction {
commitName = s.output
}
// Check if there's already an image based on our parent that
// has the same change that we're about to make, so far as we
// can tell.
		// Only do this if the step we are on is not an ARG step;
		// we need to call ib.Run() to correctly put the args together before
		// determining whether a cached layer with the same build args already exists,
		// and that is done in the if block below.
if checkForLayers && step.Command != "arg" {
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
}
}
// If we didn't find a cache entry, or we need to add content
// to find the digest of the content to check for a cached
// image, run the step so that we can check if the result
// matches a cache.
if cacheID == "" {
// Process the instruction directly.
if err = ib.Run(step, s, noRunsRemaining); err != nil {
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
// In case we added content, retrieve its digest.
addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
addedContentSummary = addedContentType
if addedContentDigest != "" {
if addedContentSummary != "" {
addedContentSummary = addedContentSummary + ":"
}
addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
logrus.Debugf("added content %s", addedContentSummary)
}
// Check if there's already an image based on our parent that
// has the same change that we just made.
if checkForLayers {
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
}
}
} else {
// If the instruction would affect our configuration,
// process the configuration change so that, if we fall
// off the cache path, the filesystem changes from the
// last cache image will be all that we need, since we
// still don't want to restart using the image's
// configuration blob.
if !s.stepRequiresLayer(step) {
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
}
}
// We want to save history for other layers during a squashed build.
		// The toggle flag allows the executor to treat other instructions and layers
		// as regular builds and only perform the squash at the end.
squashToggle := false
		// Note: If the build has squash, we must try to re-use as many layers as possible if a cache is found.
		// So only perform the commit if it's the lastInstruction of the lastStage.
if cacheID != "" {
logCacheHit(cacheID)
// A suitable cached image was found, so we can just
// reuse it. If we need to add a name to the resulting
// image because it's the last step in this stage, add
// the name to the image.
imgID = cacheID
if commitName != "" {
logCommit(commitName, i)
if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
return "", nil, err
}
}
} else {
if s.executor.squash {
				// We want to save history for other layers during a squashed build.
				// The squashToggle flag allows the executor to treat other instructions and layers
				// as regular builds and only perform the squash at the end.
s.executor.squash = false
squashToggle = true
}
// We're not going to find any more cache hits, so we
// can stop looking for them.
checkForLayers = false
// Create a new image, maybe with a new layer, with the
// name for this stage if it's the last instruction.
logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
}
		// Perform the final squash for this build, as we are on the
		// last instruction of the last stage.
if (s.executor.squash || squashToggle) && lastInstruction && lastStage {
s.executor.squash = true
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing final squash step %+v", *step)
}
}
logImageID(imgID)
// Update our working container to be based off of the cached
// image, if we might need to use it as a basis for the next
// instruction, or if we need the root filesystem to match the
// image contents for the sake of a later stage that wants to
// copy content from it.
rebase = moreInstructions || rootfsIsUsedLater
if rebase {
// Since we either committed the working container or
// are about to replace it with one based on a cached
// image, add the current working container's ID to the
// list of successful intermediate containers that
// we'll clean up later.
s.containerIDs = append(s.containerIDs, s.builder.ContainerID)
// Prepare for the next step or subsequent phases by
// creating a new working container with the
// just-committed or updated cached image as its new
// base image.
// Enforce pull "never" since we already have an image
// ID that we really should not be pulling anymore (see
// containers/podman/issues/10307).
if _, err := s.prepare(ctx, imgID, false, true, define.PullNever); err != nil {
return "", nil, errors.Wrap(err, "error preparing container for next step")
}
}
}
return imgID, ref, nil
}
func historyEntriesEqual(base, derived v1.History) bool {
if base.CreatedBy != derived.CreatedBy {
return false
}
if base.Comment != derived.Comment {
return false
}
if base.Author != derived.Author {
return false
}
if base.EmptyLayer != derived.EmptyLayer {
return false
}
if base.Created != nil && derived.Created == nil {
return false
}
if base.Created == nil && derived.Created != nil {
return false
}
if base.Created != nil && derived.Created != nil && !base.Created.Equal(*derived.Created) {
return false
}
return true
}
// historyAndDiffIDsMatch returns true if a candidate history matches the
// history of our base image (if we have one), plus the current instruction,
// and if the lists of diff IDs for the images match for the part of the history
// that we're comparing.
// Used to verify whether a cache of the intermediate image exists and whether
// to run the build again.
func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDiffIDs []digest.Digest, child *parser.Node, history []v1.History, diffIDs []digest.Digest, addedContentSummary string, buildAddsLayer bool) bool {
// our history should be as long as the base's, plus one entry for what
// we're doing
if len(history) != len(baseHistory)+1 {
return false
}
// check that each entry in the base history corresponds to an entry in
// our history, and count how many of them add a layer diff
expectedDiffIDs := 0
for i := range baseHistory {
if !historyEntriesEqual(baseHistory[i], history[i]) {
return false
}
if !baseHistory[i].EmptyLayer {
expectedDiffIDs++
}
}
if len(baseDiffIDs) != expectedDiffIDs {
return false
}
if buildAddsLayer {
// we're adding a layer, so we should have exactly one more
// layer than the base image
if len(diffIDs) != expectedDiffIDs+1 {
return false
}
} else {
// we're not adding a layer, so we should have exactly the same
// layers as the base image
if len(diffIDs) != expectedDiffIDs {
return false
}
}
// compare the diffs for the layers that we should have in common
for i := range baseDiffIDs {
if diffIDs[i] != baseDiffIDs[i] {
return false
}
}
return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentSummary)
}
// getCreatedBy returns the command the image at node will be created by. If
// the passed-in CompositeDigester is not nil, it is assumed to have the digest
// information for the content if the node is ADD or COPY.
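// For example (illustrative): a "RUN make" step with no build args yields
// "/bin/sh -c make", while an ADD step yields a string of the form
// "/bin/sh -c #(nop) ADD file:<digest> in /dest ".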
func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string) string {
if node == nil {
return "/bin/sh"
}
switch strings.ToUpper(node.Value) {
case "ARG":
buildArgs := s.getBuildArgsKey()
return "/bin/sh -c #(nop) ARG " + buildArgs
case "RUN":
buildArgs := s.getBuildArgsResolvedForRun()
if buildArgs != "" {
return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:]
}
return "/bin/sh -c " + node.Original[4:]
case "ADD", "COPY":
destination := node
for destination.Next != nil {
destination = destination.Next
}
return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentSummary + " in " + destination.Value + " "
default:
return "/bin/sh -c #(nop) " + node.Original
}
}
// getBuildArgsResolvedForRun returns a string of the build-args specified during the build process.
// It excludes any build-args that were not used in the build process.
// Values for args are overridden by the values specified using ENV,
// because values from ENV always override values specified via ARG.
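// The result is a sorted, space-separated list such as
// "FOO=bar HTTP_PROXY=http://proxy.example" (values are illustrative).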
func (s *StageExecutor) getBuildArgsResolvedForRun() string {
var envs []string
configuredEnvs := make(map[string]string)
dockerConfig := s.stage.Builder.Config()
for _, env := range dockerConfig.Env {
splitv := strings.SplitN(env, "=", 2)
if len(splitv) == 2 {
configuredEnvs[splitv[0]] = splitv[1]
}
}
for key, value := range s.stage.Builder.Args {
if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
// if the value was set via ENV in the image, it is given higher
// priority, so embed that value into the build history
_, inImage := configuredEnvs[key]
if inImage {
envs = append(envs, fmt.Sprintf("%s=%s", key, configuredEnvs[key]))
} else {
envs = append(envs, fmt.Sprintf("%s=%s", key, value))
}
}
}
sort.Strings(envs)
return strings.Join(envs, " ")
}
// getBuildArgsKey returns the set of arg keys that were specified during the build process.
// It is used exclusively for recording build history.
func (s *StageExecutor) getBuildArgsKey() string {
var envs []string
for key := range s.stage.Builder.Args {
if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
envs = append(envs, key)
}
}
sort.Strings(envs)
return strings.Join(envs, " ")
}
// tagExistingImage adds names to an image already in the store
func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) {
// If we don't need to attach a name to the image, just return the cache ID.
if output == "" {
return cacheID, nil, nil
}
// Get the destination image reference.
dest, err := s.executor.resolveNameToImageRef(output)
if err != nil {
return "", nil, err
}
policyContext, err := util.GetPolicyContext(s.executor.systemContext)
if err != nil {
return "", nil, err
}
defer func() {
if destroyErr := policyContext.Destroy(); destroyErr != nil {
if err == nil {
err = destroyErr
} else {
err = errors.Wrap(err, destroyErr.Error())
}
}
}()
// Look up the source image, expecting it to be in local storage
src, err := is.Transport.ParseStoreReference(s.executor.store, cacheID)
if err != nil {
return "", nil, errors.Wrapf(err, "error getting source imageReference for %q", cacheID)
}
options := cp.Options{
RemoveSignatures: true, // more like "ignore signatures", since they don't get removed when src and dest are the same image
}
manifestBytes, err := cp.Image(ctx, policyContext, dest, src, &options)
if err != nil {
return "", nil, errors.Wrapf(err, "error copying image %q", cacheID)
}
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
return "", nil, errors.Wrapf(err, "error computing digest of manifest for image %q", cacheID)
}
img, err := is.Transport.GetStoreImage(s.executor.store, dest)
if err != nil {
return "", nil, errors.Wrapf(err, "error locating new copy of image %q (i.e., %q)", cacheID, transports.ImageName(dest))
}
var ref reference.Canonical
if dref := dest.DockerReference(); dref != nil {
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q (i.e., %q)", cacheID, transports.ImageName(dest))
}
}
return img.ID, ref, nil
}
// intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
// It verifies this by checking the parent of the top layer of the image and the history.
func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
// Get the list of images available in the image store
images, err := s.executor.store.Images()
if err != nil {
return "", errors.Wrap(err, "error getting image list from store")
}
var baseHistory []v1.History
var baseDiffIDs []digest.Digest
if s.builder.FromImageID != "" {
_, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID)
}
}
for _, image := range images {
var imageTopLayer *storage.Layer
var imageParentLayerID string
if image.TopLayer != "" {
imageTopLayer, err = s.executor.store.Layer(image.TopLayer)
if err != nil {
return "", errors.Wrapf(err, "error getting top layer info")
}
// Figure out which layer from this image we should
// compare our container's base layer to.
imageParentLayerID = imageTopLayer.ID
// If we haven't added a layer here, then our base
// layer should be the same as the image's layer. If we
// did add a layer, then our base layer should be the
// same as the parent of the image's layer.
if buildAddsLayer {
imageParentLayerID = imageTopLayer.Parent
}
}
// If the parent of the top layer of an image is equal to the current build image's top layer,
// it means that this image is potentially a cached intermediate image from a previous
// build.
if s.builder.TopLayer != imageParentLayerID {
continue
}
// Next we double check that the history of this image is equivalent to the previous
// lines in the Dockerfile up till the point we are at in the build.
manifestType, history, diffIDs, err := s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, image.ID)
if err != nil {
// It's possible that this image is for another architecture, which results
// in a custom-crafted error message that we'd have to use substring matching
// to recognize. Instead, ignore the image.
logrus.Debugf("error getting history of %q (%v), ignoring it", image.ID, err)
continue
}
// If this candidate isn't of the type that we're building, then it may have lost
// some format-specific information that a building-without-cache run wouldn't lose.
if manifestType != s.executor.outputFormat {
continue
}
// children + currNode is the point of the Dockerfile we are currently at.
if s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer) {
return image.ID, nil
}
}
return "", nil
}
// commit writes the container's contents to an image, using a passed-in tag as
// the name if there is one, generating a unique ID-based one otherwise.
func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) {
ib := s.stage.Builder
var imageRef types.ImageReference
if output != "" {
imageRef2, err := s.executor.resolveNameToImageRef(output)
if err != nil {
return "", nil, err
}
imageRef = imageRef2
}
if ib.Author != "" {
s.builder.SetMaintainer(ib.Author)
}
config := ib.Config()
if createdBy != "" {
s.builder.SetCreatedBy(createdBy)
}
s.builder.SetHostname(config.Hostname)
s.builder.SetDomainname(config.Domainname)
if s.executor.architecture != "" {
s.builder.SetArchitecture(s.executor.architecture)
}
if s.executor.os != "" {
s.builder.SetOS(s.executor.os)
}
s.builder.SetUser(config.User)
s.builder.ClearPorts()
for p := range config.ExposedPorts {
s.builder.SetPort(string(p))
}
for _, envSpec := range config.Env {
spec := strings.SplitN(envSpec, "=", 2)
s.builder.SetEnv(spec[0], spec[1])
}
s.builder.SetCmd(config.Cmd)
s.builder.ClearVolumes()
for v := range config.Volumes {
s.builder.AddVolume(v)
}
s.builder.ClearOnBuild()
for _, onBuildSpec := range config.OnBuild {
s.builder.SetOnBuild(onBuildSpec)
}
s.builder.SetWorkDir(config.WorkingDir)
s.builder.SetEntrypoint(config.Entrypoint)
s.builder.SetShell(config.Shell)
s.builder.SetStopSignal(config.StopSignal)
if config.Healthcheck != nil {
s.builder.SetHealthcheck(&buildahdocker.HealthConfig{
Test: append([]string{}, config.Healthcheck.Test...),
Interval: config.Healthcheck.Interval,
Timeout: config.Healthcheck.Timeout,
StartPeriod: config.Healthcheck.StartPeriod,
Retries: config.Healthcheck.Retries,
})
} else {
s.builder.SetHealthcheck(nil)
}
s.builder.ClearLabels()
for k, v := range config.Labels {
s.builder.SetLabel(k, v)
}
for _, labelSpec := range s.executor.labels {
label := strings.SplitN(labelSpec, "=", 2)
if len(label) > 1 {
s.builder.SetLabel(label[0], label[1])
} else {
s.builder.SetLabel(label[0], "")
}
}
s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
for _, annotationSpec := range s.executor.annotations {
annotation := strings.SplitN(annotationSpec, "=", 2)
if len(annotation) > 1 {
s.builder.SetAnnotation(annotation[0], annotation[1])
} else {
s.builder.SetAnnotation(annotation[0], "")
}
}
if imageRef != nil {
logName := transports.ImageName(imageRef)
logrus.Debugf("COMMIT %q", logName)
} else {
logrus.Debugf("COMMIT")
}
writer := s.executor.reportWriter
if s.executor.layers || !s.executor.useCache {
writer = nil
}
options := buildah.CommitOptions{
Compression: s.executor.compression,
SignaturePolicyPath: s.executor.signaturePolicyPath,
ReportWriter: writer,
PreferredManifestType: s.executor.outputFormat,
SystemContext: s.executor.systemContext,
Squash: s.executor.squash,
EmptyLayer: emptyLayer,
BlobDirectory: s.executor.blobDirectory,
SignBy: s.executor.signBy,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
HistoryTimestamp: s.executor.timestamp,
Manifest: s.executor.manifest,
UnsetEnvs: s.executor.unsetEnvs,
}
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {
return "", nil, err
}
var ref reference.Canonical
if imageRef != nil {
if dref := imageRef.DockerReference(); dref != nil {
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q", imgID)
}
}
}
return imgID, ref, nil
}
func (s *StageExecutor) EnsureContainerPath(path string) error {
return copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{})
}
| [
"\"BUILDAH_NOPIVOT\""
]
| []
| [
"BUILDAH_NOPIVOT"
]
| [] | ["BUILDAH_NOPIVOT"] | go | 1 | 0 | |
salt/utils/dictdiffer.py | # -*- coding: utf-8 -*-
'''
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
Originally posted at http://stackoverflow.com/questions/1165352/fast-comparison-between-two-python-dictionary/1165552#1165552
Available at repository: https://github.com/hughdbrown/dictdiffer
Added the ability to recursively compare dictionaries
'''
from __future__ import absolute_import, print_function, unicode_literals
import copy
try:
    from collections.abc import Mapping  # Python 3.3+
except ImportError:
    from collections import Mapping  # Python 2 fallback
from salt.ext import six
def diff(current_dict, past_dict):
return DictDiffer(current_dict, past_dict)
class DictDiffer(object):
'''
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
'''
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(list(current_dict)), set(list(past_dict))
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
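# A minimal usage sketch for diff()/DictDiffer (hypothetical values, shown for
# illustration only):
#
#   past    = {'a': 1, 'b': 2, 'c': 3}
#   current = {'a': 1, 'b': 9, 'd': 4}
#   d = diff(current, past)
#   d.added()      # -> {'d'}
#   d.removed()    # -> {'c'}
#   d.changed()    # -> {'b'}
#   d.unchanged()  # -> {'a'}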
def deep_diff(old, new, ignore=None):
ignore = ignore or []
res = {}
old = copy.deepcopy(old)
new = copy.deepcopy(new)
stack = [(old, new, False)]
while stack:
tmps = []
tmp_old, tmp_new, reentrant = stack.pop()
for key in set(list(tmp_old) + list(tmp_new)):
if key in tmp_old and key in tmp_new \
and tmp_old[key] == tmp_new[key]:
del tmp_old[key]
del tmp_new[key]
continue
if not reentrant:
if key in tmp_old and key in ignore:
del tmp_old[key]
if key in tmp_new and key in ignore:
del tmp_new[key]
if isinstance(tmp_old.get(key), Mapping) \
and isinstance(tmp_new.get(key), Mapping):
tmps.append((tmp_old[key], tmp_new[key], False))
if tmps:
stack.extend([(tmp_old, tmp_new, True)] + tmps)
if old:
res['old'] = old
if new:
res['new'] = new
return res
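# A minimal sketch of deep_diff() on nested dicts (hypothetical values, for
# illustration only). Keys that are equal in both dicts are dropped; only the
# differing leaves remain under 'old' and 'new':
#
#   old = {'x': 1, 'sub': {'y': 2, 'z': 3}}
#   new = {'x': 1, 'sub': {'y': 2, 'z': 4}}
#   deep_diff(old, new)
#   # -> {'old': {'sub': {'z': 3}}, 'new': {'sub': {'z': 4}}}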
def recursive_diff(past_dict, current_dict, ignore_missing_keys=True):
'''
Returns a RecursiveDictDiffer object that computes the recursive diffs
between two dictionaries
past_dict
Past dictionary
current_dict
Current dictionary
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
Default is True.
'''
return RecursiveDictDiffer(past_dict, current_dict, ignore_missing_keys)
class RecursiveDictDiffer(DictDiffer):
'''
Calculates a recursive diff between the current_dict and the past_dict
creating a diff in the format
{'new': new_value, 'old': old_value}
It recursively searches differences in common keys whose values are
dictionaries creating a diff dict in the format
{'common_key' : {'new': new_value, 'old': old_value}
The class overrides all DictDiffer methods, returning lists of keys and
subkeys using the . notation (i.e. 'common_key1.common_key2.changed_key')
The class provides access to:
(1) the added, removed, changed keys and subkeys (using the . notation)
``added``, ``removed``, ``changed`` methods
(2) the diffs in the format above (diffs property)
``diffs`` property
(3) a dict with the new changed values only (new_values property)
``new_values`` property
(4) a dict with the old changed values only (old_values property)
``old_values`` property
(5) a string representation of the changes in the format:
``changes_str`` property
Note:
The <_null_> value is a reserved value
.. code-block:: text
common_key1:
common_key2:
changed_key1 from '<old_str>' to '<new_str>'
changed_key2 from '[<old_elem1>, ..]' to '[<new_elem1>, ..]'
common_key3:
changed_key3 from <old_int> to <new_int>
'''
NONE_VALUE = '<_null_>'
def __init__(self, past_dict, current_dict, ignore_missing_keys):
'''
past_dict
Past dictionary.
current_dict
Current dictionary.
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
'''
super(RecursiveDictDiffer, self).__init__(current_dict, past_dict)
self._diffs = \
self._get_diffs(self.current_dict, self.past_dict,
ignore_missing_keys)
# Ignore unset values when assessing the changes
self.ignore_unset_values = True
@classmethod
def _get_diffs(cls, dict1, dict2, ignore_missing_keys):
'''
Returns a dict with the differences between dict1 and dict2
Notes:
Keys that only exist in dict2 are not included in the diff if
ignore_missing_keys is True, otherwise they are
Simple compares are done on lists
'''
ret_dict = {}
for p in dict1.keys():
if p not in dict2:
ret_dict.update({p: {'new': dict1[p], 'old': cls.NONE_VALUE}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = cls._get_diffs(dict1[p], dict2[p],
ignore_missing_keys)
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {'new': dict1[p], 'old': dict2[p]}})
if not ignore_missing_keys:
for p in dict2.keys():
if p not in dict1.keys():
ret_dict.update({p: {'new': cls.NONE_VALUE,
'old': dict2[p]}})
return ret_dict
@classmethod
def _get_values(cls, diff_dict, type='new'):
'''
Returns a dictionary with the 'new' or 'old' values in a diff dict.
type
Which values to return, 'new' or 'old'
'''
ret_dict = {}
for p in diff_dict.keys():
if type in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p][type]})
else:
ret_dict.update(
{p: cls._get_values(diff_dict[p], type=type)})
return ret_dict
@classmethod
def _get_changes(cls, diff_dict):
'''
Returns a list of string messages with the differences in a diff dict.
Each inner difference is indented two spaces deeper
'''
changes_strings = []
for p in sorted(diff_dict.keys()):
if sorted(diff_dict[p].keys()) == ['new', 'old']:
# Some string formatting
old_value = diff_dict[p]['old']
if diff_dict[p]['old'] == cls.NONE_VALUE:
old_value = 'nothing'
elif isinstance(diff_dict[p]['old'], six.string_types):
old_value = '\'{0}\''.format(diff_dict[p]['old'])
elif isinstance(diff_dict[p]['old'], list):
old_value = '\'{0}\''.format(
', '.join(diff_dict[p]['old']))
new_value = diff_dict[p]['new']
if diff_dict[p]['new'] == cls.NONE_VALUE:
new_value = 'nothing'
elif isinstance(diff_dict[p]['new'], six.string_types):
new_value = '\'{0}\''.format(diff_dict[p]['new'])
elif isinstance(diff_dict[p]['new'], list):
new_value = '\'{0}\''.format(', '.join(diff_dict[p]['new']))
changes_strings.append('{0} from {1} to {2}'.format(
p, old_value, new_value))
else:
sub_changes = cls._get_changes(diff_dict[p])
if sub_changes:
changes_strings.append('{0}:'.format(p))
changes_strings.extend([' {0}'.format(c)
for c in sub_changes])
return changes_strings
def added(self):
'''
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_added(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['old'] == self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_added(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix=''))
def removed(self):
'''
Returns all keys that have been removed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _removed(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_removed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['new'] == self.NONE_VALUE:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key]['new'], dict):
keys.extend(
_removed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_removed(self._diffs, prefix=''))
def changed(self):
'''
Returns all keys that have been changed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _changed(diffs, prefix):
keys = []
for key in diffs.keys():
if not isinstance(diffs[key], dict):
continue
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
continue
if self.ignore_unset_values:
if 'old' in diffs[key] and 'new' in diffs[key] and \
diffs[key]['old'] != self.NONE_VALUE and \
diffs[key]['new'] != self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
else:
if 'old' in diffs[key] and 'new' in diffs[key]:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_changed(self._diffs, prefix=''))
def unchanged(self):
'''
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _unchanged(current_dict, diffs, prefix):
keys = []
for key in current_dict.keys():
if key not in diffs:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(current_dict[key], dict):
if 'new' in diffs[key]:
# There is a diff
continue
else:
keys.extend(
_unchanged(current_dict[key],
diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))
@property
def diffs(self):
'''Returns a dict with the recursive diffs current_dict - past_dict'''
return self._diffs
@property
def new_values(self):
'''Returns a dictionary with the new values'''
return self._get_values(self._diffs, type='new')
@property
def old_values(self):
'''Returns a dictionary with the old values'''
return self._get_values(self._diffs, type='old')
@property
def changes_str(self):
'''Returns a string describing the changes'''
return '\n'.join(self._get_changes(self._diffs))
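# A minimal usage sketch for recursive_diff()/RecursiveDictDiffer (hypothetical
# values, for illustration only):
#
#   past    = {'a': 1, 'sub': {'b': 2}}
#   current = {'a': 1, 'sub': {'b': 3}}
#   rd = recursive_diff(past, current)
#   rd.diffs        # -> {'sub': {'b': {'new': 3, 'old': 2}}}
#   rd.changed()    # -> ['sub.b']
#   rd.new_values   # -> {'sub': {'b': 3}}
#   rd.old_values   # -> {'sub': {'b': 2}}
#   print(rd.changes_str)
#   # sub:
#   #   b from 2 to 3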
| []
| []
| []
| [] | [] | python | null | null | null |
vendor/github.com/gardener/gardener/pkg/client/core/listers/core/v1alpha1/shootstate.go | /*
Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// ShootStateLister helps list ShootStates.
type ShootStateLister interface {
// List lists all ShootStates in the indexer.
List(selector labels.Selector) (ret []*v1alpha1.ShootState, err error)
// ShootStates returns an object that can list and get ShootStates.
ShootStates(namespace string) ShootStateNamespaceLister
ShootStateListerExpansion
}
// shootStateLister implements the ShootStateLister interface.
type shootStateLister struct {
indexer cache.Indexer
}
// NewShootStateLister returns a new ShootStateLister.
func NewShootStateLister(indexer cache.Indexer) ShootStateLister {
return &shootStateLister{indexer: indexer}
}
// List lists all ShootStates in the indexer.
func (s *shootStateLister) List(selector labels.Selector) (ret []*v1alpha1.ShootState, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.ShootState))
})
return ret, err
}
// ShootStates returns an object that can list and get ShootStates.
func (s *shootStateLister) ShootStates(namespace string) ShootStateNamespaceLister {
return shootStateNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// ShootStateNamespaceLister helps list and get ShootStates.
type ShootStateNamespaceLister interface {
// List lists all ShootStates in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1alpha1.ShootState, err error)
// Get retrieves the ShootState from the indexer for a given namespace and name.
Get(name string) (*v1alpha1.ShootState, error)
ShootStateNamespaceListerExpansion
}
// shootStateNamespaceLister implements the ShootStateNamespaceLister
// interface.
type shootStateNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all ShootStates in the indexer for a given namespace.
func (s shootStateNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ShootState, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.ShootState))
})
return ret, err
}
// Get retrieves the ShootState from the indexer for a given namespace and name.
func (s shootStateNamespaceLister) Get(name string) (*v1alpha1.ShootState, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("shootstate"), name)
}
return obj.(*v1alpha1.ShootState), nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
tests/unit/test_validator_cli.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import os
import unittest
from txnmain.validator_cli import get_configuration
class TestValidatorCLI(unittest.TestCase):
def test_currency_home(self):
os.environ.clear()
os.environ["CURRENCYHOME"] = "/test_path"
cfg = get_configuration(args=[], config_files_required=False)
self.assertIn("CurrencyHome", cfg)
self.assertEquals(cfg["CurrencyHome"], "/test_path")
self.assertEquals(cfg["ConfigDirectory"], "/test_path/etc")
self.assertEquals(cfg["LogDirectory"], "/test_path/logs")
self.assertEquals(cfg["DataDirectory"], "/test_path/data")
def test_default_config_posix(self):
os.environ.clear()
cfg = get_configuration(args=[],
os_name='posix',
config_files_required=False)
self.assertNotIn("CurrencyHome", cfg)
self.assertEquals(cfg["ConfigDirectory"], "/etc/sawtooth-validator")
self.assertEquals(cfg["LogDirectory"], "/var/log/sawtooth-validator")
self.assertEquals(cfg["DataDirectory"], "/var/lib/sawtooth-validator")
def test_default_config_nt(self):
os.environ.clear()
cfg = get_configuration(args=[],
os_name='nt',
config_files_required=False)
self.assertNotIn("CurrencyHome", cfg)
self.assertEquals(
cfg["ConfigDirectory"],
"C:\\Program Files (x86)\\Intel\\sawtooth-validator\\conf")
self.assertEquals(
cfg["LogDirectory"],
"C:\\Program Files (x86)\\Intel\\sawtooth-validator\\logs")
self.assertEquals(
cfg["DataDirectory"],
"C:\\Program Files (x86)\\Intel\\sawtooth-validator\\data")
def test_logconfig_arg(self):
os.environ.clear()
cfg = get_configuration(args=["--log-config=Logging.js"],
config_files_required=False)
self.assertIn("LogConfigFile", cfg)
self.assertEquals(cfg["LogConfigFile"], "Logging.js")
def test_options_mapping_conf_dir(self):
os.environ.clear()
cfg = get_configuration(args=["--conf-dir=/test_path/etc"],
config_files_required=False)
self.assertIn("ConfigDirectory", cfg)
self.assertEquals(cfg["ConfigDirectory"], "/test_path/etc")
def test_options_mapping_data_dir(self):
os.environ.clear()
cfg = get_configuration(args=["--data-dir=/test_path/data"],
config_files_required=False)
self.assertIn("DataDirectory", cfg)
self.assertEquals(cfg["DataDirectory"], "/test_path/data")
def test_options_mapping_type(self):
os.environ.clear()
cfg = get_configuration(args=["--type=test"],
config_files_required=False)
self.assertIn("LedgerType", cfg)
self.assertEquals(cfg["LedgerType"], "test")
def test_options_mapping_key_file(self):
os.environ.clear()
cfg = get_configuration(args=["--keyfile=/test_path/keys/key.wif"],
config_files_required=False)
self.assertIn("KeyFile", cfg)
self.assertEquals(cfg["KeyFile"], "/test_path/keys/key.wif")
def test_options_mapping_node(self):
os.environ.clear()
cfg = get_configuration(args=["--node=test000"],
config_files_required=False)
self.assertIn("NodeName", cfg)
self.assertEquals(cfg["NodeName"], "test000")
def test_options_mapping_listen(self):
os.environ.clear()
cfg = get_configuration(args=['--listen="localhost:5500/UDP gossip"'],
config_files_required=False)
self.assertIn("Listen", cfg)
self.assertEquals(cfg["Listen"], ['"localhost:5500/UDP gossip"'])
def test_options_mapping_restore(self):
os.environ.clear()
cfg = get_configuration(args=["--restore"],
config_files_required=False)
self.assertEquals(cfg["Restore"], True)
def test_options_mapping_peers(self):
os.environ.clear()
cfg = get_configuration(args=["--peers=testpeer1"],
config_files_required=False)
self.assertIn("Peers", cfg)
self.assertIn("testpeer1", cfg["Peers"])
def test_options_mapping_url(self):
os.environ.clear()
cfg = get_configuration(args=["--url",
"http://testhost:8888,"
"http://testhost:8889",
"--url",
"http://testhost:8890"],
config_files_required=False)
self.assertIn("LedgerURL", cfg)
self.assertIn("http://testhost:8888", cfg["LedgerURL"])
self.assertIn("http://testhost:8889", cfg["LedgerURL"])
self.assertIn("http://testhost:8890", cfg["LedgerURL"])
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"CURRENCYHOME"
]
| [] | ["CURRENCYHOME"] | python | 1 | 0 | |
cmd/binance/main.go | package main
import (
"bufio"
"crypto/hmac"
"crypto/sha256"
"cryptocurrency/internal/app/binance"
"cryptocurrency/pkg/util"
"fmt"
"github.com/joho/godotenv"
"io"
"log"
"os"
"sync"
)
var unsignedValue util.UnsignedString
var wg sync.WaitGroup
func init() {
// load environment
err := godotenv.Load("../../.env")
util.CheckError(err)
unsignedValue = util.UnsignedString{
ApiKey: os.Getenv("binace_apiKey"),
HashHandler: hmac.New(sha256.New, []byte(os.Getenv("binance_apiSecretKey"))),
}
binance.Ping()
}
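// A hypothetical ../../.env sketch (illustrative only); the key names must
// match exactly those read by os.Getenv above, including the "binace_apiKey"
// spelling used in this file:
//
//   binace_apiKey=<your Binance API key>
//   binance_apiSecretKey=<your Binance API secret key>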
func command() {
//io.WriteString(os.Stdout, "please input any command: ")
scanner := bufio.NewScanner(os.Stdin)
FINISH:
for scanner.Scan() {
switch scanner.Text() {
case "server time":
go binance.ServerTime()
case "ping": go binance.Ping()
case "account info": go binance.AccountInfo(unsignedValue)
case "signed test": go binance.SignedTest()
case "quit":
break FINISH
case "help":
fmt.Println("commands: ping, server time, account info, signed test, quit")
io.WriteString(os.Stdout, "please input any command: ")
default:
io.WriteString(os.Stdout, "unknown command!\n")
io.WriteString(os.Stdout, "please input any command: ")
}
}
}
func main() {
command()
log.Println("Finished")
}
| [
"\"binace_apiKey\"",
"\"binance_apiSecretKey\""
]
| []
| [
"binace_apiKey",
"binance_apiSecretKey"
]
| [] | ["binace_apiKey", "binance_apiSecretKey"] | go | 2 | 0 | |
tests/api_connexion/endpoints/test_extra_link_endpoint.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from urllib.parse import quote_plus
from parameterized import parameterized
from airflow import DAG
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.models.baseoperator import BaseOperatorLink
from airflow.models.dagrun import DagRun
from airflow.models.xcom import XCom
from airflow.plugins_manager import AirflowPlugin
from airflow.providers.google.cloud.operators.bigquery import BigQueryExecuteQueryOperator
from airflow.security import permissions
from airflow.utils.dates import days_ago
from airflow.utils.session import provide_session
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from airflow.www import app
from tests.test_utils.api_connexion_utils import create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_runs, clear_db_xcom
from tests.test_utils.mock_plugins import mock_plugin_manager
class TestGetExtraLinks(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
with mock.patch.dict("os.environ", SKIP_DAGS_PARSING="True"), conf_vars(
{("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}
):
cls.app = app.create_app(testing=True) # type:ignore
create_user(
cls.app, # type: ignore
username="test",
role_name="Test",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAGS),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
create_user(cls.app, username="test_no_permissions", role_name="TestNoPermissions") # type: ignore
@classmethod
def tearDownClass(cls) -> None:
delete_user(cls.app, username="test") # type: ignore
delete_user(cls.app, username="test_no_permissions") # type: ignore
@provide_session
def setUp(self, session) -> None:
self.default_time = datetime(2020, 1, 1)
clear_db_runs()
clear_db_xcom()
self.dag = self._create_dag()
self.app.dag_bag.dags = {self.dag.dag_id: self.dag} # type: ignore # pylint: disable=no-member
self.app.dag_bag.sync_to_db() # type: ignore # pylint: disable=no-member
dr = DagRun(
dag_id=self.dag.dag_id,
run_id="TEST_DAG_RUN_ID",
execution_date=self.default_time,
run_type=DagRunType.MANUAL,
)
session.add(dr)
session.commit()
self.client = self.app.test_client() # type:ignore
def tearDown(self) -> None:
super().tearDown()
clear_db_runs()
clear_db_xcom()
@staticmethod
def _create_dag():
with DAG(
dag_id="TEST_DAG_ID",
default_args=dict(
start_date=days_ago(2),
),
) as dag:
BigQueryExecuteQueryOperator(task_id="TEST_SINGLE_QUERY", sql="SELECT 1")
BigQueryExecuteQueryOperator(task_id="TEST_MULTIPLE_QUERY", sql=["SELECT 1", "SELECT 2"])
return dag
@parameterized.expand(
[
(
"missing_dag",
"/api/v1/dags/INVALID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_SINGLE_QUERY/links",
"DAG not found",
'DAG with ID = "INVALID" not found',
),
(
"missing_dag_run",
"/api/v1/dags/TEST_DAG_ID/dagRuns/INVALID/taskInstances/TEST_SINGLE_QUERY/links",
"DAG Run not found",
'DAG Run with ID = "INVALID" not found',
),
(
"missing_task",
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/INVALID/links",
"Task not found",
'Task with ID = "INVALID" not found',
),
]
)
def test_should_response_404(self, name, url, expected_title, expected_detail):
del name
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
self.assertEqual(404, response.status_code)
self.assertEqual(
{
"detail": expected_detail,
"status": 404,
"title": expected_title,
"type": EXCEPTIONS_LINK_MAP[404],
},
response.json,
)
def test_should_raise_403_forbidden(self):
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_SINGLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test_no_permissions"},
)
assert response.status_code == 403
@mock_plugin_manager(plugins=[])
def test_should_response_200(self):
XCom.set(
key="job_id",
value="TEST_JOB_ID",
execution_date=self.default_time,
task_id="TEST_SINGLE_QUERY",
dag_id=self.dag.dag_id,
)
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_SINGLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(200, response.status_code, response.data)
self.assertEqual(
{"BigQuery Console": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID"}, response.json
)
@mock_plugin_manager(plugins=[])
def test_should_response_200_missing_xcom(self):
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_SINGLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(200, response.status_code, response.data)
self.assertEqual(
{"BigQuery Console": None},
response.json,
)
@mock_plugin_manager(plugins=[])
def test_should_response_200_multiple_links(self):
XCom.set(
key="job_id",
value=["TEST_JOB_ID_1", "TEST_JOB_ID_2"],
execution_date=self.default_time,
task_id="TEST_MULTIPLE_QUERY",
dag_id=self.dag.dag_id,
)
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_MULTIPLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(200, response.status_code, response.data)
self.assertEqual(
{
"BigQuery Console #1": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID_1",
"BigQuery Console #2": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID_2",
},
response.json,
)
@mock_plugin_manager(plugins=[])
def test_should_response_200_multiple_links_missing_xcom(self):
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_MULTIPLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(200, response.status_code, response.data)
self.assertEqual(
{"BigQuery Console #1": None, "BigQuery Console #2": None},
response.json,
)
def test_should_response_200_support_plugins(self):
class GoogleLink(BaseOperatorLink):
name = "Google"
def get_link(self, operator, dttm):
return "https://www.google.com"
class S3LogLink(BaseOperatorLink):
name = "S3"
operators = [BigQueryExecuteQueryOperator]
def get_link(self, operator, dttm):
return "https://s3.amazonaws.com/airflow-logs/{dag_id}/{task_id}/{execution_date}".format(
dag_id=operator.dag_id,
task_id=operator.task_id,
execution_date=quote_plus(dttm.isoformat()),
)
class AirflowTestPlugin(AirflowPlugin):
name = "test_plugin"
global_operator_extra_links = [
GoogleLink(),
]
operator_extra_links = [
S3LogLink(),
]
with mock_plugin_manager(plugins=[AirflowTestPlugin]):
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_SINGLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test"},
)
self.assertEqual(200, response.status_code, response.data)
self.assertEqual(
{
"BigQuery Console": None,
"Google": "https://www.google.com",
"S3": (
"https://s3.amazonaws.com/airflow-logs/"
"TEST_DAG_ID/TEST_SINGLE_QUERY/2020-01-01T00%3A00%3A00%2B00%3A00"
),
},
response.json,
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
model_evaluator_original.py | import time
from haversine_script import *
import numpy as np
import tensorflow as tf
import random
import pandas as p
import math
import matplotlib.pyplot as plt
import os
import argparse
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout,Activation,BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from tensorflow.keras.callbacks import Callback, TensorBoard, ModelCheckpoint, EarlyStopping
from tensorflow.keras import regularizers
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split  # used in generate_dataset below
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn import preprocessing
from sklearn.decomposition import PCA
from tensorflow.keras.models import model_from_json
from tensorflow.keras.models import load_model
#def get_exponential_distance(x,minimum,a=60):
# positive_x= x-minimum
# numerator = np.exp(positive_x.div(a))
# denominator = np.exp(-minimum/a)
# exponential_x = numerator/denominator
# exponential_x = exponential_x * 1000 #facilitating calculations
# final_x = exponential_x
# return final_x
def get_powed_distance(x,minimum,b=1.1):
positive_x= x-minimum
numerator = positive_x.pow(b)
denominator = (-minimum)**(b)
powed_x = numerator/denominator
final_x = powed_x
return final_x
def get_powed_distance_np(x,minimum,b=1.1):
positive_x= x-minimum
numerator = pow(positive_x,b)
denominator = (-minimum)**(b)
powed_x = numerator/denominator
final_x = powed_x
return final_x
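# A rough worked example of the powed-distance rescaling (values are
# illustrative only, assuming minimum=-200 dBm and b=1.1):
#   x = -200 dBm (out of range) -> (0 ** 1.1)   / (200 ** 1.1) = 0.0
#   x =  -80 dBm                -> (120 ** 1.1) / (200 ** 1.1) ≈ 0.57
#   x =    0 dBm                -> (200 ** 1.1) / (200 ** 1.1) = 1.0
# i.e. RSSI values in [-200, 0] are mapped non-linearly into [0, 1].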
def generate_dataset(components,random_state,sf_n,oor_value):
print("Creating Dataset")
file = p.read_csv('lorawan_antwerp_2019_dataset_withSF.csv')
columns = file.columns
x = file[columns[0:72]]
SF = file[columns[73:74]]
y = file[columns[75:]]
if oor_value==0:
print("Set out of range value to -200dBm")
x=x
final_x = get_powed_distance(x,-200)
if oor_value==1:
print("Set out of range value to -128dBm") #current experiment
x = x.replace(-200,200)
minimum = x.min().min() - 1
x = x.replace(200,minimum) #set dataset -200 to next posible minimum
print('minimum')
print(minimum)
final_x = get_powed_distance(x,minimum)
if oor_value==2: #rescale according to SF
print("Set out of range value according to SF")
x=np.array(x)
SF=np.array(SF)
for q in range(len(SF)):
print("Updating data",q+1)
for w in range(len(x[q])):
if x[q][w]==-200:
if SF[q]==7:
x[q][w]= -123
if SF[q]==8:
x[q][w]= -126
if SF[q]==9:
x[q][w]= -129
if SF[q]==10:
x[q][w]= -132
if SF[q]==11:
x[q][w]= -134.5
if SF[q]==12:
x[q][w]= -137
final_x = get_powed_distance_np(x,-137)
scaler_x = preprocessing.MinMaxScaler().fit(final_x)
final_x = scaler_x.transform(final_x)
scaler_y = preprocessing.MinMaxScaler().fit(y)
y= scaler_y.transform(y)
scaler_sf= preprocessing.MinMaxScaler().fit(SF)
SF=scaler_sf.transform(SF)
if components >0:
print("PCA enabled",40)
pca = PCA(n_components =components)
final_x = pca.fit_transform(final_x)
explained_variance = pca.explained_variance_ratio_
if sf_n>0:
print("SF enabled")
final_x =np.column_stack((final_x,SF))
x_train, x_test_val, y_train, y_test_val = train_test_split(final_x, y, test_size=0.3, random_state=random_state)
x_val, x_test, y_val, y_test = train_test_split(x_test_val, y_test_val, test_size=0.5, random_state=random_state)
print(x_train.shape)
print(x_val.shape)
print(x_test.shape)
else:
print("SF disabled")
x_train, x_test_val, y_train, y_test_val = train_test_split(final_x, y, test_size=0.3, random_state=random_state)
x_val, x_test, y_val, y_test = train_test_split(x_test_val, y_test_val, test_size=0.5, random_state=random_state)
print(x_train.shape)
print(x_val.shape)
print(x_test.shape)
else:
if sf_n>0:
print("SF enabled")
final_x =np.column_stack((final_x,SF))
x_train, x_test_val, y_train, y_test_val = train_test_split(final_x, y, test_size=0.3, random_state=random_state)
x_val, x_test, y_val, y_test = train_test_split(x_test_val, y_test_val, test_size=0.5, random_state=random_state)
print(x_train.shape)
print(x_val.shape)
print(x_test.shape)
else:
print("SF disabled")
x_train, x_test_val, y_train, y_test_val = train_test_split(final_x, y, test_size=0.3, random_state=random_state)
x_val, x_test, y_val, y_test = train_test_split(x_test_val, y_test_val, test_size=0.5, random_state=random_state)
print(x_train.shape)
print(x_val.shape)
print(x_test.shape)
n_of_features = x_train.shape[1]
print("Done Generating Dataset")
return x_train,y_train,x_val,y_val,x_test,y_test,n_of_features,scaler_y
def validate_model(trained_model, x_train ,y_train,x_val,y_val,x_test,y_test,scaler_y,trial_name,batch_size):
model=trained_model
y_predict = model.predict(x_test, batch_size=batch_size)
y_predict_in_val = model.predict(x_val, batch_size=batch_size)
y_predict_in_train = model.predict(x_train, batch_size=batch_size)
y_predict = scaler_y.inverse_transform(y_predict)
y_predict_in_train = scaler_y.inverse_transform(y_predict_in_train)
y_predict_in_val = scaler_y.inverse_transform(y_predict_in_val)
y_train = scaler_y.inverse_transform(y_train)
y_val = scaler_y.inverse_transform(y_val)
y_test = scaler_y.inverse_transform(y_test)
print("Train set mean error: {:.2f}".format(my_custom_haversine_error_stats(y_predict_in_train, y_train,'mean')))
print("Train set median error: {:.2f}".format(my_custom_haversine_error_stats(y_predict_in_train, y_train,'median')))
print("Train set75th perc error: {:.2f}".format(my_custom_haversine_error_stats(y_predict_in_train, y_train,'percentile',75)))
print("Val set mean error: {:.2f}".format(my_custom_haversine_error_stats(y_predict_in_val, y_val,'mean')))
print("Val set median error: {:.2f}".format(my_custom_haversine_error_stats(y_predict_in_val, y_val,'median')))
print("Val set 75th perc. error: {:.2f}".format(my_custom_haversine_error_stats(y_predict_in_val, y_val,'percentile',75)))
print("Test set mean error: {:.2f}".format(my_custom_haversine_error_stats(y_predict, y_test,'mean')))
print("Test set median error: {:.2f}".format(my_custom_haversine_error_stats(y_predict, y_test,'median')))
print("Test set 75th perc. error: {:.2f}".format(my_custom_haversine_error_stats(y_predict, y_test,'percentile',75)))
test_error_list = calculate_pairwise_error_list(y_predict,y_test)
print("Experiment completed!!!")
y_predict_lat=list()
y_predict_long=list()
y_test_lat=list()
y_test_long=list()
for x in range(len(y_predict)):
y_predict_lat.append(y_predict[x][0])
y_predict_long.append(y_predict[x][1])
y_test_lat.append(y_test[x][0])
y_test_long.append(y_test[x][1])
#plt.plot([y_predict[x][0],y_test[x][0]],[y_predict[x][1],y_test[x][1]],color='green')
plt.scatter(y_predict_lat,y_predict_long,s=0.1, marker='.',color='red',label='Predicted Pos')
plt.scatter(y_test_lat,y_test_long,s=0.1,marker='*',color='blue',label='Ground Truth Pos')
plt.title(trial_name+' Predicted Postion Map in Reduced Antwerp LoraWan Dataset')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.legend()
plt.savefig(trial_name+'_predictedmap_reduced.png',bbox_inches='tight',dpi=600)
def load_model(trial_name,n_of_features,dropout,l2,lr,random_state):
json_file = open("original_data/"+trial_name+'.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("original_data/"+trial_name+".h5")
print("Loaded model from disk")
loaded_model.compile(loss='mean_absolute_error',optimizer=Adam(lr=lr))
return loaded_model
if __name__ == '__main__':
config = tf.compat.v1.ConfigProto( device_count = {'GPU': 1 } )
sess = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(sess)
tf.debugging.set_log_device_placement(True)
parser = argparse.ArgumentParser(description="--trial-name,--pca, --sf,--oor")
parser.add_argument('--trial-name',type=str,required=True)
parser.add_argument('--pca',type=int,default=0,help='Principal Component')
parser.add_argument('--sf',type=int,default=0,help='Spreading Factor as input [0] off [1] on')
parser.add_argument('--oor',type=int,default=0,help='RSSI Out of Range Values [0]-200dBm [1]-128dBm [2]SF dependent')
args = parser.parse_args()
components=args.pca
trial_name=str(args.trial_name)
sf_n=args.sf
oor_value =args.oor
dropout = 0.15
l2 = 0.00
lr = 0.0005
batch_size= 512
random_state = 42
os.environ['PYTHONHASHSEED'] = "42"
np.random.seed(42)
tf.random.set_seed(42)
random.seed(42)
x_train,y_train,x_val,y_val,x_test,y_test,n_of_features,scaler_y = generate_dataset(components,random_state,sf_n,oor_value)
trained_model=load_model(trial_name,n_of_features,dropout,l2,lr,random_state)
validate_model(trained_model, x_train, y_train, x_val, y_val, x_test, y_test, scaler_y, trial_name, batch_size) | []
| []
| [
"PYTHONHASHSEED"
]
| [] | ["PYTHONHASHSEED"] | python | 1 | 0 | |
soracom/generated/cmd/sims_stop_packet_capture_session.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"fmt"
"net/url"
"os"
"github.com/spf13/cobra"
)
// SimsStopPacketCaptureSessionCmdSessionId holds value of 'session_id' option
var SimsStopPacketCaptureSessionCmdSessionId string
// SimsStopPacketCaptureSessionCmdSimId holds value of 'sim_id' option
var SimsStopPacketCaptureSessionCmdSimId string
func init() {
SimsStopPacketCaptureSessionCmd.Flags().StringVar(&SimsStopPacketCaptureSessionCmdSessionId, "session-id", "", TRAPI("Packet capture session ID"))
SimsStopPacketCaptureSessionCmd.Flags().StringVar(&SimsStopPacketCaptureSessionCmdSimId, "sim-id", "", TRAPI("SIM ID"))
SimsCmd.AddCommand(SimsStopPacketCaptureSessionCmd)
}
// SimsStopPacketCaptureSessionCmd defines 'stop-packet-capture-session' subcommand
var SimsStopPacketCaptureSessionCmd = &cobra.Command{
Use: "stop-packet-capture-session",
Short: TRAPI("/sims/{sim_id}/packet_capture_sessions/{session_id}/stop:post:summary"),
Long: TRAPI(`/sims/{sim_id}/packet_capture_sessions/{session_id}/stop:post:description`),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
return fmt.Errorf("unexpected arguments passed => %v", args)
}
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectSimsStopPacketCaptureSessionCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectSimsStopPacketCaptureSessionCmdParams(ac *apiClient) (*apiParams, error) {
var parsedBody interface{}
var err error
err = checkIfRequiredStringParameterIsSupplied("session_id", "session-id", "path", parsedBody, SimsStopPacketCaptureSessionCmdSessionId)
if err != nil {
return nil, err
}
err = checkIfRequiredStringParameterIsSupplied("sim_id", "sim-id", "path", parsedBody, SimsStopPacketCaptureSessionCmdSimId)
if err != nil {
return nil, err
}
return &apiParams{
method: "POST",
path: buildPathForSimsStopPacketCaptureSessionCmd("/sims/{sim_id}/packet_capture_sessions/{session_id}/stop"),
query: buildQueryForSimsStopPacketCaptureSessionCmd(),
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForSimsStopPacketCaptureSessionCmd(path string) string {
escapedSessionId := url.PathEscape(SimsStopPacketCaptureSessionCmdSessionId)
path = strReplace(path, "{"+"session_id"+"}", escapedSessionId, -1)
escapedSimId := url.PathEscape(SimsStopPacketCaptureSessionCmdSimId)
path = strReplace(path, "{"+"sim_id"+"}", escapedSimId, -1)
return path
}
func buildQueryForSimsStopPacketCaptureSessionCmd() url.Values {
result := url.Values{}
return result
}
| [
"\"SORACOM_VERBOSE\""
]
| []
| [
"SORACOM_VERBOSE"
]
| [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
update_db_ms.py | import sqlite3
import json
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
# setup spotify connection
client_id = "YOURS"
client_secret = "YOURS"
token = SpotifyClientCredentials(client_id, client_secret)
sp = spotipy.Spotify(auth_manager=token)
# open db
dbfile = r".\spot_songs_py.db"
spdb = sqlite3.connect(dbfile)
cur = spdb.cursor()
cur.execute("SELECT * FROM Plays WHERE total_ms_song IS NULL;")
rows = cur.fetchall()
for row in rows:
query = sp.track(row[14])
total_ms = query['duration_ms']
sqls = "UPDATE Plays SET total_ms_song = ? WHERE spotify_track_uri = ?"
inserts = (total_ms,row[14])
cur.execute(sqls,inserts)
spdb.commit()
print(row[3] + " " + str(total_ms))
spdb.close()
| []
| []
| []
| [] | [] | python | null | null | null |
kolibri/utils/sanity_checks.py | import logging
import os
import shutil
import sys
import portend
from .conf import OPTIONS
from .server import get_status
from .server import LISTEN_ADDRESS
from .server import NotRunning
logger = logging.getLogger(__name__)
PORT_AVAILABILITY_CHECK_TIMEOUT = 2
def check_other_kolibri_running(port):
"""
Make sure there are no other Kolibri instances running before starting the server.
"""
try:
# Check if there are other kolibri instances running
# If there are, then we need to stop users from starting kolibri again.
get_status()
logger.error(
"There is another Kolibri server running. "
"Please use `kolibri stop` and try again."
)
sys.exit(1)
except NotRunning:
# In case that something other than Kolibri occupies the port,
# check the port's availability.
check_port_availability(LISTEN_ADDRESS, port)
def check_port_availability(host, port):
"""
Make sure the port is available for the server to start.
"""
try:
portend.free(host, port, timeout=PORT_AVAILABILITY_CHECK_TIMEOUT)
except portend.Timeout:
# Port is occupied
logger.error(
"Port {} is occupied.\n"
"Please check that you do not have other processes "
"running on this port and try again.\n".format(port)
)
sys.exit(1)
def check_content_directory_exists_and_writable():
"""
Make sure the content directory of Kolibri exists and is writable.
"""
content_directory = OPTIONS["Paths"]["CONTENT_DIR"]
# Check if the content directory exists
if not os.path.exists(content_directory):
try:
os.makedirs(content_directory)
except OSError:
logger.error(
"The content directory {} does not exist and cannot be created.".format(
content_directory
)
)
sys.exit(1)
# Check if the directory is writable
if not os.access(content_directory, os.W_OK):
logger.error(
"The content directory {} is not writable.".format(content_directory)
)
sys.exit(1)
def check_log_file_location():
"""
Starting from Kolibri v0.12.4, log files are going to be renamed and moved
from KOLIBRI_HOME directory to KOLIBRI_HOME/logs directory.
"""
home = os.environ["KOLIBRI_HOME"]
log_location_update = {}
# Old log file names
old_daemon_log = "server.log"
old_kolibri_log = "kolibri.log"
old_debug_log = "debug.log"
# New log file names
log_location_update[old_daemon_log] = "daemon.txt"
log_location_update[old_kolibri_log] = "kolibri.txt"
log_location_update[old_debug_log] = "debug.txt"
for log in log_location_update:
old_log_path = os.path.join(home, log)
if os.path.exists(old_log_path):
new_log_path = os.path.join(home, "logs", log_location_update[log])
shutil.move(old_log_path, new_log_path)
| []
| []
| [
"KOLIBRI_HOME"
]
| [] | ["KOLIBRI_HOME"] | python | 1 | 0 | |
internal/notes/builtin-SAVE/packages/hub/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Hub(Package):
"""The github git wrapper"""
homepage = "https://github.com/github/hub"
url = "https://github.com/github/hub/archive/v2.2.3.tar.gz"
version('head', git='https://github.com/github/hub')
version('2.2.3', '6675992ddd16d186eac7ba4484d57f5b')
version('2.2.2', '7edc8f5b5d3c7c392ee191dd999596fc')
version('2.2.1', '889a31ee9d10ae9cb333480d8dbe881f')
version('2.2.0', 'eddce830a079b8480f104aa7496f46fe')
version('1.12.4', '4f2ebb14834c9981b04e40b0d1754717')
extends("go")
def install(self, spec, prefix):
env = os.environ
env['GOPATH'] = self.stage.source_path + ':' + env['GOPATH']
bash = which('bash')
bash(os.path.join('script', 'build'), '-o', os.path.join(prefix, 'bin',
'hub'))
| []
| []
| []
| [] | [] | python | 0 | 0 | |
bdc_collection_builder/collections/landsat/tasks.py | #
# This file is part of Brazil Data Cube Collection Builder.
# Copyright (C) 2019-2020 INPE.
#
# Brazil Data Cube Collection Builder is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Describe Celery task handling for Landsat products."""
# Python Native
import logging
import os
import subprocess
from datetime import datetime
# 3rdparty
from botocore.exceptions import EndpointConnectionError
from sqlalchemy.exc import InvalidRequestError
from urllib3.exceptions import NewConnectionError, MaxRetryError
# Builder
from ...celery import celery_app
from ...config import Config
from ...db import db_aws
from ..base_task import RadcorTask
from ..utils import refresh_assets_view, remove_file, upload_file
from .download import download_landsat_images
from .google import download_from_google
from .harmonization import landsat_harmonize
from .publish import publish
from .utils import LandsatProduct, factory
def is_valid_tar_gz(file_path: str):
"""Check tar file integrity."""
try:
retcode = subprocess.call(['gunzip', '-t', file_path])
return retcode == 0
except BaseException:
return False
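# Illustrative (hypothetical) usage:
#   is_valid_tar_gz('/data/LC08_L1TP_223064_20200101_20200113_01_T1.tar.gz')
# returns True only when `gunzip -t` exits with status 0, i.e. the archive is
# not truncated or corrupted.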
class LandsatTask(RadcorTask):
"""Define abstraction of Landsat-8 - DN and SR products."""
def get_tile_id(self, scene_id, **kwargs):
"""Retrieve tile from sceneid."""
fragments = scene_id.split('_')
return fragments[2]
def get_tile_date(self, scene_id, **kwargs):
"""Retrieve tile date from sceneid."""
fragments = scene_id.split('_')
return datetime.strptime(fragments[3], '%Y%m%d')
def download(self, scene):
"""Perform download landsat image from USGS.
Args:
scene (dict) - Scene containing activity
Returns:
dict Scene with landsat compressed file
"""
# Create/Update activity
activity_history = self.create_execution(scene)
try:
scene_id = scene['sceneid']
# Get Landsat collection handler
landsat_scene = factory.get_from_sceneid(scene_id, level=1)
activity_args = scene.get('args', {})
collection_item = self.get_collection_item(activity_history.activity)
# Output product dir
productdir = landsat_scene.compressed_file().parent
productdir.mkdir(parents=True, exist_ok=True)
digital_number_file = landsat_scene.compressed_file()
valid = False
# When file exists, check persistence
if digital_number_file.exists() and digital_number_file.is_file():
logging.info('File {} downloaded. Checking file integrity...'.format(str(digital_number_file)))
# Check Landsat 8 tar gz is valid
valid = is_valid_tar_gz(str(digital_number_file))
file = str(digital_number_file)
if not valid:
# Ensure file is removed since it may be corrupted
remove_file(str(digital_number_file))
try:
# Download from google
logging.info('Download Landsat {} -> e={} v={} from Google...'.format(
scene_id, digital_number_file.exists(), valid)
)
file, link = download_from_google(scene_id, str(productdir))
activity_args['provider'] = link
except BaseException:
logging.info('Download Landsat {} from USGS...'.format(scene_id))
file = download_landsat_images(activity_args['link'], productdir)
activity_args['provider'] = activity_args['link']
else:
logging.warning('File {} is valid. Skipping'.format(str(digital_number_file)))
collection_item.compressed_file = str(file).replace(Config.DATA_DIR, '')
cloud = activity_args.get('cloud')
if cloud:
collection_item.cloud_cover = cloud
activity_args['file'] = str(file)
except BaseException as e:
logging.error('An error occurred during task execution - {}'.format(activity_history.activity_id),
exc_info=True)
raise e
collection_item.save()
scene['args'] = activity_args
# Create new activity 'correctionLC8' to continue task chain
scene['activity_type'] = 'correctionLC8'
return scene
def publish(self, scene):
"""Publish and persist collection on database.
Args:
scene - Serialized Activity
"""
scene['activity_type'] = 'publishLC8'
# Create/Update activity
activity_history = self.create_execution(scene)
# Get collection level to publish. Default is l1
# TODO: Check in database the scenes level 2 already published. We must set to level 2
collection_level = scene['args'].get('level') or 1
landsat_scene = factory.get_from_sceneid(scene['sceneid'], level=collection_level)
try:
assets = publish(self.get_collection_item(activity_history.activity), activity_history.activity)
except InvalidRequestError as e:
# Error related with Transaction on AWS
            # TODO: Does this also occur on a local instance?
logging.error("Transaction Error on activity - {}".format(activity_history.activity_id), exc_info=True)
db_aws.session.rollback()
raise e
except BaseException as e:
logging.error("An error occurred during task execution - {}".format(activity_history.activity_id),
exc_info=True)
raise e
scene['activity_type'] = 'uploadLC8'
scene['args']['assets'] = assets
# Refresh for everything except for L1
if landsat_scene.level > 1:
refresh_assets_view()
return scene
def upload(self, scene):
"""Upload collection to AWS.
        Make sure `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and
        `AWS_REGION_NAME` are defined in `bdc_collection_builder.config.Config`.
Args:
scene - Serialized Activity
"""
scene['activity_type'] = 'uploadLC8'
# Create/Update activity
self.create_execution(scene)
assets = scene['args']['assets']
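        # Drop the bucket-name prefix so the remaining path becomes the S3 object key.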
for entry in assets.values():
file_without_prefix = entry['asset'].replace('{}/'.format(Config.AWS_BUCKET_NAME), '')
upload_file(entry['file'], Config.AWS_BUCKET_NAME, file_without_prefix)
@staticmethod
def espa_done(scene: LandsatProduct):
"""Check espa-science has executed successfully."""
fs = scene.get_files()
return len(fs) > 0
def correction(self, scene):
"""Apply atmospheric correction on collection.
Args:
scene - Serialized Activity
"""
import subprocess
import tarfile
scene['activity_type'] = 'correctionLC8'
scene_id = scene['sceneid']
# Get Resolver for Landsat scene level 2
landsat_scene = factory.get_from_sceneid(scene_id, level=2)
landsat_scene_level_1 = factory.get_from_sceneid(scene_id, level=1)
scene['collection_id'] = landsat_scene.id
# Create/Update activity
execution = self.create_execution(scene)
try:
params = dict(
app=scene['activity_type'],
sceneid=scene['sceneid'],
file=scene['args']['file']
)
output_path = landsat_scene.path()
output_path.mkdir(exist_ok=True, parents=True)
input_dir = landsat_scene_level_1.compressed_file().parent
with tarfile.open(scene['args']['file']) as compressed_file:
# Extracting to temp directory
compressed_file.extractall(landsat_scene_level_1.compressed_file().parent)
cmd = 'run_lasrc_ledaps_fmask.sh {}'.format(landsat_scene_level_1.scene_id)
logging.warning('cmd {}'.format(cmd))
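            # INDIR/OUTDIR are presumably consumed by the run_lasrc_ledaps_fmask.sh
            # wrapper to locate the extracted scene and write the corrected outputs.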
env = dict(**os.environ, INDIR=str(input_dir), OUTDIR=str(output_path))
process = subprocess.Popen(cmd, shell=True, env=env, stdin=subprocess.PIPE)
process.wait()
assert process.returncode == 0
pathrow = landsat_scene.tile_id()
params['pathrow'] = pathrow
# Product dir
productdir = landsat_scene.path()
logging.info('Checking for the ESPA generated files in {}'.format(productdir))
if not LandsatTask.espa_done(landsat_scene):
raise RuntimeError('Error in atmospheric correction')
scene['args']['file'] = str(productdir)
except BaseException as e:
logging.error('Error at correction Landsat {}, id={} - {}'.format(scene_id, execution.activity_id, str(e)))
raise e
finally:
# Remove extracted files
for f in landsat_scene_level_1.compressed_file_bands():
if f.exists():
f.unlink()
scene['activity_type'] = 'publishLC8'
scene['args']['level'] = landsat_scene.level
return scene
def harmonize(self, scene):
"""Apply Harmonization on Landsat collection.
Args:
scene - Serialized Activity
"""
# Set Collection Level 3 - BDC
scene['args']['level'] = 3
landsat_scene = factory.get_from_sceneid(scene['sceneid'], level=scene['args']['level'])
# Set Collection to the Landsat NBAR (Nadir BRDF Adjusted Reflectance)
scene['collection_id'] = landsat_scene.id
scene['activity_type'] = 'harmonizeLC8'
# Create/Update activity
activity_history = self.create_execution(scene)
logging.debug('Starting Harmonization Landsat...')
activity_history.activity.activity_type = 'harmonizeLC8'
activity_history.start = datetime.utcnow()
activity_history.save()
try:
# Get ESPA output dir
harmonized_dir = landsat_harmonize(self.get_collection_item(activity_history.activity), activity_history.activity)
scene['args']['file'] = harmonized_dir
except BaseException as e:
logging.error('Error at Harmonize Landsat {}'.format(e))
raise e
scene['activity_type'] = 'publishLC8'
return scene
@celery_app.task(base=LandsatTask,
queue='download',
max_retries=72,
autoretry_for=(NewConnectionError, MaxRetryError),
default_retry_delay=Config.TASK_RETRY_DELAY)
def download_landsat(scene):
"""Represent a celery task definition for handling Landsat-8 Download files.
    This celery task listens only on the 'download' queue.
    It also retries when the following errors occur:
    - NewConnectionError, MaxRetryError: internet connection problems
Args:
scene (dict): Radcor Activity
Returns:
Returns processed activity
"""
return download_landsat.download(scene)
@celery_app.task(base=LandsatTask, queue='atm-correction')
def atm_correction_landsat(scene):
"""Represent a celery task definition for handling Landsat Atmospheric correction - sen2cor.
This celery tasks listen only for queues 'atm-correction'.
Args:
scene (dict): Radcor Activity with "correctionLC8" app context
Returns:
Returns processed activity
"""
return atm_correction_landsat.correction(scene)
@celery_app.task(base=LandsatTask,
queue='publish',
max_retries=3,
autoretry_for=(InvalidRequestError,),
default_retry_delay=Config.TASK_RETRY_DELAY)
def publish_landsat(scene):
"""Represent a celery task definition for handling Landsat Publish TIFF files generation.
    This celery task listens only on the 'publish' queue.
    It also retries when the following errors occur:
    - InvalidRequestError: transaction errors caused by concurrent database access.
Args:
scene (dict): Radcor Activity with "publishLC8" app context
Returns:
Returns processed activity
"""
return publish_landsat.publish(scene)
@celery_app.task(base=LandsatTask,
queue='upload',
max_retries=3,
                 autoretry_for=(EndpointConnectionError, NewConnectionError,),
default_retry_delay=Config.TASK_RETRY_DELAY)
def upload_landsat(scene):
"""Represent a celery task definition for handling Landsat8 Upload TIFF to AWS.
    This celery task listens only on the 'upload' queue.
Args:
scene (dict): Radcor Activity with "uploadLC8" app context
"""
upload_landsat.upload(scene)
@celery_app.task(base=LandsatTask, queue='harmonization')
def harmonization_landsat(scene):
"""Represent a celery task definition for harmonizing Landsat8.
    This celery task listens only on the 'harmonization' queue.
Args:
scene (dict): Radcor Activity with "harmonizeLC8" app context
"""
return harmonization_landsat.harmonize(scene)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/BlazingSQLTest/Runner/runTest.py | # Cast columns to f64 before converting to pandas.
# This is a hack; use the assert_equal comparator once nulls are
# fully supported on cudf.sort_values.
import json
import logging
import os
import re
import time
import blazingsql
from blazingsql import DataType
# import git
import numpy as np
import pandas as pd
from BlazingLogging import loggingHandler as lhandler
from Configuration import ExecutionMode
from Configuration import Settings as Settings
from DataBase import createSchema as cs
if ((Settings.execution_mode == ExecutionMode.FULL and
Settings.compare_res == "true") or
Settings.execution_mode == ExecutionMode.GENERATOR):
print(Settings.execution_mode)
print(Settings.compare_res)
from pydrill.client import PyDrill
from pyspark.sql.session import SparkSession
class Result:
def __init__(self, columns, resultSet, resultBlz):
self.columns = columns
self.resultSet = resultSet
self.resultBlz = resultBlz
name = "blzlogging"
HANDLER = lhandler.logging_handler()
class loggerblz:
def __init__(self, query, error, totaltime):
self.query = query
self.error = error
self.totaltime = totaltime
class result:
def __init__(self, res_execution, error):
self.res_execution = res_execution
self.error = error
def logginghelper(name):
# logging.basicConfig(filename='example.txt',level=logging.DEBUG)
logging._defaultFormatter = logging.Formatter()
logger = logging.getLogger(name)
logger.handlers = []
logger.setLevel(logging.DEBUG)
logger.addHandler(HANDLER)
return logger
def loggingClose(name):
HANDLER.log = []
def upcast_to_float(df):
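    # Promote bools and ints to floats so NaN can represent nulls when comparing engines.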
for name in df.columns:
if np.issubdtype(df[name].dtype, np.bool_):
df[name] = df[name].astype(np.float32)
elif np.issubdtype(df[name].dtype, np.integer):
df[name] = df[name].astype(np.float64)
return df
def to_pandas_f64_engine(df, expected_types_list):
count = 0
for col in df.columns:
if count >= len(expected_types_list):
break
if expected_types_list[count] != np.dtype(object):
if df.shape[0] > 0:
if not np.issubdtype(df[col].dtype, np.number) and not np.issubdtype(
df[col].dtype, np.datetime64
):
if np.issubdtype(expected_types_list[count], np.bool_):
df[col] = (
df[col].map({"true": 1.0, "false": 0.0}).astype(np.float32)
)
elif np.issubdtype(expected_types_list[count], np.datetime64):
df[col] = df[col].astype(expected_types_list[count])
else:
df[col] = pd.to_numeric(df[col], errors="coerce")
count = count + 1
return df
def get_null_constants(df):
null_values = {}
for col, dtype in df.dtypes.to_dict().items():
if np.issubdtype(dtype, np.datetime64):
null_values[col] = np.datetime64("nat")
elif np.issubdtype(dtype, np.number):
null_values[col] = np.nan
return null_values
def compare_results(pdf1, pdf2, acceptable_difference, use_percentage, engine):
np.warnings.filterwarnings("ignore")
if pdf1.size == 0 and pdf2.size == 0:
return "Success"
msg = ""
if not isinstance(engine, str):
if isinstance(engine, PyDrill):
msg = "PyDrill"
else:
msg = "PySpark"
elif engine=="drill":
msg = "PyDrill"
else:
msg = "PySpark"
msg = ""
if not isinstance(engine, str):
if isinstance(engine, PyDrill):
msg = "PyDrill"
else:
msg = "PySpark"
elif engine=="drill":
msg = "PyDrill"
else:
msg = "PySpark"
if pdf1.shape[0] == pdf2.shape[0]:
if pdf1.shape[1] == pdf2.shape[1]:
for name in pdf1.columns:
if pdf1[name].dtype == np.object:
pdf1[name] = pdf1[name].astype('string')
for name in pdf2.columns:
if pdf2[name].dtype == np.object:
pdf2[name] = pdf2[name].astype('string')
# Removing indexes, because those are considered when
# comparing with equals()
pdf1.reset_index(drop=True, inplace=True)
pdf2.reset_index(drop=True, inplace=True)
# Make the column labels equal as equals() also compare labels
orig_pdf2_labels = pdf2.columns.to_list()
pdf2.columns = pdf1.columns.to_list()
exac_comp = pdf1.select_dtypes(exclude=np.inexact).equals(
pdf2.select_dtypes(exclude=np.inexact)
)
# Restore labels
pdf2.columns = orig_pdf2_labels
tmp_pdf1 = pdf1.select_dtypes(include=np.inexact)
tmp_pdf2 = pdf2.select_dtypes(include=np.inexact)
if use_percentage:
relative_tolerance = acceptable_difference
absolute_tolerance = 0
else:
relative_tolerance = 0
absolute_tolerance = acceptable_difference
# np.allclose follows this formula:
# absolute(a - b) <= (absolute_tolerance + relative_tolerance * absolute(b))
res = np.all(exac_comp) and np.allclose(
tmp_pdf1.values, tmp_pdf2.values, relative_tolerance,
absolute_tolerance, equal_nan=True
)
if res:
return "Success"
else:
return "Fail: Different values"
else:
return (
"Fail: Different number of columns blzSQLresult: "
+ str(pdf1.shape[1])
+ " "
+ msg
+ " result: "
+ str(pdf2.shape[1])
)
else:
return (
"Fail: Different number of rows blzSQLresult: "
+ str(pdf1.shape[0])
+ " "
+ msg
+ " result: "
+ str(pdf2.shape[0])
)
def begins_with(col1, col2, exp):
return col1.startswith(exp) or col2.startswith(exp)
def compare_column_names(pdf1, pdf2):
if len(pdf1.columns) != len(pdf2.columns):
if pdf1.values.size == 0 and pdf2.values.size == 0:
return True
print("Different set of columns")
return False
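    # Engine-generated column labels (e.g. EXPR$0 or count(...)) may differ between
    # BlazingSQL and Drill/Spark, so only flag a mismatch when neither label looks generated.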
for blzCol, drillCol in zip(
pdf1.columns.values.tolist(), pdf2.columns.values.tolist()
):
if blzCol != drillCol:
if (
begins_with(drillCol, blzCol, "EXPR") is False
and begins_with(drillCol, blzCol, "count(") is False
):
print("Different columns")
return False
return True
# NOTE kharoly percy william: NEVER CHANGE THE ORDER of these
# lines (the logger logic depends on logging queryType first and then queryId).
# WARNING DO NOT CHANGE THE CALL ORDER IN THIS FUNCTION!
def get_Branch():
branch = blazingsql.__branch_name__
return branch
def get_CommitHash():
commit = blazingsql.__version__
return commit
def get_QueryId(input_type, test_name, test_id):
query_id = (
str(input_type).upper()
+ "-"
+ str(get_codTest(test_name)).upper()
+ "-"
+ str(test_id)
)
return query_id
def get_resultId(resultComparisson):
result_id = 1
if resultComparisson != "Success":
result_id = 0
return result_id
def get_codTest(test_name):
switcher = {
"Aggregations without group by": "AGGWOGRBY",
"Coalesce": "COALESCE",
"Column Basis": "COLBAS",
"Bindable Alias": "BALIAS",
"Boolean": "BOOL",
"Case": "CASE",
"Cast": "CAST",
"Common Table Expressions": "COMTABLEX",
"Concat": "CONCAT",
"Count Distinct": "COUNTD",
"Count without group by": "COUNTWOGRBY",
"Cross join": "CROSSJOIN",
"Date": "DATE",
"DayOfWeek": "DAYOFWEEK",
"Dir": "DIR",
"File System Google Storage": "FSGS",
"Hdfs FileSystem": "FSHDFS",
"Hive FileSystem": "FSHIVE",
"File System Local": "FSLOCAL",
"File System S3": "FSS3",
"Full outer join": "FOUTJOIN",
"Group by": "GROUPBY",
"Group by without aggregations": "GRBYWOAGG",
"Inner join": "INNERJOIN",
"Left outer join": "LOUTJOIN",
"Like": "LIKE",
"Literal": "LITERAL",
"Nested Queries": "NESTEDQ",
"Non-EquiJoin Queries": "NEQUIJOIN",
"Order by": "ORDERBY",
"Predicates With Nulls": "PREDWNULLS",
"Round": "ROUND",
"Replace": "REPLACE",
"Simple Distribution From Local": "SIMPLEDIST",
"Smiles Test": "SMILES",
"Substring": "SUBSTRING",
"Tables from Pandas": "TBLPANDAS",
"Timestampdiff": "TIMESTAMPD",
"Timestamp": "TIMESTAMP",
"To_timestamp": "TO_TIMESTAMP",
"TPCH Queries": "TPCH",
"Config Options": "TPCH", # we want the same outputs as the tpch test
"Unary ops": "UNARYOPS",
"Unify Tables": "UNIFYTBL",
"Union": "UNION",
"Limit": "LIMIT",
"Where clause": "WHERE",
"Wild Card": "WILDCARD",
"Simple String": "SSTRING",
"String case": "STRINGCASE",
"Message Validation": "MESSAGEVAL"
}
return switcher.get(test_name)
def print_fixed_log(
logger,
test_name,
input_type,
test_id,
sql,
resultComparisson,
error_message,
load_time,
engine_time,
total_time,
):
commitHash = get_CommitHash()
branchName = get_Branch()
# dateNow=datetime.now()
inputType = cs.get_extension(input_type)
logger.info(get_QueryId(inputType, test_name, test_id)) # QueryID
logger.info(Settings.dateNow) # TimeStamp
logger.info(test_name) # TestGroup
logger.info(inputType) # InputType
logger.info(sql) # Query
logger.info(get_resultId(resultComparisson)) # Result
logger.info(error_message) # Error
logger.info(branchName) # PR
logger.info(commitHash) # CommitHash
logger.info(Settings.data["RunSettings"]["nRals"])
logger.info(Settings.data["RunSettings"]["nGPUs"])
logger.info(Settings.data["TestSettings"]["dataDirectory"])
logger.info(test_id)
logger.info(load_time)
logger.info(engine_time)
logger.info(total_time)
def print_query_results(
sql,
queryId,
queryType,
pdf1,
pdf2,
resultgdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
):
if print_result:
print("#BLZ:")
print(pdf1)
if not isinstance(engine, str):
if isinstance(engine, PyDrill):
print("#DRILL:")
else:
print("#PYSPARK:")
print(pdf2)
else:
if engine=="drill":
print("#DRILL:")
else:
print("#PYSPARK:")
data_type = cs.get_extension(input_type)
print(str(queryId) + " Test " + queryType + " - " + data_type)
print("#QUERY:")
print(sql)
print("RESULT:")
error_message = ""
stringResult = ""
compareResults = True
if "compare_results" in Settings.data["RunSettings"]:
compareResults = Settings.data["RunSettings"]["compare_results"]
if compareResults:
columnNamesComparison = compare_column_names(pdf1, pdf2)
if columnNamesComparison is not True:
print("Columns:")
print(pdf1.columns)
print(pdf2.columns)
error_message = "Column names are not the same"
print("ERROR:")
print(error_message)
resultComparisson = compare_results(
pdf1, pdf2, acceptable_difference, use_percentage, engine
)
if resultComparisson != "Success":
error_message = resultComparisson[6:]
print("ERROR:")
print(error_message)
stringResult = resultComparisson
if resultComparisson != "Success" or columnNamesComparison is False:
stringResult = "Fail"
else:
stringResult = "Success"
print(stringResult)
print("TOTAL TIME: ")
print(total_time)
print("CRASHED NODES: ")
# print(resultgdf.n_crashed_nodes)
print("TOTAL NODES: ")
# print(resultgdf.total_nodes)
print("===================================================")
logger = logginghelper(name)
# TODO percy kharoly bindings we need to get the number from internal api
# print_fixed_log(logger, queryType, queryId, sql, stringResult,
# error_message, 1, 1, 2)
print_fixed_log(
logger,
queryType,
input_type,
queryId,
sql,
stringResult,
error_message,
load_time,
engine_time,
total_time,
)
def print_query_results2(sql, queryId, input_type, queryType, error_message, message_validation):
print(queryId)
print("#QUERY:")
print(sql)
print("RESULT:")
result = validate_messages(error_message, message_validation)
print(result)
print("ERROR:")
if result=="Fail":
print(error_message)
else:
error_message=""
print("CALCITE TIME: ")
print("-")
print("RAL TIME: ")
print("-")
print("EXECUTION TIME: ")
print("-")
print("===================================================")
logger = logginghelper(name)
print_fixed_log(
logger, queryType, input_type, queryId, sql, result, error_message, None, None, None
)
def print_query_results_performance(sql, queryId, queryType, resultgdf):
print(queryId)
print("#QUERY:")
print(sql)
print("RESULT:")
resultComparisson = "Success"
print("CALCITE TIME: ")
print(resultgdf.calciteTime)
print("RAL TIME: ")
print(resultgdf.ralTime)
print("EXECUTION TIME: ")
print(resultgdf.totalTime)
print("===================================================")
logger = logginghelper(name)
print_fixed_log(
logger,
queryType,
queryId,
sql,
resultComparisson,
" ",
resultgdf.calciteTime,
resultgdf.ralTime,
resultgdf.totalTime,
)
def print_query_results_dist(
sql,
queryId,
queryType,
pdf1,
pdf2,
resultgdf,
acceptable_difference,
use_percentage,
print_result,
):
if print_result:
print("#BLZ:")
print(pdf1)
print("#DRILL:")
print(pdf2)
print(queryId)
print("#QUERY:")
print(sql)
print("RESULT:")
    resultComparisson = compare_results(
        pdf1, pdf2, acceptable_difference, use_percentage, "drill"
    )
error_message = ""
if resultComparisson != "Success":
error_message = resultComparisson[6:]
resultComparisson = "Fail"
print(resultComparisson)
print("ERROR:")
print(error_message)
else:
print(resultComparisson)
print("CALCITE TIME: ")
print(resultgdf.calciteTime)
print("RAL TIME: ")
print(resultgdf.ralTime)
print("EXECUTION TIME: ")
print(resultgdf.totalTime)
print("===================================================")
logger = logginghelper(name)
print_fixed_log(
logger,
queryType,
queryId,
sql,
resultComparisson,
error_message,
None,
None,
None,
)
class Test:
def __init__(self, test_name):
self.test_name = test_name
self.total = 0
self.success = 0
self.fail_ids = []
def save_log(gpu_ci_mode=False):
c = 1
cadena = []
subcadena = []
countPass = 0
countCrash = 0
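    # HANDLER.log stores one flat message per field; regroup every 16 consecutive
    # messages into a single row matching the 16-column DataFrame built below.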
for x in HANDLER.log:
if c < 17:
subcadena.append(x.msg)
c = c + 1
else:
c = 1
cadena.append(subcadena)
subcadena = []
subcadena.append(x.msg)
c = c + 1
print()
cadena.append(subcadena)
# If it didn't run any test (probably some were skipped)
# then return success
if cadena == [[]]:
return True, []
df = pd.DataFrame(
cadena,
columns=[
"QueryID",
"TimeStamp",
"TestGroup",
"InputType",
"Query",
"Result",
"Error",
"Branch",
"CommitHash",
"nRals",
"nGPUs",
"DataDirectory",
"TestId",
"LoadingTime",
"EngineTotalTime",
"TotalTime",
],
)
total = df.shape[0]
countPass = df[df.Result == 1].count()["Result"]
df1 = df[
[
"QueryID",
"TimeStamp",
"TestGroup",
"InputType",
"Query",
"Result",
"Error",
"Branch",
"CommitHash",
"nRals",
"nGPUs",
"DataDirectory",
"LoadingTime",
"EngineTotalTime",
"TotalTime",
]
].copy()
create_summary_detail(df, gpu_ci_mode)
printSummary(countPass, countCrash, total, gpu_ci_mode)
if not gpu_ci_mode:
saveLogInFile(df1)
saveLog = False
if "saveLog" in Settings.data["RunSettings"]:
saveLog = Settings.data["RunSettings"]["saveLog"]
print("saveLog = " + str(saveLog))
# TODO william kharoly felipe we should try to enable and use
# this function in the future
# result, error_msgs = verify_prev_google_sheet_results(df1)
result, error_msgs = True, []
if result is True and saveLog == "true":
saving_google_sheet_results(df1)
else:
if countPass < total:
result, error_msgs = False, []
else:
result, error_msgs = True, []
loggingClose(name)
return result, error_msgs
def create_summary_detail(df, no_color):
pdf = df
pdf["Result"] = df["Result"].replace(1, "Success")
pdf["Result"] = df["Result"].replace(0, "Fail")
# making boolean series for a team name
filter_fail = pdf["Result"] == "Fail"
# filtering data
pdf2 = pdf.where(filter_fail)
pdf_fail = pdf2.dropna()
if no_color:
green = ""
yellow = ""
# red = ""
endc = ""
else:
green = bcolors.OKGREEN
yellow = bcolors.WARNING
# red = bcolors.FAIL
endc = bcolors.ENDC
# display
print(green + "========================================================")
print("DETAILED SUMMARY TESTS")
print("========================================================" + endc)
pd.set_option("max_rows", 1500)
print(pdf.groupby(["TestGroup", "InputType"])["Result"].value_counts())
print(yellow + "========================================================")
print("FAILED TESTS" + yellow)
print("========================================================" + endc)
# pd.set_option('max_columns', 5)
# pd.set_option('max_colwidth', 1000)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 2000)
pd.set_option("display.float_format", "{:20,.2f}".format)
pd.set_option("display.max_colwidth", None)
print(
pdf_fail.groupby(["TestGroup", "InputType", "Result"])["TestId"]
.apply(",".join)
.reset_index()
)
# This function uses the Google spreadsheet to compare the current results
# against historic ones.
# Returns a tuple with 2 entries:
# 1st element: False in case gpuci should fail, True otherwise
# 2nd element: A list of error messages (in case the 1st element is False)
# Example:
# result, error_msgs = verify_prev_google_sheet_results(log_pdf)
# if result == False:
#     exit the python process and do not move on to the next steps
# TODO william kharoly felipe we should try to enable and use
# this function in the future
def _verify_prev_google_sheet_results(log_pdf):
import gspread
from oauth2client.service_account import ServiceAccountCredentials
def get_the_data_from_sheet():
# Use creds to create a client to interact with the Google Drive API
scope = [
"https://www.googleapis.com/auth/drive",
"https://spreadsheets.google.com/feeds",
]
# Using credentials from BlazingSQL
# os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']
# # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest
# current_dir = "/home/ubuntu/.conda/envs/e2e"
log_info = Settings.data["RunSettings"]["logInfo"]
if log_info == "":
print(
"""####### ======= >>>>>>> WARNING this test run will not
be compared against old results from Google Docs. Define
the env var BLAZINGSQL_E2E_LOG_INFO"""
)
return None
log_info = json.loads(log_info)
creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(
log_info, scope
)
client_blazing = gspread.authorize(creds_blazing)
# Find a Locally workbook by name and open a sheet
work_sheet = "BSQL Log Results"
if "worksheet" in Settings.data["RunSettings"]:
work_sheet = Settings.data["RunSettings"]["worksheet"]
sheet_blazing = client_blazing.open("BSQL End-to-End Tests").worksheet(
work_sheet
)
# Writing log results into Blazing sheet
ret = pd.DataFrame(sheet_blazing.get_all_records())
# NOTE percy kharo william we need to patch these columns
# before convert to parquet
ret["LoadingTime"] = ret["LoadingTime"].astype(str)
ret["EngineTotalTime"] = ret["EngineTotalTime"].astype(str)
ret["TotalTime"] = ret["TotalTime"].astype(str)
return ret
dir_log = Settings.data["TestSettings"]["logDirectory"]
gspreadCacheHint = Settings.data["RunSettings"]["gspreadCacheHint"]
gspread_e2e_cache_path = dir_log + "/e2e-gspread-cache.parquet"
gspread_df = None
if gspreadCacheHint == "false":
gspread_df = get_the_data_from_sheet()
if gspread_df is not None:
# Always save a cache (so when gspreadCacheHint
# is false will refresh the cache)
gspread_df.to_parquet(gspread_e2e_cache_path)
elif gspreadCacheHint == "true":
if os.path.isfile(gspread_e2e_cache_path):
gspread_df = pd.read_parquet(gspread_e2e_cache_path)
else:
gspread_df = get_the_data_from_sheet()
if gspread_df is not None:
gspread_df.to_parquet(gspread_e2e_cache_path)
if gspread_df is None:
error_msg = """ERROR: This test run could not be compared
against old results from Google Docs"""
return False, [error_msg]
log_pdf_copy = log_pdf.copy()
prev_nrals = gspread_df["nRALS"][0]
curr_nrals = Settings.data["RunSettings"]["nRals"]
# Assume prev_nrals == curr_nrals
last_e2e_run_id = gspread_df["Timestamp"][0]
# NOTE If prev_nrals != curr_nrals we need to search the first
# Timestamp (a.k.a ID) for the current nRals target
if prev_nrals != curr_nrals:
gspread_df_uniques = gspread_df.drop_duplicates()
gspread_df_uniques_target_nrals = gspread_df_uniques.loc[
gspread_df_uniques["nRALS"] == curr_nrals
]
last_e2e_run_id = gspread_df_uniques_target_nrals.iloc[
0, 1
] # select the first Timestamp from the unique values
print(
"####### ======= >>>>>>> E2E INFO: We will compare the"
+ " current run against the ID (Timestamp): "
+ last_e2e_run_id
)
last_e2e_run_df = gspread_df.loc[gspread_df["Timestamp"] == last_e2e_run_id]
# NOTE percy kharo william we need to rename some columns to use our dfs
log_pdf_copy = log_pdf_copy.rename(
columns={
"TestGroup": "Test Group",
"InputType": "Input Type",
"nRals": "nRALS",
"DataDirectory": "data_dir",
}
)
# NOTE For debugging
# log_pdf_copy['TimeStamp'] = log_pdf_copy['TimeStamp'].astype(str)
# log_pdf_copy.to_parquet('/home/percy/workspace/logtest/ultimo.parquet',
# compression='GZIP')
# log_pdf_copy = pd.read_parquet('/home/user/last_run_log_df.parquet')
error_msgs = []
prev_summary = last_e2e_run_df.groupby("Test Group").count()
curr_summary = log_pdf_copy.groupby("Test Group").count()
prev_test_groups = prev_summary.index.tolist()
curr_test_groups = curr_summary.index.tolist()
has_less_test_groups = len(prev_test_groups) > len(curr_test_groups)
# Check if someone deleted some tests
# (there more test groups in the sheet)
if has_less_test_groups:
list_difference = [
item for item in prev_test_groups if item not in curr_test_groups
]
error_msg = (
"ERROR: current e2e has less test groups than"
+ " previous run, delta is %s" % list_difference
)
error_msgs.append(error_msg)
# Just check the common test groups
if has_less_test_groups:
test_groups = curr_test_groups
else:
test_groups = prev_test_groups
for test_group in test_groups:
prev_test_group_df = last_e2e_run_df.loc[
last_e2e_run_df["Test Group"] == test_group
]
prev_input_types = (
prev_test_group_df.groupby("Input Type").count().index.tolist()
)
curr_test_group_df = log_pdf_copy.loc[log_pdf_copy["Test Group"] == test_group]
cur_input_typ = curr_test_group_df.groupby("Input Type").count().index.tolist()
has_less_input_types = len(prev_input_types) > len(cur_input_typ)
if has_less_input_types is True:
list_difference = [
item for item in prev_input_types if item not in cur_input_typ
]
error_msg = """ERROR: current test group %s has less
input types cases, delta is %s""" % (
test_group,
list_difference,
)
error_msgs.append(error_msg)
for input_type in prev_input_types:
prev_tests_df = prev_test_group_df.loc[
prev_test_group_df["Input Type"] == input_type
]
prev_tests_df.sort_values(by=["QueryID"])
curr_tests_df = curr_test_group_df.loc[
curr_test_group_df["Input Type"] == input_type
]
curr_tests_df.sort_values(by=["QueryID"])
# We need to make a copy since we are going to drop some row
prev_tests_df = prev_tests_df.copy()
curr_tests_df = curr_tests_df.copy()
# NOTE for debugging
# print("============================================PREV!")
# print(prev_tests_df.head())
# print(len(prev_tests_df))
# print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxCURR!")
# print(curr_tests_df.head())
# print(len(curr_tests_df))
# Check if current run has less tests than previous run
len_prev_tests_df = len(prev_tests_df)
len_curr_tests_df = len(curr_tests_df)
has_less_tests = len_prev_tests_df > len_curr_tests_df
# NOTE for debugging
# print("====== PREV TESTS ======")
# print(prev_tests_df)
# print("====== CURR TESTS ======")
# print(curr_tests_df)
if has_less_tests:
prev_tests = prev_tests_df["QueryID"].tolist()
curr_tests = curr_tests_df["QueryID"].tolist()
list_difference = [
item for item in prev_tests if item not in curr_tests
]
error_msg = """ERROR: The test group %s has less tests than
previous run for input type %s, delta is %s""" % (
test_group,
input_type,
list_difference,
)
error_msgs.append(error_msg)
n = len_prev_tests_df - len_curr_tests_df
prev_tests_df.drop(prev_tests_df.tail(n).index, inplace=True)
elif len_prev_tests_df < len_curr_tests_df:
n = len_curr_tests_df - len_prev_tests_df
curr_tests_df.drop(curr_tests_df.tail(n).index, inplace=True)
prev_tests_results = prev_tests_df["Result"].to_list()
curr_tests_results = curr_tests_df["Result"].to_list()
for i in range(0, len(prev_tests_results)):
prev_test_result = prev_tests_results[i]
curr_test_result = curr_tests_results[i]
if prev_test_result == 1 and curr_test_result == 0:
error_msg = """ERROR: Test %d for %s (%s) is now failing
but before was ok!""" % (
i + 1,
test_group,
input_type,
)
error_msgs.append(error_msg)
succs = len(error_msgs) == 0
return succs, error_msgs
def saving_google_sheet_results(log_pdf):
import gspread
from oauth2client.service_account import ServiceAccountCredentials
log_info = Settings.data["RunSettings"]["logInfo"]
if log_info == "":
print(
"""####### ======= >>>>>>> WARNING this test run will
not save its results into the Google spreadsheet."""
)
return
# Create an empty list
log_list = []
# Iterate over each row
for index, rows in log_pdf.iterrows():
# Create a list for the current row (ADDS)
current_list = [
rows.QueryID,
str(rows.TimeStamp),
str(rows.TestGroup),
rows.InputType,
rows.Query,
rows.Result,
rows.Error,
rows.Branch,
str(rows.CommitHash),
rows.nRals,
rows.nGPUs,
rows.DataDirectory,
rows.LoadingTime,
rows.EngineTotalTime,
rows.TotalTime,
]
# append the list to the final list
log_list.append(current_list)
# Use creds to create a client to interact with the Google Drive API
scope = [
"https://www.googleapis.com/auth/drive",
"https://spreadsheets.google.com/feeds",
]
# === 1. BlazingSQL =====
# Using credentials from BlazingSQL
# os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']
# # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest
current_dir = "/home/ubuntu/.conda/envs/e2e"
print(current_dir)
log_info = json.loads(log_info)
creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(log_info, scope)
client_blazing = gspread.authorize(creds_blazing)
# Find a Locally workbook by name and open a sheet
work_sheet = "BSQL Log Results"
if "worksheet" in Settings.data["RunSettings"]:
work_sheet = Settings.data["RunSettings"]["worksheet"]
blaz_googlesheat = client_blazing.open("BSQL End-to-End Tests")
sheet_blazing = blaz_googlesheat.worksheet(work_sheet)
# Writing log results into Blazing sheet
total_queries = len(log_list)
for i in range(0, total_queries):
sheet_blazing.append_row(log_list[i])
time.sleep(1)
print("\nTable was uptdated into Blazing Google SpreadSheet")
def saveLogInFile(df):
dir_log = Settings.data["TestSettings"]["logDirectory"]
filepath = getFileName(dir_log)
df.to_excel(filepath, index=False)
def validate_messages(error_message, message_validation):
error_message = error_message.replace('\n', ' ').replace('\r', ' ')
message_validation = message_validation.replace('\n', ' ').replace('\r', ' ')
error_message = error_message.replace(' ', '')
message_validation = message_validation.replace(' ', '')
if error_message == message_validation:
result = "Success"
else:
result = "Fail"
return result
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def on_jenkins():
# NOTE For more env vars see
# https://wiki.jenkins.io/display/JENKINS/Building+a+software+project
jenkins_job = os.environ.get("JOB_NAME")
if jenkins_job is not None:
return True
return False
def print_tests(tests, onlyFails=False):
print(
"""************************************************************
*******************"""
)
tab = " "
failedPrefix = ""
if onlyFails:
failedPrefix = "FAILED"
# TODO percy check None
for extension in tests:
if onlyFails:
if extension == "parquet":
print(
"!!!!!!!!!!!!!!!! "
+ failedPrefix
+ " "
+ extension
+ " TESTS !!!!!!!!!!!!"
)
else:
print(
"!!!!!!!!!!!!!!!! "
+ failedPrefix
+ " "
+ extension
+ " TESTS !!!!!!!!!!!!!!!!"
)
else:
if extension == "parquet":
print("################ " + extension + " TESTS ############")
else:
print("############## " + extension + " TESTS ##############")
testNames = tests.get(extension)
for testName in testNames:
test = testNames.get(testName)
total = test.get("total")
countPass = test.get("countPass")
countCrash = test.get("countCrash")
failIds = test.get("failIds")
showTest = False
if onlyFails:
if len(failIds) > 0:
showTest = True
print(tab + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
else:
showTest = True
print(tab + "++++++++++++++++++++++++++++++++")
if showTest:
green = bcolors.OKGREEN
yellow = bcolors.WARNING
red = bcolors.FAIL
endc = bcolors.ENDC
# don't use colors since jenkins doesn't support ansi chars
if on_jenkins():
green = ""
yellow = ""
red = ""
endc = ""
print(
tab
+ "SUMMARY for "
+ failedPrefix
+ " test suite: "
+ testName
+ " - "
+ extension
)
if not onlyFails:
pass_green = green
pass_endc = endc
if (
countPass != total
): # if no full pass then don't use green colors here
pass_green = ""
pass_endc = ""
print(
pass_green
+ tab
+ "PASSED: "
+ str(countPass)
+ "/"
+ str(total)
+ pass_endc
)
fails = total - countPass - countCrash
yellow_fail = yellow
yellow_endc = endc
if fails == 0:
yellow_fail = ""
yellow_endc = ""
print(
yellow_fail
+ tab
+ "FAILED: "
+ str(fails)
+ "/"
+ str(total)
+ " "
+ str(failIds)
+ yellow_endc
)
red_crash = red
red_endc = endc
# if no crashes then don't use red colors here
if countCrash == 0:
red_crash = ""
red_endc = ""
print(
red_crash
+ tab
+ "CRASH: "
+ str(countCrash)
+ "/"
+ str(total)
+ red_endc
)
if not onlyFails:
print(tab + "TOTAL: " + str(total))
def printSummary(countPass, countCrash, total, no_color):
if no_color:
green = ""
yellow = ""
red = ""
endc = ""
else:
green = bcolors.OKGREEN
yellow = bcolors.WARNING
red = bcolors.FAIL
endc = bcolors.ENDC
# Second: print the global summary (totals from all the tests)
fails = total - countPass - countCrash
print(
"""**********************************************************
*********************"""
)
print("TOTAL SUMMARY for test suite: ")
print(green + "PASSED: " + str(countPass) + "/" + str(total) + endc)
print(yellow + "FAILED: " + str(fails) + "/" + str(total) + endc)
print(red + "CRASH: " + str(countCrash) + "/" + str(total) + endc)
print("TOTAL: " + str(total))
def getFileName(dir_log):
fecha = time.strftime("%H%M%S")
hora = time.strftime("%I%M%S")
return dir_log + "LogTest" + fecha + hora + ".xlsx" #
# ===========================================================================
tableNames = [
"customer",
"orders",
"supplier",
"lineitem",
"part",
"partsupp",
"nation",
"region",
"perf",
"acq",
"names",
"bool_orders",
"web_site",
"web_sales",
"web_returns",
"web_page",
"web_clickstreams",
"warehouse",
"time_dim",
"store_sales",
"store_returns",
"store",
"ship_mode",
"reason",
"promotion",
"product_reviews",
"item_marketprices",
"item",
"inventory",
"income_band",
"household_demographics",
"date_dim",
"customer_demographics",
"customer_address",
"customer",
"split",
"docked",
"smiles",
"dcoids",
]
def get_table_occurrences(query):
res = []
for name in tableNames:
if query.find(name) != -1:
res.append(name)
return res
def replace_all(text, dic):
for i, j in dic.items():
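        # Replace the table name only as a whole word: preceded by whitespace and
        # followed by whitespace, end of string, or a comma.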
text = re.sub(r"\s%s(\s|$|\,)" % i, j, text)
return text
def get_blazingsql_query(db_name, query):
new_query = query
for table_name in get_table_occurrences(query):
new_query = replace_all(
new_query,
{table_name: " %(table)s " % {"table": db_name + "." + table_name}},
)
return new_query
def get_drill_query(query):
new_query = query
for table_name in get_table_occurrences(query):
new_query = replace_all(
new_query, {table_name: " dfs.tmp.`%(table)s` " % {"table": table_name}}
)
return new_query
# ================================================================================================================
def run_query_drill(drill, query_str):
timeout = 400
query_result = drill.query(query_str, timeout)
df = query_result.to_dataframe()
if df.size == 0:
return Result(query_result.columns, df, None)
df = df[query_result.columns]
result = Result(query_result.columns, df, None)
return result
def run_query_spark(spark, query_str):
query_result = spark.sql(query_str)
df = query_result.toPandas()
if df.size == 0:
return Result(query_result.columns, df, None)
df = df[query_result.columns]
result = Result(query_result.columns, df, None)
return result
def save_results_arrow(filename, pdf2):
# save results
import pyarrow as pa
table = pa.Table.from_pandas(pdf2)
# schema = pa.Schema.from_pandas(pdf2)
with open(filename, "bw") as f:
writer = pa.RecordBatchFileWriter(f, table.schema)
writer.write(table)
writer.close()
def save_results_parquet(filename, pdf2):
pdf2.to_parquet(filename, compression="GZIP")
def run_query(
bc,
engine,
query,
queryId,
queryType,
worder,
orderBy,
acceptable_difference,
use_percentage,
input_type,
**kwargs
):
print(query)
query_spark = kwargs.get("query_spark", query)
algebra = kwargs.get("algebra", "")
nRals = Settings.data["RunSettings"]["nRals"]
print_result = kwargs.get("print_result")
if print_result is None:
print_result = False
message_validation = kwargs.get("message_validation", "")
if message_validation is None:
message_validation = False
data_type = cs.get_extension(input_type)
if Settings.execution_mode != "Generator":
print(
"\n=============== New query: "
+ str(queryId)
+ " - "
+ data_type
+ " ================="
)
load_time = 0
engine_time = 0
total_time = 0
nested_query = kwargs.get("nested_query", False)
error_message = ""
if not nested_query:
# if int(nRals) == 1: # Single Node
query_blz = query # get_blazingsql_query('main', query)
if algebra == "":
start_time = time.time()
try:
result_gdf = bc.sql(query_blz)
except Exception as e:
error_message=str(e)
if not message_validation:
end_time = time.time()
total_time = (end_time - start_time) * 1000
# SUM(CASE WHEN info = 'evaluate_split_query load_data' THEN
# duration ELSE 0 END) AS load_time,
# MAX(load_time) AS load_time,
# log_result = bc.log(
# """SELECT
# MAX(end_time) as end_time, query_id,
# MAX(total_time) AS total_time
# FROM (
# SELECT
# query_id, node_id,
# SUM(CASE WHEN info = 'Query Execution Done' THEN
# duration ELSE 0 END) AS total_time,
# MAX(log_time) AS end_time
# FROM
# bsql_logs
# WHERE
# info = 'evaluate_split_query load_data'
# OR info = 'Query Execution Done'
# GROUP BY
# node_id, query_id
# )
# GROUP BY
# query_id
# ORDER BY
# end_time DESC limit 1"""
# )
# if int(nRals) == 1: # Single Node
# n_log = log_result
# else: # Simple Distribution
# n_log = log_result.compute()
load_time = 0 # n_log['load_time'][0]
engine_time = 0 #n_log["total_time"][0]
else:
result_gdf = bc.sql(query_blz, algebra=algebra)
else: # for nested queries as column basis test
result_gdf = kwargs.get("blz_result", [])
str_code_test = str(get_codTest(queryType)).upper()
filename = str_code_test + "-" + str(queryId) + ".parquet"
result_dir = Settings.data["TestSettings"]["fileResultsDirectory"]
file_results_dir = str(result_dir)
if not message_validation== "":
print_query_results2(
query,
queryId,
input_type,
queryType,
error_message,
message_validation
)
elif not isinstance(engine, str):
if isinstance(engine, PyDrill):
# Drill
query_drill = get_drill_query(query)
result_drill_gd = run_query_drill(engine, query_drill)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = to_pandas_f64_engine(
result_drill_gd.resultSet, expected_dtypes
)
pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))
formatResults(pdf1, pdf2, worder, orderBy)
if Settings.execution_mode == ExecutionMode.GENERATOR:
file_res_drill_dir = (
file_results_dir + "/" + "drill" + "/" + filename
)
if not os.path.exists(file_res_drill_dir):
save_results_parquet(file_res_drill_dir, pdf2)
print("Drill: " + filename + " generated.")
else:
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query, queryId, queryType, result_gdf.error_message
)
elif isinstance(engine, SparkSession):
# Spark
result_spark_df = run_query_spark(engine, query_spark)
if result_gdf is not None:
if result_gdf.columns is not None:
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = to_pandas_f64_engine(
result_spark_df.resultSet, expected_dtypes
)
pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))
formatResults(pdf1, pdf2, worder, orderBy)
if Settings.execution_mode == ExecutionMode.GENERATOR:
file_res_drill_dir = (
file_results_dir + "/" + "spark" + "/" + filename
)
if not os.path.exists(file_res_drill_dir):
save_results_parquet(file_res_drill_dir, pdf2)
print("Spark: " + filename + " generated.")
else:
print_query_results(
query_spark,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query_spark, queryId, queryType, result_gdf.error_message
)
else: # GPUCI
compareResults = True
if "compare_results" in Settings.data["RunSettings"]:
compareResults = Settings.data["RunSettings"]["compare_results"]
if compareResults == "true":
resultFile = file_results_dir + "/" + str(engine) + "/" + filename
pdf2 = get_results(resultFile)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
format_pdf(pdf1, worder, orderBy)
print(pdf2)
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query, queryId, queryType, result_gdf.error_message
)
else:
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = pd.DataFrame()
formatResults(pdf1, pdf2, worder, orderBy)
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query, queryId, queryType, result_gdf.error_message
)
def run_query_log(
bc,
query,
queryId,
queryType,
**kwargs
):
result_gdf = None
error_message = ""
message_validation = ""
try:
result_gdf = bc.log(query)
except Exception as e:
error_message=str(e)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
print_query_results2(
query, queryId, DataType.CUDF, queryType, error_message, message_validation
)
else:
print_query_results2(
query, queryId, DataType.CUDF, queryType, error_message, message_validation
)
def run_query_performance(
bc,
drill,
query,
queryId,
queryType,
worder,
orderBy,
acceptable_difference,
use_percentage,
**kwargs
):
# Blazing
query_blz = query # get_blazingsql_query('main', query)
result_gdf = bc.sql(query_blz).get()
if result_gdf.error_message == "":
print_query_results_performance(query, queryId, queryType, result_gdf)
else:
print_query_results2(query, queryId, queryType, result_gdf.error_message)
def formatResults(pdf1, pdf2, worder, orderBy):
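    # worder == 1 marks queries without an ORDER BY; sort both frames on the same
    # columns so the comparison does not depend on row order.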
if worder == 1 and pdf1.size != 0 and pdf2.size != 0:
if len(pdf1.columns) == len(pdf2.columns):
pdf1.sort_values(
[orderBy] if orderBy else pdf1.columns.to_list(), inplace=True
)
pdf2.sort_values(
[orderBy] if orderBy else pdf2.columns.to_list(), inplace=True
)
def format_pdf(pdf, worder, orderBy):
if worder == 1 and pdf.size != 0:
pdf.sort_values([orderBy] if orderBy else pdf.columns.to_list(), inplace=True)
def get_results(result_file):
df = pd.read_parquet(result_file)
return df
| []
| []
| [
"JOB_NAME"
]
| [] | ["JOB_NAME"] | python | 1 | 0 | |
broker/broker_test.go | package broker_test
import (
"context"
"fmt"
"os"
"strings"
"sync/atomic"
"testing"
"time"
kg "github.com/twmb/franz-go/pkg/kgo"
kgo "go.unistack.org/micro-broker-kgo/v3"
jsoncodec "go.unistack.org/micro-codec-json/v3"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
)
var (
msgcnt = int64(60000)
group = "33"
prefill = true
subtopic = "subtest"
pubtopic = "pubtest"
rateRecs int64
rateBytes int64
)
var subbm = &broker.Message{
Header: map[string]string{"hkey": "hval", metadata.HeaderTopic: subtopic},
Body: []byte(`"bodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybody"`),
}
var pubbm = &broker.Message{
Header: map[string]string{"hkey": "hval", metadata.HeaderTopic: pubtopic},
Body: []byte(`"bodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybodybody"`),
}
func printRate() {
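	// Atomically swap the counters back to zero and report throughput for the elapsed interval.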
recs := atomic.SwapInt64(&rateRecs, 0)
bytes := atomic.SwapInt64(&rateBytes, 0)
fmt.Printf("%0.2f MiB/s; %0.2fk records/s\n", float64(bytes)/(1024*1024), float64(recs)/1000)
}
func TestOptionPassing(t *testing.T) {
opts := []client.PublishOption{kgo.ClientPublishKey([]byte(`test`))}
options := client.NewPublishOptions(opts...)
if !strings.Contains(fmt.Sprintf("%#+v\n", options.Context), "kgo.publishKey") {
t.Fatal("context does not have publish key")
}
}
func TestKgo(t *testing.T) {
if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
t.Skip()
}
_ = logger.DefaultLogger.Init(logger.WithLevel(logger.TraceLevel), logger.WithCallerSkipCount(3))
ctx := context.Background()
var addrs []string
if addr := os.Getenv("BROKER_ADDRS"); len(addr) == 0 {
addrs = []string{"172.18.0.201:29091", "172.18.0.202:29092", "172.18.0.203:29093"}
} else {
addrs = strings.Split(addr, ",")
}
b := kgo.NewBroker(
broker.Codec(jsoncodec.NewCodec()),
broker.Addrs(addrs...),
kgo.CommitInterval(5*time.Second),
kgo.Options(kg.ClientID("test"), kg.FetchMaxBytes(1*1024*1024)),
)
if err := b.Init(); err != nil {
t.Fatal(err)
}
if err := b.Connect(ctx); err != nil {
t.Fatal(err)
}
defer func() {
if err := b.Disconnect(ctx); err != nil {
t.Fatal(err)
}
}()
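	// Pre-fill the subscribe topic so the handler has a backlog of msgcnt records to drain.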
if prefill {
msgs := make([]*broker.Message, 0, msgcnt)
for i := int64(0); i < msgcnt; i++ {
msgs = append(msgs, subbm)
}
if err := b.BatchPublish(ctx, msgs); err != nil {
t.Fatal(err)
}
}
done := make(chan bool, 1)
idx := int64(0)
fn := func(m broker.Event) error {
atomic.AddInt64(&idx, 1)
time.Sleep(20 * time.Millisecond)
if err := b.BatchPublish(ctx, []*broker.Message{pubbm}); err != nil {
return err
}
atomic.AddInt64(&rateRecs, 1)
atomic.AddInt64(&rateBytes, int64(len(m.Message().Body)))
return m.Ack()
}
sub, err := b.Subscribe(ctx, subtopic, fn,
broker.SubscribeAutoAck(true),
broker.SubscribeGroup(group),
broker.SubscribeBodyOnly(true))
if err != nil {
t.Fatal(err)
}
defer func() {
if err := sub.Unsubscribe(ctx); err != nil {
t.Fatal(err)
}
}()
// ticker := time.NewTicker(10 * time.Second)
// defer ticker.Stop()
pticker := time.NewTicker(1 * time.Second)
defer pticker.Stop()
go func() {
for {
select {
case <-pticker.C:
printRate()
if prc := atomic.LoadInt64(&idx); prc == msgcnt {
close(done)
}
// case <-ticker.C:
// close(done)
}
}
}()
<-done
}
| [
"\"INTEGRATION_TESTS\"",
"\"BROKER_ADDRS\""
]
| []
| [
"BROKER_ADDRS",
"INTEGRATION_TESTS"
]
| [] | ["BROKER_ADDRS", "INTEGRATION_TESTS"] | go | 2 | 0 | |
repo/initialize.go | package repo
import (
"context"
"crypto/rand"
"io"
"os"
"time"
"github.com/pkg/errors"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/content"
"github.com/kopia/kopia/repo/encryption"
"github.com/kopia/kopia/repo/hashing"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/repo/splitter"
)
// BuildInfo is the build information of Kopia.
// nolint:gochecknoglobals
var (
BuildInfo = "unknown"
BuildVersion = "v0-unofficial"
BuildGitHubRepo = ""
)
const (
hmacSecretLength = 32
masterKeyLength = 32
uniqueIDLength = 32
)
// NewRepositoryOptions specifies options that apply to newly created repositories.
// All fields are optional, when not provided, reasonable defaults will be used.
type NewRepositoryOptions struct {
UniqueID []byte `json:"uniqueID"` // force the use of particular unique ID
BlockFormat content.FormattingOptions `json:"blockFormat"`
DisableHMAC bool `json:"disableHMAC"`
ObjectFormat object.Format `json:"objectFormat"` // object format
RetentionMode blob.RetentionMode `json:"retentionMode,omitempty"`
RetentionPeriod time.Duration `json:"retentionPeriod,omitempty"`
}
// ErrAlreadyInitialized indicates that repository has already been initialized.
var ErrAlreadyInitialized = errors.Errorf("repository already initialized")
// Initialize creates initial repository data structures in the specified storage with given credentials.
func Initialize(ctx context.Context, st blob.Storage, opt *NewRepositoryOptions, password string) error {
if opt == nil {
opt = &NewRepositoryOptions{}
}
// get the blob - expect ErrNotFound
var tmp gather.WriteBuffer
defer tmp.Close()
err := st.GetBlob(ctx, FormatBlobID, 0, -1, &tmp)
if err == nil {
return ErrAlreadyInitialized
}
if !errors.Is(err, blob.ErrBlobNotFound) {
return errors.Wrap(err, "unexpected error when checking for format blob")
}
err = st.GetBlob(ctx, BlobCfgBlobID, 0, -1, &tmp)
if err == nil {
return errors.Errorf("possible corruption: blobcfg blob exists, but format blob is not found")
}
if !errors.Is(err, blob.ErrBlobNotFound) {
return errors.Wrap(err, "unexpected error when checking for blobcfg blob")
}
format := formatBlobFromOptions(opt)
blobcfg := blobCfgBlobFromOptions(opt)
formatEncryptionKey, err := format.deriveFormatEncryptionKeyFromPassword(password)
if err != nil {
return errors.Wrap(err, "unable to derive format encryption key")
}
f, err := repositoryObjectFormatFromOptions(opt)
if err != nil {
return errors.Wrap(err, "invalid parameters")
}
if err = f.MutableParameters.Validate(); err != nil {
return errors.Wrap(err, "invalid parameters")
}
if err = encryptFormatBytes(format, f, formatEncryptionKey, format.UniqueID); err != nil {
return errors.Wrap(err, "unable to encrypt format bytes")
}
if err := writeBlobCfgBlob(ctx, st, format, blobcfg, formatEncryptionKey); err != nil {
return errors.Wrap(err, "unable to write blobcfg blob")
}
if err := writeFormatBlob(ctx, st, format, blobcfg); err != nil {
return errors.Wrap(err, "unable to write format blob")
}
return nil
}
func formatBlobFromOptions(opt *NewRepositoryOptions) *formatBlob {
return &formatBlob{
Tool: "https://github.com/kopia/kopia",
BuildInfo: BuildInfo,
BuildVersion: BuildVersion,
KeyDerivationAlgorithm: defaultKeyDerivationAlgorithm,
UniqueID: applyDefaultRandomBytes(opt.UniqueID, uniqueIDLength),
EncryptionAlgorithm: defaultFormatEncryption,
}
}
func repositoryObjectFormatFromOptions(opt *NewRepositoryOptions) (*repositoryObjectFormat, error) {
fv := opt.BlockFormat.Version
if fv == 0 {
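		// The default format version can be overridden via an environment variable;
		// any unrecognized value falls back to format version 2.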
switch os.Getenv("KOPIA_REPOSITORY_FORMAT_VERSION") {
case "1":
fv = content.FormatVersion1
case "2":
fv = content.FormatVersion2
default:
fv = content.FormatVersion2
}
}
f := &repositoryObjectFormat{
FormattingOptions: content.FormattingOptions{
Hash: applyDefaultString(opt.BlockFormat.Hash, hashing.DefaultAlgorithm),
Encryption: applyDefaultString(opt.BlockFormat.Encryption, encryption.DefaultAlgorithm),
HMACSecret: applyDefaultRandomBytes(opt.BlockFormat.HMACSecret, hmacSecretLength),
MasterKey: applyDefaultRandomBytes(opt.BlockFormat.MasterKey, masterKeyLength),
MutableParameters: content.MutableParameters{
Version: fv,
MaxPackSize: applyDefaultInt(opt.BlockFormat.MaxPackSize, 20<<20), //nolint:gomnd
IndexVersion: applyDefaultInt(opt.BlockFormat.IndexVersion, content.DefaultIndexVersion),
EpochParameters: opt.BlockFormat.EpochParameters,
},
EnablePasswordChange: opt.BlockFormat.EnablePasswordChange,
},
Format: object.Format{
Splitter: applyDefaultString(opt.ObjectFormat.Splitter, splitter.DefaultAlgorithm),
},
}
if opt.DisableHMAC {
f.HMACSecret = nil
}
if err := f.FormattingOptions.ResolveFormatVersion(); err != nil {
return nil, errors.Wrap(err, "error resolving format version")
}
return f, nil
}
func randomBytes(n int) []byte {
b := make([]byte, n)
io.ReadFull(rand.Reader, b) //nolint:errcheck
return b
}
func applyDefaultInt(v, def int) int {
if v == 0 {
return def
}
return v
}
func applyDefaultString(v, def string) string {
if v == "" {
return def
}
return v
}
func applyDefaultRandomBytes(b []byte, n int) []byte {
if b == nil {
return randomBytes(n)
}
return b
}
| [
"\"KOPIA_REPOSITORY_FORMAT_VERSION\""
]
| []
| [
"KOPIA_REPOSITORY_FORMAT_VERSION"
]
| [] | ["KOPIA_REPOSITORY_FORMAT_VERSION"] | go | 1 | 0 | |
main.py |
from ctypes import sizeof
import os
from dotenv import load_dotenv
from crontab import CronTab
from twitchAPI.twitch import Twitch
import schedule
import time
from service.twitch_service import get_streams
from service.twitter_service import tweet_stream
print('Starting Bot...')
load_dotenv()
twitch = Twitch(os.environ['CLIENT_ID'], os.environ['CLIENT_SECRET'])
streams_map = {}
def job():
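    # Tweet each live stream only once per broadcast, keyed by streamer name and stream start time.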
streams = get_streams()
for stream in streams:
if stream['user_name'] not in streams_map or streams_map[stream['user_name']] != stream['started_at']:
try:
streams_map[stream['user_name']] = stream['started_at']
tweet_stream(stream)
                print(f'Tweeting {stream["user_name"]} stream...')
except Exception as e:
                print(f'Could not tweet {stream["user_name"]} stream: {e}')
schedule.every(30).seconds.do(job)
print('Bot Scheduled...')
while True:
schedule.run_pending()
time.sleep(1)
| []
| []
| [
"CLIENT_SECRET",
"CLIENT_ID"
]
| [] | ["CLIENT_SECRET", "CLIENT_ID"] | python | 2 | 0 | |
sympy/printing/tests/test_latex.py | from sympy import (
Add, Abs, Chi, Ci, CosineTransform, Dict, Ei, Eq, FallingFactorial,
FiniteSet, Float, FourierTransform, Function, IndexedBase, Integral,
Interval, InverseCosineTransform, InverseFourierTransform,
InverseLaplaceTransform, InverseMellinTransform, InverseSineTransform,
Lambda, LaplaceTransform, Limit, Matrix, Max, MellinTransform, Min, Mul,
Order, Piecewise, Poly, ring, field, ZZ, Pow, Product, Range, Rational,
RisingFactorial, RootOf, RootSum, S, Shi, Si, SineTransform, Subs,
Sum, Symbol, ImageSet, Tuple, Union, Ynm, Znm, arg, asin,
assoc_laguerre, assoc_legendre, binomial, catalan, ceiling, Complement,
chebyshevt, chebyshevu, conjugate, cot, coth, diff, dirichlet_eta,
exp, expint, factorial, factorial2, floor, gamma, gegenbauer, hermite,
hyper, im, jacobi, laguerre, legendre, lerchphi, log, lowergamma,
meijerg, oo, polar_lift, polylog, re, root, sin, sqrt, symbols,
uppergamma, zeta, subfactorial, totient, elliptic_k, elliptic_f,
elliptic_e, elliptic_pi, cos, tan, Wild, true, false, Equivalent, Not,
Contains, divisor_sigma, SymmetricDifference)
from sympy.abc import mu, tau
from sympy.printing.latex import latex, translate
from sympy.utilities.pytest import XFAIL, raises
from sympy.functions import DiracDelta, Heaviside, KroneckerDelta, LeviCivita
from sympy.logic import Implies
from sympy.logic.boolalg import And, Or, Xor
from sympy.core.trace import Tr
from sympy.core.compatibility import range
x, y, z, t, a, b = symbols('x y z t a b')
k, m, n = symbols('k m n', integer=True)
def test_printmethod():
class R(Abs):
def _latex(self, printer):
return "foo(%s)" % printer._print(self.args[0])
assert latex(R(x)) == "foo(x)"
class R(Abs):
def _latex(self, printer):
return "foo"
assert latex(R(x)) == "foo"
def test_latex_basic():
assert latex(1 + x) == "x + 1"
assert latex(x**2) == "x^{2}"
assert latex(x**(1 + x)) == "x^{x + 1}"
assert latex(x**3 + x + 1 + x**2) == "x^{3} + x^{2} + x + 1"
assert latex(2*x*y) == "2 x y"
assert latex(2*x*y, mul_symbol='dot') == r"2 \cdot x \cdot y"
assert latex(1/x) == r"\frac{1}{x}"
assert latex(1/x, fold_short_frac=True) == "1 / x"
assert latex(1/x**2) == r"\frac{1}{x^{2}}"
assert latex(x/2) == r"\frac{x}{2}"
assert latex(x/2, fold_short_frac=True) == "x / 2"
assert latex((x + y)/(2*x)) == r"\frac{x + y}{2 x}"
assert latex((x + y)/(2*x), fold_short_frac=True) == \
r"\left(x + y\right) / 2 x"
assert latex((x + y)/(2*x), long_frac_ratio=0) == \
r"\frac{1}{2 x} \left(x + y\right)"
assert latex((x + y)/x) == r"\frac{1}{x} \left(x + y\right)"
assert latex((x + y)/x, long_frac_ratio=3) == r"\frac{x + y}{x}"
assert latex(2*Integral(x, x)/3) == r"\frac{2}{3} \int x\, dx"
assert latex(2*Integral(x, x)/3, fold_short_frac=True) == \
r"\left(2 \int x\, dx\right) / 3"
assert latex(sqrt(x)) == r"\sqrt{x}"
assert latex(x**Rational(1, 3)) == r"\sqrt[3]{x}"
assert latex(sqrt(x)**3) == r"x^{\frac{3}{2}}"
assert latex(sqrt(x), itex=True) == r"\sqrt{x}"
assert latex(x**Rational(1, 3), itex=True) == r"\root{3}{x}"
assert latex(sqrt(x)**3, itex=True) == r"x^{\frac{3}{2}}"
assert latex(x**Rational(3, 4)) == r"x^{\frac{3}{4}}"
assert latex(x**Rational(3, 4), fold_frac_powers=True) == "x^{3/4}"
assert latex((x + 1)**Rational(3, 4)) == \
r"\left(x + 1\right)^{\frac{3}{4}}"
assert latex((x + 1)**Rational(3, 4), fold_frac_powers=True) == \
r"\left(x + 1\right)^{3/4}"
assert latex(1.5e20*x) == r"1.5 \cdot 10^{20} x"
assert latex(1.5e20*x, mul_symbol='dot') == r"1.5 \cdot 10^{20} \cdot x"
assert latex(1.5e20*x, mul_symbol='times') == r"1.5 \times 10^{20} \times x"
assert latex(1/sin(x)) == r"\frac{1}{\sin{\left (x \right )}}"
assert latex(sin(x)**-1) == r"\frac{1}{\sin{\left (x \right )}}"
assert latex(sin(x)**Rational(3, 2)) == \
r"\sin^{\frac{3}{2}}{\left (x \right )}"
assert latex(sin(x)**Rational(3, 2), fold_frac_powers=True) == \
r"\sin^{3/2}{\left (x \right )}"
assert latex(~x) == r"\neg x"
assert latex(x & y) == r"x \wedge y"
assert latex(x & y & z) == r"x \wedge y \wedge z"
assert latex(x | y) == r"x \vee y"
assert latex(x | y | z) == r"x \vee y \vee z"
assert latex((x & y) | z) == r"z \vee \left(x \wedge y\right)"
assert latex(Implies(x, y)) == r"x \Rightarrow y"
assert latex(~(x >> ~y)) == r"x \not\Rightarrow \neg y"
assert latex(Implies(Or(x,y), z)) == r"\left(x \vee y\right) \Rightarrow z"
assert latex(Implies(z, Or(x,y))) == r"z \Rightarrow \left(x \vee y\right)"
assert latex(~x, symbol_names={x: "x_i"}) == r"\neg x_i"
assert latex(x & y, symbol_names={x: "x_i", y: "y_i"}) == \
r"x_i \wedge y_i"
assert latex(x & y & z, symbol_names={x: "x_i", y: "y_i", z: "z_i"}) == \
r"x_i \wedge y_i \wedge z_i"
assert latex(x | y, symbol_names={x: "x_i", y: "y_i"}) == r"x_i \vee y_i"
assert latex(x | y | z, symbol_names={x: "x_i", y: "y_i", z: "z_i"}) == \
r"x_i \vee y_i \vee z_i"
assert latex((x & y) | z, symbol_names={x: "x_i", y: "y_i", z: "z_i"}) == \
r"z_i \vee \left(x_i \wedge y_i\right)"
assert latex(Implies(x, y), symbol_names={x: "x_i", y: "y_i"}) == \
r"x_i \Rightarrow y_i"
def test_latex_builtins():
assert latex(True) == r"\mathrm{True}"
assert latex(False) == r"\mathrm{False}"
assert latex(None) == r"\mathrm{None}"
assert latex(true) == r"\mathrm{True}"
assert latex(false) == r'\mathrm{False}'
def test_latex_Float():
assert latex(Float(1.0e100)) == r"1.0 \cdot 10^{100}"
assert latex(Float(1.0e-100)) == r"1.0 \cdot 10^{-100}"
assert latex(Float(1.0e-100), mul_symbol="times") == r"1.0 \times 10^{-100}"
assert latex(1.0*oo) == r"\infty"
assert latex(-1.0*oo) == r"- \infty"
def test_latex_symbols():
Gamma, lmbda, rho = symbols('Gamma, lambda, rho')
mass, volume = symbols('mass, volume')
assert latex(Gamma + lmbda) == r"\Gamma + \lambda"
assert latex(Gamma * lmbda) == r"\Gamma \lambda"
assert latex(Symbol('q1')) == r"q_{1}"
assert latex(Symbol('q21')) == r"q_{21}"
assert latex(Symbol('epsilon0')) == r"\epsilon_{0}"
assert latex(Symbol('omega1')) == r"\omega_{1}"
assert latex(Symbol('91')) == r"91"
assert latex(Symbol('alpha_new')) == r"\alpha_{new}"
assert latex(Symbol('C^orig')) == r"C^{orig}"
assert latex(Symbol('x^alpha')) == r"x^{\alpha}"
assert latex(Symbol('beta^alpha')) == r"\beta^{\alpha}"
assert latex(Symbol('e^Alpha')) == r"e^{A}"
assert latex(Symbol('omega_alpha^beta')) == r"\omega^{\beta}_{\alpha}"
assert latex(Symbol('omega') ** Symbol('beta')) == r"\omega^{\beta}"
@XFAIL
def test_latex_symbols_failing():
rho, mass, volume = symbols('rho, mass, volume')
assert latex(
volume * rho == mass) == r"\rho \mathrm{volume} = \mathrm{mass}"
assert latex(volume / mass * rho == 1) == r"\rho \mathrm{volume} {\mathrm{mass}}^{(-1)} = 1"
assert latex(mass**3 * volume**3) == r"{\mathrm{mass}}^{3} \cdot {\mathrm{volume}}^{3}"
def test_latex_functions():
assert latex(exp(x)) == "e^{x}"
assert latex(exp(1) + exp(2)) == "e + e^{2}"
f = Function('f')
assert latex(f(x)) == r'f{\left (x \right )}'
assert latex(f) == r'f'
g = Function('g')
assert latex(g(x, y)) == r'g{\left (x,y \right )}'
assert latex(g) == r'g'
h = Function('h')
assert latex(h(x, y, z)) == r'h{\left (x,y,z \right )}'
assert latex(h) == r'h'
Li = Function('Li')
assert latex(Li) == r'\operatorname{Li}'
assert latex(Li(x)) == r'\operatorname{Li}{\left (x \right )}'
beta = Function('beta')
# not to be confused with the beta function
assert latex(beta(x)) == r"\beta{\left (x \right )}"
assert latex(beta) == r"\beta"
a1 = Function('a_1')
assert latex(a1) == r"\operatorname{a_{1}}"
assert latex(a1(x)) == r"\operatorname{a_{1}}{\left (x \right )}"
# issue 5868
omega1 = Function('omega1')
assert latex(omega1) == r"\omega_{1}"
assert latex(omega1(x)) == r"\omega_{1}{\left (x \right )}"
assert latex(sin(x)) == r"\sin{\left (x \right )}"
assert latex(sin(x), fold_func_brackets=True) == r"\sin {x}"
assert latex(sin(2*x**2), fold_func_brackets=True) == \
r"\sin {2 x^{2}}"
assert latex(sin(x**2), fold_func_brackets=True) == \
r"\sin {x^{2}}"
assert latex(asin(x)**2) == r"\operatorname{asin}^{2}{\left (x \right )}"
assert latex(asin(x)**2, inv_trig_style="full") == \
r"\arcsin^{2}{\left (x \right )}"
assert latex(asin(x)**2, inv_trig_style="power") == \
r"\sin^{-1}{\left (x \right )}^{2}"
assert latex(asin(x**2), inv_trig_style="power",
fold_func_brackets=True) == \
r"\sin^{-1} {x^{2}}"
assert latex(factorial(k)) == r"k!"
assert latex(factorial(-k)) == r"\left(- k\right)!"
assert latex(subfactorial(k)) == r"!k"
assert latex(subfactorial(-k)) == r"!\left(- k\right)"
assert latex(factorial2(k)) == r"k!!"
assert latex(factorial2(-k)) == r"\left(- k\right)!!"
assert latex(binomial(2, k)) == r"{\binom{2}{k}}"
assert latex(FallingFactorial(3, k)) == r"{\left(3\right)}_{k}"
assert latex(RisingFactorial(3, k)) == r"{3}^{\left(k\right)}"
assert latex(floor(x)) == r"\lfloor{x}\rfloor"
assert latex(ceiling(x)) == r"\lceil{x}\rceil"
assert latex(Min(x, 2, x**3)) == r"\min\left(2, x, x^{3}\right)"
assert latex(Min(x, y)**2) == r"\min\left(x, y\right)^{2}"
assert latex(Max(x, 2, x**3)) == r"\max\left(2, x, x^{3}\right)"
assert latex(Max(x, y)**2) == r"\max\left(x, y\right)^{2}"
assert latex(Abs(x)) == r"\left\|{x}\right\|"
assert latex(re(x)) == r"\Re{x}"
assert latex(re(x + y)) == r"\Re{x} + \Re{y}"
assert latex(im(x)) == r"\Im{x}"
assert latex(conjugate(x)) == r"\overline{x}"
assert latex(gamma(x)) == r"\Gamma{\left(x \right)}"
w = Wild('w')
assert latex(gamma(w)) == r"\Gamma{\left(w \right)}"
assert latex(Order(x)) == r"\mathcal{O}\left(x\right)"
assert latex(Order(x, x)) == r"\mathcal{O}\left(x\right)"
assert latex(Order(x, (x, 0))) == r"\mathcal{O}\left(x\right)"
assert latex(Order(x, (x, oo))) == r"\mathcal{O}\left(x; x\rightarrow\infty\right)"
    assert latex(Order(x, x, y)) == r"\mathcal{O}\left(x; \left ( x, \quad y\right )\rightarrow\left ( 0, \quad 0\right )\right)"
assert latex(Order(x, (x, oo), (y, oo))) == r"\mathcal{O}\left(x; \left ( x, \quad y\right )\rightarrow\left ( \infty, \quad \infty\right )\right)"
assert latex(lowergamma(x, y)) == r'\gamma\left(x, y\right)'
assert latex(uppergamma(x, y)) == r'\Gamma\left(x, y\right)'
assert latex(cot(x)) == r'\cot{\left (x \right )}'
assert latex(coth(x)) == r'\coth{\left (x \right )}'
assert latex(re(x)) == r'\Re{x}'
assert latex(im(x)) == r'\Im{x}'
assert latex(root(x, y)) == r'x^{\frac{1}{y}}'
assert latex(arg(x)) == r'\arg{\left (x \right )}'
assert latex(zeta(x)) == r'\zeta\left(x\right)'
assert latex(zeta(x)) == r"\zeta\left(x\right)"
assert latex(zeta(x)**2) == r"\zeta^{2}\left(x\right)"
assert latex(zeta(x, y)) == r"\zeta\left(x, y\right)"
assert latex(zeta(x, y)**2) == r"\zeta^{2}\left(x, y\right)"
assert latex(dirichlet_eta(x)) == r"\eta\left(x\right)"
assert latex(dirichlet_eta(x)**2) == r"\eta^{2}\left(x\right)"
assert latex(polylog(x, y)) == r"\operatorname{Li}_{x}\left(y\right)"
assert latex(
polylog(x, y)**2) == r"\operatorname{Li}_{x}^{2}\left(y\right)"
assert latex(lerchphi(x, y, n)) == r"\Phi\left(x, y, n\right)"
assert latex(lerchphi(x, y, n)**2) == r"\Phi^{2}\left(x, y, n\right)"
assert latex(elliptic_k(z)) == r"K\left(z\right)"
assert latex(elliptic_k(z)**2) == r"K^{2}\left(z\right)"
assert latex(elliptic_f(x, y)) == r"F\left(x\middle| y\right)"
assert latex(elliptic_f(x, y)**2) == r"F^{2}\left(x\middle| y\right)"
assert latex(elliptic_e(x, y)) == r"E\left(x\middle| y\right)"
assert latex(elliptic_e(x, y)**2) == r"E^{2}\left(x\middle| y\right)"
assert latex(elliptic_e(z)) == r"E\left(z\right)"
assert latex(elliptic_e(z)**2) == r"E^{2}\left(z\right)"
assert latex(elliptic_pi(x, y, z)) == r"\Pi\left(x; y\middle| z\right)"
assert latex(elliptic_pi(x, y, z)**2) == \
r"\Pi^{2}\left(x; y\middle| z\right)"
assert latex(elliptic_pi(x, y)) == r"\Pi\left(x\middle| y\right)"
assert latex(elliptic_pi(x, y)**2) == r"\Pi^{2}\left(x\middle| y\right)"
assert latex(Ei(x)) == r'\operatorname{Ei}{\left (x \right )}'
assert latex(Ei(x)**2) == r'\operatorname{Ei}^{2}{\left (x \right )}'
assert latex(expint(x, y)**2) == r'\operatorname{E}_{x}^{2}\left(y\right)'
assert latex(Shi(x)**2) == r'\operatorname{Shi}^{2}{\left (x \right )}'
assert latex(Si(x)**2) == r'\operatorname{Si}^{2}{\left (x \right )}'
assert latex(Ci(x)**2) == r'\operatorname{Ci}^{2}{\left (x \right )}'
assert latex(Chi(x)**2) == r'\operatorname{Chi}^{2}{\left (x \right )}'
assert latex(Chi(x)) == r'\operatorname{Chi}{\left (x \right )}'
assert latex(
jacobi(n, a, b, x)) == r'P_{n}^{\left(a,b\right)}\left(x\right)'
assert latex(jacobi(n, a, b, x)**2) == r'\left(P_{n}^{\left(a,b\right)}\left(x\right)\right)^{2}'
assert latex(
gegenbauer(n, a, x)) == r'C_{n}^{\left(a\right)}\left(x\right)'
assert latex(gegenbauer(n, a, x)**2) == r'\left(C_{n}^{\left(a\right)}\left(x\right)\right)^{2}'
assert latex(chebyshevt(n, x)) == r'T_{n}\left(x\right)'
assert latex(
chebyshevt(n, x)**2) == r'\left(T_{n}\left(x\right)\right)^{2}'
assert latex(chebyshevu(n, x)) == r'U_{n}\left(x\right)'
assert latex(
chebyshevu(n, x)**2) == r'\left(U_{n}\left(x\right)\right)^{2}'
assert latex(legendre(n, x)) == r'P_{n}\left(x\right)'
assert latex(legendre(n, x)**2) == r'\left(P_{n}\left(x\right)\right)^{2}'
assert latex(
assoc_legendre(n, a, x)) == r'P_{n}^{\left(a\right)}\left(x\right)'
assert latex(assoc_legendre(n, a, x)**2) == r'\left(P_{n}^{\left(a\right)}\left(x\right)\right)^{2}'
assert latex(laguerre(n, x)) == r'L_{n}\left(x\right)'
assert latex(laguerre(n, x)**2) == r'\left(L_{n}\left(x\right)\right)^{2}'
assert latex(
assoc_laguerre(n, a, x)) == r'L_{n}^{\left(a\right)}\left(x\right)'
assert latex(assoc_laguerre(n, a, x)**2) == r'\left(L_{n}^{\left(a\right)}\left(x\right)\right)^{2}'
assert latex(hermite(n, x)) == r'H_{n}\left(x\right)'
assert latex(hermite(n, x)**2) == r'\left(H_{n}\left(x\right)\right)^{2}'
theta = Symbol("theta", real=True)
phi = Symbol("phi", real=True)
assert latex(Ynm(n,m,theta,phi)) == r'Y_{n}^{m}\left(\theta,\phi\right)'
assert latex(Ynm(n, m, theta, phi)**3) == r'\left(Y_{n}^{m}\left(\theta,\phi\right)\right)^{3}'
assert latex(Znm(n,m,theta,phi)) == r'Z_{n}^{m}\left(\theta,\phi\right)'
assert latex(Znm(n, m, theta, phi)**3) == r'\left(Z_{n}^{m}\left(\theta,\phi\right)\right)^{3}'
# Test latex printing of function names with "_"
assert latex(
polar_lift(0)) == r"\operatorname{polar\_lift}{\left (0 \right )}"
assert latex(polar_lift(
0)**3) == r"\operatorname{polar\_lift}^{3}{\left (0 \right )}"
assert latex(totient(n)) == r'\phi\left( n \right)'
assert latex(divisor_sigma(x)) == r"\sigma\left(x\right)"
assert latex(divisor_sigma(x)**2) == r"\sigma^{2}\left(x\right)"
assert latex(divisor_sigma(x, y)) == r"\sigma_y\left(x\right)"
assert latex(divisor_sigma(x, y)**2) == r"\sigma^{2}_y\left(x\right)"
# some unknown function name should get rendered with \operatorname
fjlkd = Function('fjlkd')
assert latex(fjlkd(x)) == r'\operatorname{fjlkd}{\left (x \right )}'
# even when it is referred to without an argument
assert latex(fjlkd) == r'\operatorname{fjlkd}'
def test_hyper_printing():
from sympy import pi
from sympy.abc import x, z
assert latex(meijerg(Tuple(pi, pi, x), Tuple(1),
(0, 1), Tuple(1, 2, 3/pi), z)) == \
r'{G_{4, 5}^{2, 3}\left(\begin{matrix} \pi, \pi, x & 1 \\0, 1 & 1, 2, \frac{3}{\pi} \end{matrix} \middle| {z} \right)}'
assert latex(meijerg(Tuple(), Tuple(1), (0,), Tuple(), z)) == \
r'{G_{1, 1}^{1, 0}\left(\begin{matrix} & 1 \\0 & \end{matrix} \middle| {z} \right)}'
assert latex(hyper((x, 2), (3,), z)) == \
r'{{}_{2}F_{1}\left(\begin{matrix} x, 2 ' \
r'\\ 3 \end{matrix}\middle| {z} \right)}'
assert latex(hyper(Tuple(), Tuple(1), z)) == \
r'{{}_{0}F_{1}\left(\begin{matrix} ' \
r'\\ 1 \end{matrix}\middle| {z} \right)}'
def test_latex_bessel():
from sympy.functions.special.bessel import (besselj, bessely, besseli,
besselk, hankel1, hankel2, jn, yn)
from sympy.abc import z
assert latex(besselj(n, z**2)**k) == r'J^{k}_{n}\left(z^{2}\right)'
assert latex(bessely(n, z)) == r'Y_{n}\left(z\right)'
assert latex(besseli(n, z)) == r'I_{n}\left(z\right)'
assert latex(besselk(n, z)) == r'K_{n}\left(z\right)'
assert latex(hankel1(n, z**2)**2) == \
r'\left(H^{(1)}_{n}\left(z^{2}\right)\right)^{2}'
assert latex(hankel2(n, z)) == r'H^{(2)}_{n}\left(z\right)'
assert latex(jn(n, z)) == r'j_{n}\left(z\right)'
assert latex(yn(n, z)) == r'y_{n}\left(z\right)'
def test_latex_fresnel():
from sympy.functions.special.error_functions import (fresnels, fresnelc)
from sympy.abc import z
assert latex(fresnels(z)) == r'S\left(z\right)'
assert latex(fresnelc(z)) == r'C\left(z\right)'
assert latex(fresnels(z)**2) == r'S^{2}\left(z\right)'
assert latex(fresnelc(z)**2) == r'C^{2}\left(z\right)'
def test_latex_brackets():
assert latex((-1)**x) == r"\left(-1\right)^{x}"
def test_latex_indexed():
Psi_symbol = Symbol('Psi_0', complex=True, real=False)
Psi_indexed = IndexedBase(Symbol('Psi', complex=True, real=False))
symbol_latex = latex(Psi_symbol * conjugate(Psi_symbol))
indexed_latex = latex(Psi_indexed[0] * conjugate(Psi_indexed[0]))
# \\overline{\\Psi_{0}} \\Psi_{0} vs. \\Psi_{0} \\overline{\\Psi_{0}}
assert symbol_latex.split() == indexed_latex.split() \
or symbol_latex.split() == indexed_latex.split()[::-1]
# Symbol('gamma') gives r'\gamma'
assert latex(IndexedBase('gamma')) == r'\gamma'
assert latex(IndexedBase('a b')) == 'a b'
assert latex(IndexedBase('a_b')) == 'a_{b}'
def test_latex_derivatives():
# regular "d" for ordinary derivatives
assert latex(diff(x**3, x, evaluate=False)) == \
r"\frac{d}{d x} x^{3}"
assert latex(diff(sin(x) + x**2, x, evaluate=False)) == \
r"\frac{d}{d x}\left(x^{2} + \sin{\left (x \right )}\right)"
assert latex(diff(diff(sin(x) + x**2, x, evaluate=False), evaluate=False)) == \
r"\frac{d^{2}}{d x^{2}} \left(x^{2} + \sin{\left (x \right )}\right)"
assert latex(diff(diff(diff(sin(x) + x**2, x, evaluate=False), evaluate=False), evaluate=False)) == \
r"\frac{d^{3}}{d x^{3}} \left(x^{2} + \sin{\left (x \right )}\right)"
# \partial for partial derivatives
assert latex(diff(sin(x * y), x, evaluate=False)) == \
r"\frac{\partial}{\partial x} \sin{\left (x y \right )}"
assert latex(diff(sin(x * y) + x**2, x, evaluate=False)) == \
r"\frac{\partial}{\partial x}\left(x^{2} + \sin{\left (x y \right )}\right)"
assert latex(diff(diff(sin(x*y) + x**2, x, evaluate=False), x, evaluate=False)) == \
r"\frac{\partial^{2}}{\partial x^{2}} \left(x^{2} + \sin{\left (x y \right )}\right)"
assert latex(diff(diff(diff(sin(x*y) + x**2, x, evaluate=False), x, evaluate=False), x, evaluate=False)) == \
r"\frac{\partial^{3}}{\partial x^{3}} \left(x^{2} + \sin{\left (x y \right )}\right)"
# mixed partial derivatives
f = Function("f")
assert latex(diff(diff(f(x,y), x, evaluate=False), y, evaluate=False)) == \
r"\frac{\partial^{2}}{\partial x\partial y} " + latex(f(x,y))
assert latex(diff(diff(diff(f(x,y), x, evaluate=False), x, evaluate=False), y, evaluate=False)) == \
r"\frac{\partial^{3}}{\partial x^{2}\partial y} " + latex(f(x,y))
# use ordinary d when one of the variables has been integrated out
assert latex(diff(Integral(exp(-x * y), (x, 0, oo)), y, evaluate=False)) == \
r"\frac{d}{d y} \int_{0}^{\infty} e^{- x y}\, dx"
def test_latex_subs():
assert latex(Subs(x*y, (
x, y), (1, 2))) == r'\left. x y \right|_{\substack{ x=1\\ y=2 }}'
def test_latex_integrals():
assert latex(Integral(log(x), x)) == r"\int \log{\left (x \right )}\, dx"
assert latex(Integral(x**2, (x, 0, 1))) == r"\int_{0}^{1} x^{2}\, dx"
assert latex(Integral(x**2, (x, 10, 20))) == r"\int_{10}^{20} x^{2}\, dx"
assert latex(Integral(
y*x**2, (x, 0, 1), y)) == r"\int\int_{0}^{1} x^{2} y\, dx\, dy"
assert latex(Integral(y*x**2, (x, 0, 1), y), mode='equation*') \
== r"\begin{equation*}\int\int\limits_{0}^{1} x^{2} y\, dx\, dy\end{equation*}"
assert latex(Integral(y*x**2, (x, 0, 1), y), mode='equation*', itex=True) \
== r"$$\int\int_{0}^{1} x^{2} y\, dx\, dy$$"
assert latex(Integral(x, (x, 0))) == r"\int^{0} x\, dx"
assert latex(Integral(x*y, x, y)) == r"\iint x y\, dx\, dy"
assert latex(Integral(x*y*z, x, y, z)) == r"\iiint x y z\, dx\, dy\, dz"
assert latex(Integral(x*y*z*t, x, y, z, t)) == \
r"\iiiint t x y z\, dx\, dy\, dz\, dt"
assert latex(Integral(x, x, x, x, x, x, x)) == \
r"\int\int\int\int\int\int x\, dx\, dx\, dx\, dx\, dx\, dx"
assert latex(Integral(x, x, y, (z, 0, 1))) == \
r"\int_{0}^{1}\int\int x\, dx\, dy\, dz"
def test_latex_sets():
for s in (frozenset, set):
assert latex(s([x*y, x**2])) == r"\left\{x^{2}, x y\right\}"
assert latex(s(range(1, 6))) == r"\left\{1, 2, 3, 4, 5\right\}"
assert latex(s(range(1, 13))) == \
r"\left\{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12\right\}"
s = FiniteSet
assert latex(s(*[x*y, x**2])) == r"\left\{x^{2}, x y\right\}"
assert latex(s(*range(1, 6))) == r"\left\{1, 2, 3, 4, 5\right\}"
assert latex(s(*range(1, 13))) == \
r"\left\{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12\right\}"
def test_latex_Range():
assert latex(Range(1, 51)) == \
r'\left\{1, 2, \ldots, 50\right\}'
assert latex(Range(1, 4)) == r'\left\{1, 2, 3\right\}'
def test_latex_intervals():
a = Symbol('a', real=True)
assert latex(Interval(0, 0)) == r"\left\{0\right\}"
assert latex(Interval(0, a)) == r"\left[0, a\right]"
assert latex(Interval(0, a, False, False)) == r"\left[0, a\right]"
assert latex(Interval(0, a, True, False)) == r"\left(0, a\right]"
assert latex(Interval(0, a, False, True)) == r"\left[0, a\right)"
assert latex(Interval(0, a, True, True)) == r"\left(0, a\right)"
def test_latex_emptyset():
assert latex(S.EmptySet) == r"\emptyset"
def test_latex_union():
assert latex(Union(Interval(0, 1), Interval(2, 3))) == \
r"\left[0, 1\right] \cup \left[2, 3\right]"
assert latex(Union(Interval(1, 1), Interval(2, 2), Interval(3, 4))) == \
r"\left\{1, 2\right\} \cup \left[3, 4\right]"
def test_latex_symmetric_difference():
assert latex(SymmetricDifference(Interval(2,5), Interval(4,7), \
evaluate = False)) == r'\left[2, 5\right] \triangle \left[4, 7\right]'
def test_latex_Complement():
assert latex(Complement(S.Reals, S.Naturals)) == r"\mathbb{R} \setminus \mathbb{N}"
def test_latex_productset():
line = Interval(0, 1)
bigline = Interval(0, 10)
fset = FiniteSet(1, 2, 3)
assert latex(line**2) == r"%s^2" % latex(line)
assert latex(line * bigline * fset) == r"%s \times %s \times %s" % (
latex(line), latex(bigline), latex(fset))
def test_latex_Naturals():
assert latex(S.Naturals) == r"\mathbb{N}"
assert latex(S.Integers) == r"\mathbb{Z}"
def test_latex_ImageSet():
x = Symbol('x')
assert latex(ImageSet(Lambda(x, x**2), S.Naturals)) == \
r"\left\{x^{2}\; |\; x \in \mathbb{N}\right\}"
def test_latex_Contains():
x = Symbol('x')
assert latex(Contains(x, S.Naturals)) == r"x \in \mathbb{N}"
def test_latex_sum():
assert latex(Sum(x*y**2, (x, -2, 2), (y, -5, 5))) == \
r"\sum_{\substack{-2 \leq x \leq 2\\-5 \leq y \leq 5}} x y^{2}"
assert latex(Sum(x**2, (x, -2, 2))) == \
r"\sum_{x=-2}^{2} x^{2}"
assert latex(Sum(x**2 + y, (x, -2, 2))) == \
r"\sum_{x=-2}^{2} \left(x^{2} + y\right)"
def test_latex_product():
assert latex(Product(x*y**2, (x, -2, 2), (y, -5, 5))) == \
r"\prod_{\substack{-2 \leq x \leq 2\\-5 \leq y \leq 5}} x y^{2}"
assert latex(Product(x**2, (x, -2, 2))) == \
r"\prod_{x=-2}^{2} x^{2}"
assert latex(Product(x**2 + y, (x, -2, 2))) == \
r"\prod_{x=-2}^{2} \left(x^{2} + y\right)"
def test_latex_limits():
assert latex(Limit(x, x, oo)) == r"\lim_{x \to \infty} x"
# issue 8175
f = Function('f')
assert latex(Limit(f(x), x, 0)) == r"\lim_{x \to 0^+} f{\left (x \right )}"
assert latex(Limit(f(x), x, 0, "-")) == r"\lim_{x \to 0^-} f{\left (x \right )}"
def test_issue_3568():
beta = Symbol(r'\beta')
y = beta + x
assert latex(y) in [r'\beta + x', r'x + \beta']
beta = Symbol(r'beta')
y = beta + x
assert latex(y) in [r'\beta + x', r'x + \beta']
def test_latex():
assert latex((2*tau)**Rational(7, 2)) == "8 \\sqrt{2} \\tau^{\\frac{7}{2}}"
assert latex((2*mu)**Rational(7, 2), mode='equation*') == \
"\\begin{equation*}8 \\sqrt{2} \\mu^{\\frac{7}{2}}\\end{equation*}"
assert latex((2*mu)**Rational(7, 2), mode='equation', itex=True) == \
"$$8 \\sqrt{2} \\mu^{\\frac{7}{2}}$$"
assert latex([2/x, y]) == r"\left [ \frac{2}{x}, \quad y\right ]"
def test_latex_dict():
d = {Rational(1): 1, x**2: 2, x: 3, x**3: 4}
assert latex(d) == r'\left \{ 1 : 1, \quad x : 3, \quad x^{2} : 2, \quad x^{3} : 4\right \}'
D = Dict(d)
assert latex(D) == r'\left \{ 1 : 1, \quad x : 3, \quad x^{2} : 2, \quad x^{3} : 4\right \}'
def test_latex_list():
l = [Symbol('omega1'), Symbol('a'), Symbol('alpha')]
assert latex(l) == r'\left [ \omega_{1}, \quad a, \quad \alpha\right ]'
def test_latex_rational():
#tests issue 3973
assert latex(-Rational(1, 2)) == "- \\frac{1}{2}"
assert latex(Rational(-1, 2)) == "- \\frac{1}{2}"
assert latex(Rational(1, -2)) == "- \\frac{1}{2}"
assert latex(-Rational(-1, 2)) == "\\frac{1}{2}"
assert latex(-Rational(1, 2)*x) == "- \\frac{x}{2}"
assert latex(-Rational(1, 2)*x + Rational(-2, 3)*y) == \
"- \\frac{x}{2} - \\frac{2 y}{3}"
def test_latex_inverse():
#tests issue 4129
assert latex(1/x) == "\\frac{1}{x}"
assert latex(1/(x + y)) == "\\frac{1}{x + y}"
def test_latex_DiracDelta():
assert latex(DiracDelta(x)) == r"\delta\left(x\right)"
assert latex(DiracDelta(x)**2) == r"\left(\delta\left(x\right)\right)^{2}"
assert latex(DiracDelta(x, 0)) == r"\delta\left(x\right)"
assert latex(DiracDelta(x, 5)) == \
r"\delta^{\left( 5 \right)}\left( x \right)"
assert latex(DiracDelta(x, 5)**2) == \
r"\left(\delta^{\left( 5 \right)}\left( x \right)\right)^{2}"
def test_latex_Heaviside():
assert latex(Heaviside(x)) == r"\theta\left(x\right)"
assert latex(Heaviside(x)**2) == r"\left(\theta\left(x\right)\right)^{2}"
def test_latex_KroneckerDelta():
assert latex(KroneckerDelta(x, y)) == r"\delta_{x y}"
assert latex(KroneckerDelta(x, y + 1)) == r"\delta_{x, y + 1}"
# issue 6578
assert latex(KroneckerDelta(x + 1, y)) == r"\delta_{y, x + 1}"
def test_latex_LeviCivita():
assert latex(LeviCivita(x, y, z)) == r"\varepsilon_{x y z}"
assert latex(LeviCivita(x, y, z)**2) == r"\left(\varepsilon_{x y z}\right)^{2}"
assert latex(LeviCivita(x, y, z + 1)) == r"\varepsilon_{x, y, z + 1}"
assert latex(LeviCivita(x, y + 1, z)) == r"\varepsilon_{x, y + 1, z}"
assert latex(LeviCivita(x + 1, y, z)) == r"\varepsilon_{x + 1, y, z}"
def test_mode():
expr = x + y
assert latex(expr) == 'x + y'
assert latex(expr, mode='plain') == 'x + y'
assert latex(expr, mode='inline') == '$x + y$'
assert latex(
expr, mode='equation*') == '\\begin{equation*}x + y\\end{equation*}'
assert latex(
expr, mode='equation') == '\\begin{equation}x + y\\end{equation}'
def test_latex_Piecewise():
p = Piecewise((x, x < 1), (x**2, True))
assert latex(p) == "\\begin{cases} x & \\text{for}\: x < 1 \\\\x^{2} &" \
" \\text{otherwise} \\end{cases}"
assert latex(p, itex=True) == "\\begin{cases} x & \\text{for}\: x \\lt 1 \\\\x^{2} &" \
" \\text{otherwise} \\end{cases}"
p = Piecewise((x, x < 0), (0, x >= 0))
assert latex(p) == "\\begin{cases} x & \\text{for}\\: x < 0 \\\\0 &" \
" \\text{for}\\: x \\geq 0 \\end{cases}"
A, B = symbols("A B", commutative=False)
p = Piecewise((A**2, Eq(A, B)), (A*B, True))
s = r"\begin{cases} A^{2} & \text{for}\: A = B \\A B & \text{otherwise} \end{cases}"
assert latex(p) == s
assert latex(A*p) == r"A %s" % s
assert latex(p*A) == r"\left(%s\right) A" % s
def test_latex_Matrix():
M = Matrix([[1 + x, y], [y, x - 1]])
assert latex(M) == \
r'\left[\begin{matrix}x + 1 & y\\y & x - 1\end{matrix}\right]'
assert latex(M, mode='inline') == \
r'$\left[\begin{smallmatrix}x + 1 & y\\' \
r'y & x - 1\end{smallmatrix}\right]$'
assert latex(M, mat_str='array') == \
r'\left[\begin{array}{cc}x + 1 & y\\y & x - 1\end{array}\right]'
assert latex(M, mat_str='bmatrix') == \
r'\left[\begin{bmatrix}x + 1 & y\\y & x - 1\end{bmatrix}\right]'
assert latex(M, mat_delim=None, mat_str='bmatrix') == \
r'\begin{bmatrix}x + 1 & y\\y & x - 1\end{bmatrix}'
M2 = Matrix(1, 11, range(11))
assert latex(M2) == \
r'\left[\begin{array}{ccccccccccc}' \
r'0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10\end{array}\right]'
def test_latex_matrix_with_functions():
t = symbols('t')
theta1 = symbols('theta1', cls=Function)
M = Matrix([[sin(theta1(t)), cos(theta1(t))],
[cos(theta1(t).diff(t)), sin(theta1(t).diff(t))]])
expected = (r'\left[\begin{matrix}\sin{\left '
r'(\theta_{1}{\left (t \right )} \right )} & '
r'\cos{\left (\theta_{1}{\left (t \right )} \right '
r')}\\\cos{\left (\frac{d}{d t} \theta_{1}{\left (t '
r'\right )} \right )} & \sin{\left (\frac{d}{d t} '
r'\theta_{1}{\left (t \right )} \right '
r')}\end{matrix}\right]')
assert latex(M) == expected
def test_latex_mul_symbol():
assert latex(4*4**x, mul_symbol='times') == "4 \\times 4^{x}"
assert latex(4*4**x, mul_symbol='dot') == "4 \\cdot 4^{x}"
assert latex(4*4**x, mul_symbol='ldot') == "4 \,.\, 4^{x}"
assert latex(4*x, mul_symbol='times') == "4 \\times x"
assert latex(4*x, mul_symbol='dot') == "4 \\cdot x"
assert latex(4*x, mul_symbol='ldot') == "4 \,.\, x"
def test_latex_issue_4381():
y = 4*4**log(2)
assert latex(y) == r'4 \cdot 4^{\log{\left (2 \right )}}'
assert latex(1/y) == r'\frac{1}{4 \cdot 4^{\log{\left (2 \right )}}}'
def test_latex_issue_4576():
assert latex(Symbol("beta_13_2")) == r"\beta_{13 2}"
assert latex(Symbol("beta_132_20")) == r"\beta_{132 20}"
assert latex(Symbol("beta_13")) == r"\beta_{13}"
assert latex(Symbol("x_a_b")) == r"x_{a b}"
assert latex(Symbol("x_1_2_3")) == r"x_{1 2 3}"
assert latex(Symbol("x_a_b1")) == r"x_{a b1}"
assert latex(Symbol("x_a_1")) == r"x_{a 1}"
assert latex(Symbol("x_1_a")) == r"x_{1 a}"
assert latex(Symbol("x_1^aa")) == r"x^{aa}_{1}"
assert latex(Symbol("x_1__aa")) == r"x^{aa}_{1}"
assert latex(Symbol("x_11^a")) == r"x^{a}_{11}"
assert latex(Symbol("x_11__a")) == r"x^{a}_{11}"
assert latex(Symbol("x_a_a_a_a")) == r"x_{a a a a}"
assert latex(Symbol("x_a_a^a^a")) == r"x^{a a}_{a a}"
assert latex(Symbol("x_a_a__a__a")) == r"x^{a a}_{a a}"
assert latex(Symbol("alpha_11")) == r"\alpha_{11}"
assert latex(Symbol("alpha_11_11")) == r"\alpha_{11 11}"
assert latex(Symbol("alpha_alpha")) == r"\alpha_{\alpha}"
assert latex(Symbol("alpha^aleph")) == r"\alpha^{\aleph}"
assert latex(Symbol("alpha__aleph")) == r"\alpha^{\aleph}"
def test_latex_pow_fraction():
x = Symbol('x')
# Testing exp
assert 'e^{-x}' in latex(exp(-x)/2).replace(' ', '') # Remove Whitespace
# Testing just e^{-x} in case future changes alter behavior of muls or fracs
# In particular current output is \frac{1}{2}e^{- x} but perhaps this will
# change to \frac{e^{-x}}{2}
# Testing general, non-exp, power
assert '3^{-x}' in latex(3**-x/2).replace(' ', '')
def test_noncommutative():
A, B, C = symbols('A,B,C', commutative=False)
assert latex(A*B*C**-1) == "A B C^{-1}"
assert latex(C**-1*A*B) == "C^{-1} A B"
assert latex(A*C**-1*B) == "A C^{-1} B"
def test_latex_order():
expr = x**3 + x**2*y + 3*x*y**3 + y**4
assert latex(expr, order='lex') == "x^{3} + x^{2} y + 3 x y^{3} + y^{4}"
assert latex(
expr, order='rev-lex') == "y^{4} + 3 x y^{3} + x^{2} y + x^{3}"
def test_latex_Lambda():
assert latex(Lambda(x, x + 1)) == \
r"\left( x \mapsto x + 1 \right)"
assert latex(Lambda((x, y), x + 1)) == \
r"\left( \left ( x, \quad y\right ) \mapsto x + 1 \right)"
def test_latex_PolyElement():
Ruv, u,v = ring("u,v", ZZ)
Rxyz, x,y,z = ring("x,y,z", Ruv)
assert latex(x - x) == r"0"
assert latex(x - 1) == r"x - 1"
assert latex(x + 1) == r"x + 1"
assert latex((u**2 + 3*u*v + 1)*x**2*y + u + 1) == r"\left({u}^{2} + 3 u v + 1\right) {x}^{2} y + u + 1"
assert latex((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x) == r"\left({u}^{2} + 3 u v + 1\right) {x}^{2} y + \left(u + 1\right) x"
assert latex((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1) == r"\left({u}^{2} + 3 u v + 1\right) {x}^{2} y + \left(u + 1\right) x + 1"
assert latex((-u**2 + 3*u*v - 1)*x**2*y - (u + 1)*x - 1) == r"-\left({u}^{2} - 3 u v + 1\right) {x}^{2} y - \left(u + 1\right) x - 1"
assert latex(-(v**2 + v + 1)*x + 3*u*v + 1) == r"-\left({v}^{2} + v + 1\right) x + 3 u v + 1"
assert latex(-(v**2 + v + 1)*x - 3*u*v + 1) == r"-\left({v}^{2} + v + 1\right) x - 3 u v + 1"
def test_latex_FracElement():
Fuv, u,v = field("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Fuv)
assert latex(x - x) == r"0"
assert latex(x - 1) == r"x - 1"
assert latex(x + 1) == r"x + 1"
assert latex(x/3) == r"\frac{x}{3}"
assert latex(x/z) == r"\frac{x}{z}"
assert latex(x*y/z) == r"\frac{x y}{z}"
assert latex(x/(z*t)) == r"\frac{x}{z t}"
assert latex(x*y/(z*t)) == r"\frac{x y}{z t}"
assert latex((x - 1)/y) == r"\frac{x - 1}{y}"
assert latex((x + 1)/y) == r"\frac{x + 1}{y}"
assert latex((-x - 1)/y) == r"\frac{-x - 1}{y}"
assert latex((x + 1)/(y*z)) == r"\frac{x + 1}{y z}"
assert latex(-y/(x + 1)) == r"\frac{-y}{x + 1}"
assert latex(y*z/(x + 1)) == r"\frac{y z}{x + 1}"
assert latex(((u + 1)*x*y + 1)/((v - 1)*z - 1)) == r"\frac{\left(u + 1\right) x y + 1}{\left(v - 1\right) z - 1}"
assert latex(((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1)) == r"\frac{\left(u + 1\right) x y + 1}{\left(v - 1\right) z - u v t - 1}"
def test_latex_Poly():
assert latex(Poly(x**2 + 2 * x, x)) == \
r"\operatorname{Poly}{\left( x^{2} + 2 x, x, domain=\mathbb{Z} \right)}"
assert latex(Poly(x/y, x)) == \
r"\operatorname{Poly}{\left( \frac{x}{y}, x, domain=\mathbb{Z}\left(y\right) \right)}"
assert latex(Poly(2.0*x + y)) == \
r"\operatorname{Poly}{\left( 2.0 x + 1.0 y, x, y, domain=\mathbb{R} \right)}"
def test_latex_RootOf():
assert latex(RootOf(x**5 + x + 3, 0)) == \
r"\operatorname{RootOf} {\left(x^{5} + x + 3, 0\right)}"
def test_latex_RootSum():
assert latex(RootSum(x**5 + x + 3, sin)) == \
r"\operatorname{RootSum} {\left(x^{5} + x + 3, \left( x \mapsto \sin{\left (x \right )} \right)\right)}"
def test_settings():
raises(TypeError, lambda: latex(x*y, method="garbage"))
def test_latex_numbers():
assert latex(catalan(n)) == r"C_{n}"
def test_lamda():
assert latex(Symbol('lamda')) == r"\lambda"
assert latex(Symbol('Lamda')) == r"\Lambda"
def test_custom_symbol_names():
x = Symbol('x')
y = Symbol('y')
assert latex(x) == "x"
assert latex(x, symbol_names={x: "x_i"}) == "x_i"
assert latex(x + y, symbol_names={x: "x_i"}) == "x_i + y"
assert latex(x**2, symbol_names={x: "x_i"}) == "x_i^{2}"
assert latex(x + y, symbol_names={x: "x_i", y: "y_j"}) == "x_i + y_j"
def test_matAdd():
from sympy import MatrixSymbol
from sympy.printing.latex import LatexPrinter
C = MatrixSymbol('C', 5, 5)
B = MatrixSymbol('B', 5, 5)
l = LatexPrinter()
assert l._print_MatAdd(C - 2*B) in ['-2 B + C', 'C -2 B']
assert l._print_MatAdd(C + 2*B) in ['2 B + C', 'C + 2 B']
assert l._print_MatAdd(B - 2*C) in ['B -2 C', '-2 C + B']
assert l._print_MatAdd(B + 2*C) in ['B + 2 C', '2 C + B']
def test_matMul():
from sympy import MatrixSymbol
from sympy.printing.latex import LatexPrinter
A = MatrixSymbol('A', 5, 5)
B = MatrixSymbol('B', 5, 5)
x = Symbol('x')
l = LatexPrinter()
assert l._print_MatMul(2*A) == '2 A'
assert l._print_MatMul(2*x*A) == '2 x A'
assert l._print_MatMul(-2*A) == '-2 A'
assert l._print_MatMul(1.5*A) == '1.5 A'
assert l._print_MatMul(sqrt(2)*A) == r'\sqrt{2} A'
assert l._print_MatMul(-sqrt(2)*A) == r'- \sqrt{2} A'
assert l._print_MatMul(2*sqrt(2)*x*A) == r'2 \sqrt{2} x A'
assert l._print_MatMul(-2*A*(A + 2*B)) in [r'-2 A \left(A + 2 B\right)',
r'-2 A \left(2 B + A\right)']
def test_latex_MatrixSlice():
from sympy.matrices.expressions import MatrixSymbol
assert latex(MatrixSymbol('X', 10, 10)[:5, 1:9:2]) == \
r'X\left[:5, 1:9:2\right]'
assert latex(MatrixSymbol('X', 10, 10)[5, :5:2]) == \
r'X\left[5, :5:2\right]'
def test_latex_RandomDomain():
from sympy.stats import Normal, Die, Exponential, pspace, where
X = Normal('x1', 0, 1)
assert latex(where(X > 0)) == r"Domain: 0 < x_{1} \wedge x_{1} < \infty"
D = Die('d1', 6)
assert latex(where(D > 4)) == r"Domain: d_{1} = 5 \vee d_{1} = 6"
A = Exponential('a', 1)
B = Exponential('b', 1)
assert latex(
pspace(Tuple(A, B)).domain) == \
r"Domain: 0 \leq a \wedge 0 \leq b \wedge a < \infty \wedge b < \infty"
def test_PrettyPoly():
from sympy.polys.domains import QQ
F = QQ.frac_field(x, y)
R = QQ[x, y]
assert latex(F.convert(x/(x + y))) == latex(x/(x + y))
assert latex(R.convert(x + y)) == latex(x + y)
def test_integral_transforms():
x = Symbol("x")
k = Symbol("k")
f = Function("f")
a = Symbol("a")
b = Symbol("b")
assert latex(MellinTransform(f(x), x, k)) == r"\mathcal{M}_{x}\left[f{\left (x \right )}\right]\left(k\right)"
assert latex(InverseMellinTransform(f(k), k, x, a, b)) == r"\mathcal{M}^{-1}_{k}\left[f{\left (k \right )}\right]\left(x\right)"
assert latex(LaplaceTransform(f(x), x, k)) == r"\mathcal{L}_{x}\left[f{\left (x \right )}\right]\left(k\right)"
assert latex(InverseLaplaceTransform(f(k), k, x, (a, b))) == r"\mathcal{L}^{-1}_{k}\left[f{\left (k \right )}\right]\left(x\right)"
assert latex(FourierTransform(f(x), x, k)) == r"\mathcal{F}_{x}\left[f{\left (x \right )}\right]\left(k\right)"
assert latex(InverseFourierTransform(f(k), k, x)) == r"\mathcal{F}^{-1}_{k}\left[f{\left (k \right )}\right]\left(x\right)"
assert latex(CosineTransform(f(x), x, k)) == r"\mathcal{COS}_{x}\left[f{\left (x \right )}\right]\left(k\right)"
assert latex(InverseCosineTransform(f(k), k, x)) == r"\mathcal{COS}^{-1}_{k}\left[f{\left (k \right )}\right]\left(x\right)"
assert latex(SineTransform(f(x), x, k)) == r"\mathcal{SIN}_{x}\left[f{\left (x \right )}\right]\left(k\right)"
assert latex(InverseSineTransform(f(k), k, x)) == r"\mathcal{SIN}^{-1}_{k}\left[f{\left (k \right )}\right]\left(x\right)"
def test_PolynomialRingBase():
from sympy.polys.domains import QQ
assert latex(QQ.old_poly_ring(x, y)) == r"\mathbb{Q}\left[x, y\right]"
assert latex(QQ.old_poly_ring(x, y, order="ilex")) == \
r"S_<^{-1}\mathbb{Q}\left[x, y\right]"
def test_categories():
from sympy.categories import (Object, IdentityMorphism,
NamedMorphism, Category, Diagram, DiagramGrid)
A1 = Object("A1")
A2 = Object("A2")
A3 = Object("A3")
f1 = NamedMorphism(A1, A2, "f1")
f2 = NamedMorphism(A2, A3, "f2")
id_A1 = IdentityMorphism(A1)
K1 = Category("K1")
assert latex(A1) == "A_{1}"
assert latex(f1) == "f_{1}:A_{1}\\rightarrow A_{2}"
assert latex(id_A1) == "id:A_{1}\\rightarrow A_{1}"
assert latex(f2*f1) == "f_{2}\\circ f_{1}:A_{1}\\rightarrow A_{3}"
assert latex(K1) == "\mathbf{K_{1}}"
d = Diagram()
assert latex(d) == "\emptyset"
d = Diagram({f1: "unique", f2: S.EmptySet})
assert latex(d) == r"\left \{ f_{2}\circ f_{1}:A_{1}" \
r"\rightarrow A_{3} : \emptyset, \quad id:A_{1}\rightarrow " \
r"A_{1} : \emptyset, \quad id:A_{2}\rightarrow A_{2} : " \
r"\emptyset, \quad id:A_{3}\rightarrow A_{3} : \emptyset, " \
r"\quad f_{1}:A_{1}\rightarrow A_{2} : \left\{unique\right\}, " \
r"\quad f_{2}:A_{2}\rightarrow A_{3} : \emptyset\right \}"
d = Diagram({f1: "unique", f2: S.EmptySet}, {f2 * f1: "unique"})
assert latex(d) == r"\left \{ f_{2}\circ f_{1}:A_{1}" \
r"\rightarrow A_{3} : \emptyset, \quad id:A_{1}\rightarrow " \
r"A_{1} : \emptyset, \quad id:A_{2}\rightarrow A_{2} : " \
r"\emptyset, \quad id:A_{3}\rightarrow A_{3} : \emptyset, " \
r"\quad f_{1}:A_{1}\rightarrow A_{2} : \left\{unique\right\}," \
r" \quad f_{2}:A_{2}\rightarrow A_{3} : \emptyset\right \}" \
r"\Longrightarrow \left \{ f_{2}\circ f_{1}:A_{1}" \
r"\rightarrow A_{3} : \left\{unique\right\}\right \}"
# A linear diagram.
A = Object("A")
B = Object("B")
C = Object("C")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
d = Diagram([f, g])
grid = DiagramGrid(d)
assert latex(grid) == "\\begin{array}{cc}\n" \
"A & B \\\\\n" \
" & C \n" \
"\\end{array}\n"
def test_Modules():
from sympy.polys.domains import QQ
from sympy.polys.agca import homomorphism
R = QQ.old_poly_ring(x, y)
F = R.free_module(2)
M = F.submodule([x, y], [1, x**2])
assert latex(F) == r"{\mathbb{Q}\left[x, y\right]}^{2}"
assert latex(M) == \
r"\left< {\left[ {x},{y} \right]},{\left[ {1},{x^{2}} \right]} \right>"
I = R.ideal(x**2, y)
assert latex(I) == r"\left< {x^{2}},{y} \right>"
Q = F / M
assert latex(Q) == r"\frac{{\mathbb{Q}\left[x, y\right]}^{2}}{\left< {\left[ {x},{y} \right]},{\left[ {1},{x^{2}} \right]} \right>}"
assert latex(Q.submodule([1, x**3/2], [2, y])) == \
r"\left< {{\left[ {1},{\frac{x^{3}}{2}} \right]} + {\left< {\left[ {x},{y} \right]},{\left[ {1},{x^{2}} \right]} \right>}},{{\left[ {2},{y} \right]} + {\left< {\left[ {x},{y} \right]},{\left[ {1},{x^{2}} \right]} \right>}} \right>"
h = homomorphism(QQ.old_poly_ring(x).free_module(2), QQ.old_poly_ring(x).free_module(2), [0, 0])
assert latex(h) == r"{\left[\begin{matrix}0 & 0\\0 & 0\end{matrix}\right]} : {{\mathbb{Q}\left[x\right]}^{2}} \to {{\mathbb{Q}\left[x\right]}^{2}}"
def test_QuotientRing():
from sympy.polys.domains import QQ
R = QQ.old_poly_ring(x)/[x**2 + 1]
assert latex(
R) == r"\frac{\mathbb{Q}\left[x\right]}{\left< {x^{2} + 1} \right>}"
assert latex(R.one) == r"{1} + {\left< {x^{2} + 1} \right>}"
def test_Tr():
#TODO: Handle indices
A, B = symbols('A B', commutative=False)
t = Tr(A*B)
assert latex(t) == r'\mbox{Tr}\left(A B\right)'
def test_Adjoint():
from sympy.matrices import MatrixSymbol, Adjoint, Inverse, Transpose
X = MatrixSymbol('X', 2, 2)
Y = MatrixSymbol('Y', 2, 2)
assert latex(Adjoint(X)) == r'X^\dag'
assert latex(Adjoint(X + Y)) == r'\left(X + Y\right)^\dag'
assert latex(Adjoint(X) + Adjoint(Y)) == r'X^\dag + Y^\dag'
assert latex(Adjoint(X*Y)) == r'\left(X Y\right)^\dag'
assert latex(Adjoint(Y)*Adjoint(X)) == r'Y^\dag X^\dag'
assert latex(Adjoint(X**2)) == r'\left(X^{2}\right)^\dag'
assert latex(Adjoint(X)**2) == r'\left(X^\dag\right)^{2}'
assert latex(Adjoint(Inverse(X))) == r'\left(X^{-1}\right)^\dag'
assert latex(Inverse(Adjoint(X))) == r'\left(X^\dag\right)^{-1}'
assert latex(Adjoint(Transpose(X))) == r'\left(X^T\right)^\dag'
assert latex(Transpose(Adjoint(X))) == r'\left(X^\dag\right)^T'
def test_Hadamard():
from sympy.matrices import MatrixSymbol, HadamardProduct
X = MatrixSymbol('X', 2, 2)
Y = MatrixSymbol('Y', 2, 2)
assert latex(HadamardProduct(X, Y*Y)) == r'X \circ \left(Y Y\right)'
assert latex(HadamardProduct(X, Y)*Y) == r'\left(X \circ Y\right) Y'
def test_boolean_args_order():
syms = symbols('a:f')
expr = And(*syms)
assert latex(expr) == 'a \\wedge b \\wedge c \\wedge d \\wedge e \\wedge f'
expr = Or(*syms)
assert latex(expr) == 'a \\vee b \\vee c \\vee d \\vee e \\vee f'
expr = Equivalent(*syms)
assert latex(expr) == 'a \\equiv b \\equiv c \\equiv d \\equiv e \\equiv f'
expr = Xor(*syms)
assert latex(expr) == 'a \\veebar b \\veebar c \\veebar d \\veebar e \\veebar f'
def test_imaginary():
i = sqrt(-1)
assert latex(i) == r'i'
def test_builtins_without_args():
assert latex(sin) == r'\sin'
assert latex(cos) == r'\cos'
assert latex(tan) == r'\tan'
assert latex(log) == r'\log'
assert latex(Ei) == r'\operatorname{Ei}'
assert latex(zeta) == r'\zeta'
def test_latex_greek_functions():
# bug because capital greeks that have roman equivalents should not use
# \Alpha, \Beta, \Eta, etc.
s = Function('Alpha')
assert latex(s) == r'A'
assert latex(s(x)) == r'A{\left (x \right )}'
s = Function('Beta')
assert latex(s) == r'B'
s = Function('Eta')
assert latex(s) == r'H'
assert latex(s(x)) == r'H{\left (x \right )}'
# bug because sympy.core.numbers.Pi is special
p = Function('Pi')
# assert latex(p(x)) == r'\Pi{\left (x \right )}'
assert latex(p) == r'\Pi'
# bug because not all greeks are included
c = Function('chi')
assert latex(c(x)) == r'\chi{\left (x \right )}'
assert latex(c) == r'\chi'
def test_translate():
s = 'Alpha'
assert translate(s) == 'A'
s = 'Beta'
assert translate(s) == 'B'
s = 'Eta'
assert translate(s) == 'H'
s = 'omicron'
assert translate(s) == 'o'
s = 'Pi'
assert translate(s) == r'\Pi'
s = 'pi'
assert translate(s) == r'\pi'
s = 'LamdaHatDOT'
assert translate(s) == r'\dot{\hat{\Lambda}}'
def test_other_symbols():
from sympy.printing.latex import other_symbols
for s in other_symbols:
assert latex(symbols(s)) == "\\"+s
def test_modifiers():
# Test each modifier individually in the simplest case (with funny capitalizations)
assert latex(symbols("xMathring")) == r"\mathring{x}"
assert latex(symbols("xCheck")) == r"\check{x}"
assert latex(symbols("xBreve")) == r"\breve{x}"
assert latex(symbols("xAcute")) == r"\acute{x}"
assert latex(symbols("xGrave")) == r"\grave{x}"
assert latex(symbols("xTilde")) == r"\tilde{x}"
assert latex(symbols("xPrime")) == r"{x}'"
assert latex(symbols("xddDDot")) == r"\ddddot{x}"
assert latex(symbols("xDdDot")) == r"\dddot{x}"
assert latex(symbols("xDDot")) == r"\ddot{x}"
assert latex(symbols("xBold")) == r"\boldsymbol{x}"
assert latex(symbols("xnOrM")) == r"\left\lVert{x}\right\rVert"
assert latex(symbols("xAVG")) == r"\left\langle{x}\right\rangle"
assert latex(symbols("xHat")) == r"\hat{x}"
assert latex(symbols("xDot")) == r"\dot{x}"
assert latex(symbols("xBar")) == r"\bar{x}"
assert latex(symbols("xVec")) == r"\vec{x}"
assert latex(symbols("xAbs")) == r"\left\|{x}\right\|"
assert latex(symbols("xMag")) == r"\left\lvert{x}\right\rvert"
assert latex(symbols("xPrM")) == r"{x}'"
assert latex(symbols("xBM")) == r"\boldsymbol{x}"
# Test strings that are *only* the names of modifiers
assert latex(symbols("Mathring")) == r"Mathring"
assert latex(symbols("Check")) == r"Check"
assert latex(symbols("Breve")) == r"Breve"
assert latex(symbols("Acute")) == r"Acute"
assert latex(symbols("Grave")) == r"Grave"
assert latex(symbols("Tilde")) == r"Tilde"
assert latex(symbols("Prime")) == r"Prime"
assert latex(symbols("DDot")) == r"\dot{D}"
assert latex(symbols("Bold")) == r"Bold"
assert latex(symbols("NORm")) == r"NORm"
assert latex(symbols("AVG")) == r"AVG"
assert latex(symbols("Hat")) == r"Hat"
assert latex(symbols("Dot")) == r"Dot"
assert latex(symbols("Bar")) == r"Bar"
assert latex(symbols("Vec")) == r"Vec"
assert latex(symbols("Abs")) == r"Abs"
assert latex(symbols("Mag")) == r"Mag"
assert latex(symbols("PrM")) == r"PrM"
assert latex(symbols("BM")) == r"BM"
assert latex(symbols("hbar")) == r"\hbar"
# Check a few combinations
assert latex(symbols("xvecdot")) == r"\dot{\vec{x}}"
assert latex(symbols("xDotVec")) == r"\vec{\dot{x}}"
assert latex(symbols("xHATNorm")) == r"\left\lVert{\hat{x}}\right\rVert"
# Check a couple big, ugly combinations
assert latex(symbols('xMathringBm_yCheckPRM__zbreveAbs')) == r"\boldsymbol{\mathring{x}}^{\left\|{\breve{z}}\right\|}_{{\check{y}}'}"
assert latex(symbols('alphadothat_nVECDOT__tTildePrime')) == r"\hat{\dot{\alpha}}^{{\tilde{t}}'}_{\dot{\vec{n}}}"
def test_greek_symbols():
assert latex(Symbol('alpha')) == r'\alpha'
assert latex(Symbol('beta')) == r'\beta'
assert latex(Symbol('gamma')) == r'\gamma'
assert latex(Symbol('delta')) == r'\delta'
assert latex(Symbol('epsilon')) == r'\epsilon'
assert latex(Symbol('zeta')) == r'\zeta'
assert latex(Symbol('eta')) == r'\eta'
assert latex(Symbol('theta')) == r'\theta'
assert latex(Symbol('iota')) == r'\iota'
assert latex(Symbol('kappa')) == r'\kappa'
assert latex(Symbol('lambda')) == r'\lambda'
assert latex(Symbol('mu')) == r'\mu'
assert latex(Symbol('nu')) == r'\nu'
assert latex(Symbol('xi')) == r'\xi'
assert latex(Symbol('omicron')) == r'o'
assert latex(Symbol('pi')) == r'\pi'
assert latex(Symbol('rho')) == r'\rho'
assert latex(Symbol('sigma')) == r'\sigma'
assert latex(Symbol('tau')) == r'\tau'
assert latex(Symbol('upsilon')) == r'\upsilon'
assert latex(Symbol('phi')) == r'\phi'
assert latex(Symbol('chi')) == r'\chi'
assert latex(Symbol('psi')) == r'\psi'
assert latex(Symbol('omega')) == r'\omega'
assert latex(Symbol('Alpha')) == r'A'
assert latex(Symbol('Beta')) == r'B'
assert latex(Symbol('Gamma')) == r'\Gamma'
assert latex(Symbol('Delta')) == r'\Delta'
assert latex(Symbol('Epsilon')) == r'E'
assert latex(Symbol('Zeta')) == r'Z'
assert latex(Symbol('Eta')) == r'H'
assert latex(Symbol('Theta')) == r'\Theta'
assert latex(Symbol('Iota')) == r'I'
assert latex(Symbol('Kappa')) == r'K'
assert latex(Symbol('Lambda')) == r'\Lambda'
assert latex(Symbol('Mu')) == r'M'
assert latex(Symbol('Nu')) == r'N'
assert latex(Symbol('Xi')) == r'\Xi'
assert latex(Symbol('Omicron')) == r'O'
assert latex(Symbol('Pi')) == r'\Pi'
assert latex(Symbol('Rho')) == r'P'
assert latex(Symbol('Sigma')) == r'\Sigma'
assert latex(Symbol('Tau')) == r'T'
assert latex(Symbol('Upsilon')) == r'\Upsilon'
assert latex(Symbol('Phi')) == r'\Phi'
assert latex(Symbol('Chi')) == r'X'
assert latex(Symbol('Psi')) == r'\Psi'
assert latex(Symbol('Omega')) == r'\Omega'
assert latex(Symbol('varepsilon')) == r'\varepsilon'
assert latex(Symbol('varkappa')) == r'\varkappa'
assert latex(Symbol('varphi')) == r'\varphi'
assert latex(Symbol('varpi')) == r'\varpi'
assert latex(Symbol('varrho')) == r'\varrho'
assert latex(Symbol('varsigma')) == r'\varsigma'
assert latex(Symbol('vartheta')) == r'\vartheta'
@XFAIL
def test_builtin_without_args_mismatched_names():
assert latex(CosineTransform) == r'\mathcal{COS}'
def test_builtin_no_args():
assert latex(Chi) == r'\operatorname{Chi}'
assert latex(gamma) == r'\Gamma'
assert latex(KroneckerDelta) == r'\delta'
assert latex(DiracDelta) == r'\delta'
assert latex(lowergamma) == r'\gamma'
def test_issue_6853():
p = Function('Pi')
assert latex(p(x)) == r"\Pi{\left (x \right )}"
def test_Mul():
e = Mul(-2, x + 1, evaluate=False)
assert latex(e) == r'- 2 \left(x + 1\right)'
e = Mul(2, x + 1, evaluate=False)
assert latex(e) == r'2 \left(x + 1\right)'
e = Mul(S.One/2, x + 1, evaluate=False)
assert latex(e) == r'\frac{1}{2} \left(x + 1\right)'
e = Mul(y, x + 1, evaluate=False)
assert latex(e) == r'y \left(x + 1\right)'
e = Mul(-y, x + 1, evaluate=False)
assert latex(e) == r'- y \left(x + 1\right)'
e = Mul(-2, x + 1)
assert latex(e) == r'- 2 x - 2'
e = Mul(2, x + 1)
assert latex(e) == r'2 x + 2'
def test_Pow():
e = Pow(2, 2, evaluate=False)
assert latex(e) == r'2^{2}'
def test_issue_7180():
assert latex(Equivalent(x, y)) == r"x \equiv y"
assert latex(Not(Equivalent(x, y))) == r"x \not\equiv y"
def test_issue_8409():
assert latex(S.Half**n) == r"\left(\frac{1}{2}\right)^{n}"
def test_issue_8470():
from sympy.parsing.sympy_parser import parse_expr
e = parse_expr("-B*A", evaluate=False)
assert latex(e) == r"A \left(- B\right)"
def test_issue_7117():
# See also issue #5031 (hence the evaluate=False in these).
e = Eq(x + 1, 2*x)
q = Mul(2, e, evaluate=False)
assert latex(q) == r"2 \left(x + 1 = 2 x\right)"
q = Add(6, e, evaluate=False)
assert latex(q) == r"6 + \left(x + 1 = 2 x\right)"
q = Pow(e, 2, evaluate=False)
assert latex(q) == r"\left(x + 1 = 2 x\right)^{2}"
| []
| []
| []
| [] | [] | python | null | null | null |
devproject/devproject/wsgi.py | """
WSGI config for devproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devproject.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
spyder/plugins/ipythonconsole/utils/kernelspec.py | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Kernel spec for Spyder kernels
"""
import os
import os.path as osp
from jupyter_client.kernelspec import KernelSpec
from spyder.config.base import SAFE_MODE, running_under_pytest
from spyder.config.manager import CONF
from spyder.utils.encoding import to_unicode_from_fs
from spyder.utils.programs import is_python_interpreter
from spyder.py3compat import PY2, iteritems, to_text_string, to_binary_string
from spyder.utils.environ import clean_env
from spyder.utils.misc import (add_pathlist_to_PYTHONPATH,
get_python_executable)
class SpyderKernelSpec(KernelSpec):
"""Kernel spec for Spyder kernels"""
def __init__(self, is_cython=False, is_pylab=False,
is_sympy=False, **kwargs):
super(SpyderKernelSpec, self).__init__(**kwargs)
self.is_cython = is_cython
self.is_pylab = is_pylab
self.is_sympy = is_sympy
self.display_name = 'Python 2 (Spyder)' if PY2 else 'Python 3 (Spyder)'
self.language = 'python2' if PY2 else 'python3'
self.resource_dir = ''
@property
def argv(self):
"""Command to start kernels"""
# Python interpreter used to start kernels
if CONF.get('main_interpreter', 'default'):
pyexec = get_python_executable()
else:
# Avoid IPython adding the virtualenv on which Spyder is running
# to the kernel sys.path
os.environ.pop('VIRTUAL_ENV', None)
pyexec = CONF.get('main_interpreter', 'executable')
if not is_python_interpreter(pyexec):
pyexec = get_python_executable()
CONF.set('main_interpreter', 'executable', '')
CONF.set('main_interpreter', 'default', True)
CONF.set('main_interpreter', 'custom', False)
# Fixes spyder-ide/spyder#3427.
if os.name == 'nt':
dir_pyexec = osp.dirname(pyexec)
pyexec_w = osp.join(dir_pyexec, 'pythonw.exe')
if osp.isfile(pyexec_w):
pyexec = pyexec_w
# Command used to start kernels
kernel_cmd = [
pyexec,
'-m',
'spyder_kernels.console',
'-f',
'{connection_file}'
]
return kernel_cmd
@property
def env(self):
"""Env vars for kernels"""
# Add our PYTHONPATH to the kernel
pathlist = CONF.get('main', 'spyder_pythonpath', default=[])
default_interpreter = CONF.get('main_interpreter', 'default')
pypath = add_pathlist_to_PYTHONPATH([], pathlist, ipyconsole=True,
drop_env=False)
# Environment variables that we need to pass to our sitecustomize
umr_namelist = CONF.get('main_interpreter', 'umr/namelist')
if PY2:
original_list = umr_namelist[:]
for umr_n in umr_namelist:
try:
umr_n.encode('utf-8')
except UnicodeDecodeError:
umr_namelist.remove(umr_n)
if original_list != umr_namelist:
CONF.set('main_interpreter', 'umr/namelist', umr_namelist)
env_vars = {
'SPY_EXTERNAL_INTERPRETER': not default_interpreter,
'SPY_UMR_ENABLED': CONF.get('main_interpreter', 'umr/enabled'),
'SPY_UMR_VERBOSE': CONF.get('main_interpreter', 'umr/verbose'),
'SPY_UMR_NAMELIST': ','.join(umr_namelist),
'SPY_RUN_LINES_O': CONF.get('ipython_console', 'startup/run_lines'),
'SPY_PYLAB_O': CONF.get('ipython_console', 'pylab'),
'SPY_BACKEND_O': CONF.get('ipython_console', 'pylab/backend'),
'SPY_AUTOLOAD_PYLAB_O': CONF.get('ipython_console',
'pylab/autoload'),
'SPY_FORMAT_O': CONF.get('ipython_console',
'pylab/inline/figure_format'),
'SPY_BBOX_INCHES_O': CONF.get('ipython_console',
'pylab/inline/bbox_inches'),
'SPY_RESOLUTION_O': CONF.get('ipython_console',
'pylab/inline/resolution'),
'SPY_WIDTH_O': CONF.get('ipython_console', 'pylab/inline/width'),
'SPY_HEIGHT_O': CONF.get('ipython_console', 'pylab/inline/height'),
'SPY_USE_FILE_O': CONF.get('ipython_console',
'startup/use_run_file'),
'SPY_RUN_FILE_O': CONF.get('ipython_console', 'startup/run_file'),
'SPY_AUTOCALL_O': CONF.get('ipython_console', 'autocall'),
'SPY_GREEDY_O': CONF.get('ipython_console', 'greedy_completer'),
'SPY_JEDI_O': CONF.get('ipython_console', 'jedi_completer'),
'SPY_SYMPY_O': CONF.get('ipython_console', 'symbolic_math'),
'SPY_TESTING': running_under_pytest() or SAFE_MODE,
'SPY_HIDE_CMD': CONF.get('ipython_console', 'hide_cmd_windows')
}
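        # Override selected defaults for the specialized Pylab, SymPy and Cython consoles.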
if self.is_pylab is True:
env_vars['SPY_AUTOLOAD_PYLAB_O'] = True
env_vars['SPY_SYMPY_O'] = False
env_vars['SPY_RUN_CYTHON'] = False
if self.is_sympy is True:
env_vars['SPY_AUTOLOAD_PYLAB_O'] = False
env_vars['SPY_SYMPY_O'] = True
env_vars['SPY_RUN_CYTHON'] = False
if self.is_cython is True:
env_vars['SPY_AUTOLOAD_PYLAB_O'] = False
env_vars['SPY_SYMPY_O'] = False
env_vars['SPY_RUN_CYTHON'] = True
# Add our PYTHONPATH to env_vars
env_vars.update(pypath)
# Making all env_vars strings
clean_env_vars = clean_env(env_vars)
return clean_env_vars
| []
| []
| []
| [] | [] | python | 0 | 0 | |
sphinx/util/i18n.py | """
sphinx.util.i18n
~~~~~~~~~~~~~~~~
    Utilities for internationalization: message catalog handling and localized date formatting.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
from datetime import datetime, timezone
from os import path
from typing import TYPE_CHECKING, Callable, Generator, List, NamedTuple, Optional, Tuple, Union
import babel.dates
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po
from sphinx.errors import SphinxError
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.osutil import SEP, canon_path, relpath
if TYPE_CHECKING:
from sphinx.environment import BuildEnvironment
logger = logging.getLogger(__name__)
class LocaleFileInfoBase(NamedTuple):
base_dir: str
domain: str
charset: str
class CatalogInfo(LocaleFileInfoBase):
@property
def po_file(self) -> str:
return self.domain + '.po'
@property
def mo_file(self) -> str:
return self.domain + '.mo'
@property
def po_path(self) -> str:
return path.join(self.base_dir, self.po_file)
@property
def mo_path(self) -> str:
return path.join(self.base_dir, self.mo_file)
def is_outdated(self) -> bool:
return (
not path.exists(self.mo_path) or
path.getmtime(self.mo_path) < path.getmtime(self.po_path))
def write_mo(self, locale: str) -> None:
with open(self.po_path, encoding=self.charset) as file_po:
try:
po = read_po(file_po, locale)
except Exception as exc:
logger.warning(__('reading error: %s, %s'), self.po_path, exc)
return
with open(self.mo_path, 'wb') as file_mo:
try:
write_mo(file_mo, po)
except Exception as exc:
logger.warning(__('writing error: %s, %s'), self.mo_path, exc)
class CatalogRepository:
"""A repository for message catalogs."""
def __init__(self, basedir: str, locale_dirs: List[str],
language: str, encoding: str) -> None:
self.basedir = basedir
self._locale_dirs = locale_dirs
self.language = language
self.encoding = encoding
@property
def locale_dirs(self) -> Generator[str, None, None]:
if not self.language:
return
for locale_dir in self._locale_dirs:
locale_dir = path.join(self.basedir, locale_dir)
locale_path = path.join(locale_dir, self.language, 'LC_MESSAGES')
if path.exists(locale_path):
yield locale_dir
else:
                logger.verbose(__('locale_dir %s does not exist'), locale_path)
@property
def pofiles(self) -> Generator[Tuple[str, str], None, None]:
for locale_dir in self.locale_dirs:
basedir = path.join(locale_dir, self.language, 'LC_MESSAGES')
for root, dirnames, filenames in os.walk(basedir):
# skip dot-directories
for dirname in dirnames:
if dirname.startswith('.'):
dirnames.remove(dirname)
for filename in filenames:
if filename.endswith('.po'):
fullpath = path.join(root, filename)
yield basedir, relpath(fullpath, basedir)
@property
def catalogs(self) -> Generator[CatalogInfo, None, None]:
for basedir, filename in self.pofiles:
domain = canon_path(path.splitext(filename)[0])
yield CatalogInfo(basedir, domain, self.encoding)
def docname_to_domain(docname: str, compaction: Union[bool, str]) -> str:
"""Convert docname to domain for catalogs."""
if isinstance(compaction, str):
return compaction
if compaction:
return docname.split(SEP, 1)[0]
else:
return docname
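# Illustrative behaviour of docname_to_domain (the docnames are hypothetical):
# with compaction=True, 'usage/installation' collapses to its top directory 'usage';
# with a string such as compaction='docs', every docname maps to 'docs';
# with compaction=False, the docname is returned unchanged.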
# date_format mappings: ustrftime() to babel.dates.format_datetime()
date_format_mappings = {
'%a': 'EEE', # Weekday as locale’s abbreviated name.
'%A': 'EEEE', # Weekday as locale’s full name.
'%b': 'MMM', # Month as locale’s abbreviated name.
'%B': 'MMMM', # Month as locale’s full name.
'%c': 'medium', # Locale’s appropriate date and time representation.
'%-d': 'd', # Day of the month as a decimal number.
'%d': 'dd', # Day of the month as a zero-padded decimal number.
'%-H': 'H', # Hour (24-hour clock) as a decimal number [0,23].
'%H': 'HH', # Hour (24-hour clock) as a zero-padded decimal number [00,23].
'%-I': 'h', # Hour (12-hour clock) as a decimal number [1,12].
'%I': 'hh', # Hour (12-hour clock) as a zero-padded decimal number [01,12].
'%-j': 'D', # Day of the year as a decimal number.
'%j': 'DDD', # Day of the year as a zero-padded decimal number.
'%-m': 'M', # Month as a decimal number.
'%m': 'MM', # Month as a zero-padded decimal number.
'%-M': 'm', # Minute as a decimal number [0,59].
'%M': 'mm', # Minute as a zero-padded decimal number [00,59].
'%p': 'a', # Locale’s equivalent of either AM or PM.
'%-S': 's', # Second as a decimal number.
'%S': 'ss', # Second as a zero-padded decimal number.
'%U': 'WW', # Week number of the year (Sunday as the first day of the week)
# as a zero padded decimal number. All days in a new year preceding
# the first Sunday are considered to be in week 0.
'%w': 'e', # Weekday as a decimal number, where 0 is Sunday and 6 is Saturday.
'%-W': 'W', # Week number of the year (Monday as the first day of the week)
# as a decimal number. All days in a new year preceding the first
# Monday are considered to be in week 0.
'%W': 'WW', # Week number of the year (Monday as the first day of the week)
# as a zero-padded decimal number.
'%x': 'medium', # Locale’s appropriate date representation.
'%X': 'medium', # Locale’s appropriate time representation.
'%y': 'YY', # Year without century as a zero-padded decimal number.
'%Y': 'yyyy', # Year with century as a decimal number.
'%Z': 'zzz', # Time zone name (no characters if no time zone exists).
'%z': 'ZZZ', # UTC offset in the form ±HHMM[SS[.ffffff]]
# (empty string if the object is naive).
'%%': '%',
}
date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))
def babel_format_date(date: datetime, format: str, locale: Optional[str],
formatter: Callable = babel.dates.format_date) -> str:
if locale is None:
locale = 'en'
    # Check if we have the tzinfo attribute. If not, we cannot do any
    # time-related formatting.
if not hasattr(date, 'tzinfo'):
formatter = babel.dates.format_date
try:
return formatter(date, format, locale=locale)
except (ValueError, babel.core.UnknownLocaleError):
# fallback to English
return formatter(date, format, locale='en')
except AttributeError:
logger.warning(__('Invalid date format. Quote the string by single quote '
'if you want to output it directly: %s'), format)
return format
def format_date(format: str, date: datetime = None, language: Optional[str] = None) -> str:
if date is None:
# If time is not specified, try to use $SOURCE_DATE_EPOCH variable
# See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal
source_date_epoch = os.getenv('SOURCE_DATE_EPOCH')
if source_date_epoch is not None:
date = datetime.utcfromtimestamp(float(source_date_epoch))
else:
date = datetime.now(timezone.utc).astimezone()
result = []
tokens = date_format_re.split(format)
for token in tokens:
if token in date_format_mappings:
babel_format = date_format_mappings.get(token, '')
            # Check if we have to use a different babel formatter than
# format_datetime, because we only want to format a date
# or a time.
if token == '%x':
function = babel.dates.format_date
elif token == '%X':
function = babel.dates.format_time
else:
function = babel.dates.format_datetime
result.append(babel_format_date(date, babel_format, locale=language,
formatter=function))
else:
result.append(token)
return "".join(result)
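# A rough illustration of format_date (date and locale are made up for the example):
# the strftime-style pattern '%B %d, %Y' is split into the tokens '%B', '%d' and '%Y',
# which map to the babel patterns 'MMMM', 'dd' and 'yyyy', so a date of 2021-03-05
# with language='en' would render as 'March 05, 2021'.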
def get_image_filename_for_language(filename: str, env: "BuildEnvironment") -> str:
if not env.config.language:
return filename
filename_format = env.config.figure_language_filename
d = dict()
d['root'], d['ext'] = path.splitext(filename)
dirname = path.dirname(d['root'])
if dirname and not dirname.endswith(path.sep):
dirname += path.sep
docpath = path.dirname(env.docname)
if docpath and not docpath.endswith(path.sep):
docpath += path.sep
d['path'] = dirname
d['basename'] = path.basename(d['root'])
d['docpath'] = docpath
d['language'] = env.config.language
try:
return filename_format.format(**d)
except KeyError as exc:
raise SphinxError('Invalid figure_language_filename: %r' % exc) from exc
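# Sketch of the substitution performed above (the format string comes from project
# configuration; '{root}.{language}{ext}' is a typical default): for filename
# 'images/diagram.png' and language 'ja', root='images/diagram' and ext='.png',
# so the translated filename becomes 'images/diagram.ja.png'.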
def search_image_for_language(filename: str, env: "BuildEnvironment") -> str:
if not env.config.language:
return filename
translated = get_image_filename_for_language(filename, env)
_, abspath = env.relfn2path(translated)
if path.exists(abspath):
return translated
else:
return filename
| []
| []
| [
"SOURCE_DATE_EPOCH"
]
| [] | ["SOURCE_DATE_EPOCH"] | python | 1 | 0 | |
.eggs/py2app-0.14-py3.6.egg/py2app/bootstrap/semi_standalone_path.py | def _update_path():
import os
import sys
resources = os.environ['RESOURCEPATH']
sys.path.append(os.path.join(
resources, 'lib', 'python%d.%d' % (
sys.version_info[:2]), 'lib-dynload'))
sys.path.append(os.path.join(
resources, 'lib', 'python%d.%d' % (
sys.version_info[:2])))
sys.path.append(os.path.join(
resources, 'lib', 'python%d.%d' % (
sys.version_info[:2]), 'site-packages.zip'))
_update_path()
| []
| []
| [
"RESOURCEPATH"
]
| [] | ["RESOURCEPATH"] | python | 1 | 0 | |
test/packetimpact/runner/packetimpact_test.go | // Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The runner starts docker containers and networking for a packetimpact test.
package packetimpact_test
import (
"context"
"flag"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net"
"os"
"os/exec"
"path"
"strings"
"testing"
"time"
"github.com/docker/docker/api/types/mount"
"gvisor.dev/gvisor/pkg/test/dockerutil"
"gvisor.dev/gvisor/test/packetimpact/netdevs"
)
// stringList implements flag.Value.
type stringList []string
// String implements flag.Value.String.
func (l *stringList) String() string {
return strings.Join(*l, ",")
}
// Set implements flag.Value.Set.
func (l *stringList) Set(value string) error {
*l = append(*l, value)
return nil
}
var (
dutPlatform = flag.String("dut_platform", "", "either \"linux\" or \"netstack\"")
testbenchBinary = flag.String("testbench_binary", "", "path to the testbench binary")
tshark = flag.Bool("tshark", false, "use more verbose tshark in logs instead of tcpdump")
extraTestArgs = stringList{}
expectFailure = flag.Bool("expect_failure", false, "expect that the test will fail when run")
dutAddr = net.IPv4(0, 0, 0, 10)
testbenchAddr = net.IPv4(0, 0, 0, 20)
)
const ctrlPort = "40000"
// logger implements testutil.Logger.
//
// Labels logs based on their source and formats multi-line logs.
type logger string
// Name implements testutil.Logger.Name.
func (l logger) Name() string {
return string(l)
}
// Logf implements testutil.Logger.Logf.
func (l logger) Logf(format string, args ...interface{}) {
lines := strings.Split(fmt.Sprintf(format, args...), "\n")
log.Printf("%s: %s", l, lines[0])
for _, line := range lines[1:] {
log.Printf("%*s %s", len(l), "", line)
}
}
func TestOne(t *testing.T) {
flag.Var(&extraTestArgs, "extra_test_arg", "extra arguments to pass to the testbench")
flag.Parse()
if *dutPlatform != "linux" && *dutPlatform != "netstack" {
t.Fatal("--dut_platform should be either linux or netstack")
}
if *testbenchBinary == "" {
t.Fatal("--testbench_binary is missing")
}
if *dutPlatform == "netstack" {
if _, err := dockerutil.RuntimePath(); err != nil {
t.Fatal("--runtime is missing or invalid with --dut_platform=netstack:", err)
}
}
dockerutil.EnsureSupportedDockerVersion()
ctx := context.Background()
// Create the networks needed for the test. One control network is needed for
// the gRPC control packets and one test network on which to transmit the test
// packets.
ctrlNet := dockerutil.NewNetwork(ctx, logger("ctrlNet"))
testNet := dockerutil.NewNetwork(ctx, logger("testNet"))
for _, dn := range []*dockerutil.Network{ctrlNet, testNet} {
for {
if err := createDockerNetwork(ctx, dn); err != nil {
t.Log("creating docker network:", err)
const wait = 100 * time.Millisecond
t.Logf("sleeping %s and will try creating docker network again", wait)
// This can fail if another docker network claimed the same IP so we'll
// just try again.
time.Sleep(wait)
continue
}
break
}
defer func(dn *dockerutil.Network) {
if err := dn.Cleanup(ctx); err != nil {
t.Errorf("unable to cleanup container %s: %s", dn.Name, err)
}
}(dn)
// Sanity check.
inspect, err := dn.Inspect(ctx)
if err != nil {
t.Fatalf("failed to inspect network %s: %v", dn.Name, err)
} else if inspect.Name != dn.Name {
t.Fatalf("name mismatch for network want: %s got: %s", dn.Name, inspect.Name)
}
}
tmpDir, err := ioutil.TempDir("", "container-output")
if err != nil {
t.Fatal("creating temp dir:", err)
}
defer os.RemoveAll(tmpDir)
const testOutputDir = "/tmp/testoutput"
// Create the Docker container for the DUT.
dut := dockerutil.MakeContainer(ctx, logger("dut"))
if *dutPlatform == "linux" {
dut.Runtime = ""
}
runOpts := dockerutil.RunOpts{
Image: "packetimpact",
CapAdd: []string{"NET_ADMIN"},
Mounts: []mount.Mount{mount.Mount{
Type: mount.TypeBind,
Source: tmpDir,
Target: testOutputDir,
ReadOnly: false,
}},
}
const containerPosixServerBinary = "/packetimpact/posix_server"
dut.CopyFiles(&runOpts, "/packetimpact", "/test/packetimpact/dut/posix_server")
conf, hostconf, _ := dut.ConfigsFrom(runOpts, containerPosixServerBinary, "--ip=0.0.0.0", "--port="+ctrlPort)
hostconf.AutoRemove = true
hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"}
if err := dut.CreateFrom(ctx, conf, hostconf, nil); err != nil {
t.Fatalf("unable to create container %s: %v", dut.Name, err)
}
defer dut.CleanUp(ctx)
// Add ctrlNet as eth1 and testNet as eth2.
const testNetDev = "eth2"
if err := addNetworks(ctx, dut, dutAddr, []*dockerutil.Network{ctrlNet, testNet}); err != nil {
t.Fatal(err)
}
if err := dut.Start(ctx); err != nil {
t.Fatalf("unable to start container %s: %s", dut.Name, err)
}
if _, err := dut.WaitForOutput(ctx, "Server listening.*\n", 60*time.Second); err != nil {
t.Fatalf("%s on container %s never listened: %s", containerPosixServerBinary, dut.Name, err)
}
dutTestDevice, dutDeviceInfo, err := deviceByIP(ctx, dut, addressInSubnet(dutAddr, *testNet.Subnet))
if err != nil {
t.Fatal(err)
}
remoteMAC := dutDeviceInfo.MAC
remoteIPv6 := dutDeviceInfo.IPv6Addr
// Netstack as DUT doesn't assign IPv6 addresses automatically so do it if
// needed.
if remoteIPv6 == nil {
if _, err := dut.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "add", netdevs.MACToIP(remoteMAC).String(), "scope", "link", "dev", dutTestDevice); err != nil {
t.Fatalf("unable to ip addr add on container %s: %s", dut.Name, err)
}
// Now try again, to make sure that it worked.
_, dutDeviceInfo, err = deviceByIP(ctx, dut, addressInSubnet(dutAddr, *testNet.Subnet))
if err != nil {
t.Fatal(err)
}
remoteIPv6 = dutDeviceInfo.IPv6Addr
if remoteIPv6 == nil {
t.Fatal("unable to set IPv6 address on container", dut.Name)
}
}
// Create the Docker container for the testbench.
testbench := dockerutil.MakeContainer(ctx, logger("testbench"))
testbench.Runtime = "" // The testbench always runs on Linux.
tbb := path.Base(*testbenchBinary)
containerTestbenchBinary := "/packetimpact/" + tbb
runOpts = dockerutil.RunOpts{
Image: "packetimpact",
CapAdd: []string{"NET_ADMIN"},
Mounts: []mount.Mount{mount.Mount{
Type: mount.TypeBind,
Source: tmpDir,
Target: testOutputDir,
ReadOnly: false,
}},
}
testbench.CopyFiles(&runOpts, "/packetimpact", "/test/packetimpact/tests/"+tbb)
// Run tcpdump in the test bench unbuffered, without DNS resolution, just on
// the interface with the test packets.
snifferArgs := []string{
"tcpdump",
"-S", "-vvv", "-U", "-n",
"-i", testNetDev,
"-w", testOutputDir + "/dump.pcap",
}
snifferRegex := "tcpdump: listening.*\n"
if *tshark {
// Run tshark in the test bench unbuffered, without DNS resolution, just on
// the interface with the test packets.
snifferArgs = []string{
"tshark", "-V", "-l", "-n", "-i", testNetDev,
"-o", "tcp.check_checksum:TRUE",
"-o", "udp.check_checksum:TRUE",
}
snifferRegex = "Capturing on.*\n"
}
defer func() {
if err := exec.Command("/bin/cp", "-r", tmpDir, os.Getenv("TEST_UNDECLARED_OUTPUTS_DIR")).Run(); err != nil {
t.Error("unable to copy container output files:", err)
}
}()
conf, hostconf, _ = testbench.ConfigsFrom(runOpts, snifferArgs...)
hostconf.AutoRemove = true
hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"}
if err := testbench.CreateFrom(ctx, conf, hostconf, nil); err != nil {
t.Fatalf("unable to create container %s: %s", testbench.Name, err)
}
defer testbench.CleanUp(ctx)
// Add ctrlNet as eth1 and testNet as eth2.
if err := addNetworks(ctx, testbench, testbenchAddr, []*dockerutil.Network{ctrlNet, testNet}); err != nil {
t.Fatal(err)
}
if err := testbench.Start(ctx); err != nil {
t.Fatalf("unable to start container %s: %s", testbench.Name, err)
}
// Kill so that it will flush output.
defer func() {
time.Sleep(1 * time.Second)
testbench.Exec(ctx, dockerutil.ExecOpts{}, "killall", snifferArgs[0])
}()
if _, err := testbench.WaitForOutput(ctx, snifferRegex, 60*time.Second); err != nil {
t.Fatalf("sniffer on %s never listened: %s", dut.Name, err)
}
	// Because the Linux kernel receives the SYN-ACK but didn't send the SYN, it
	// will issue a RST. To prevent this, iptables can be used to filter out all
// incoming packets. The raw socket that packetimpact tests use will still see
// everything.
if logs, err := testbench.Exec(ctx, dockerutil.ExecOpts{}, "iptables", "-A", "INPUT", "-i", testNetDev, "-j", "DROP"); err != nil {
t.Fatalf("unable to Exec iptables on container %s: %s, logs from testbench:\n%s", testbench.Name, err, logs)
}
// FIXME(b/156449515): Some piece of the system has a race. The old
// bash script version had a sleep, so we have one too. The race should
// be fixed and this sleep removed.
time.Sleep(time.Second)
// Start a packetimpact test on the test bench. The packetimpact test sends
// and receives packets and also sends POSIX socket commands to the
// posix_server to be executed on the DUT.
testArgs := []string{containerTestbenchBinary}
testArgs = append(testArgs, extraTestArgs...)
testArgs = append(testArgs,
"--posix_server_ip", addressInSubnet(dutAddr, *ctrlNet.Subnet).String(),
"--posix_server_port", ctrlPort,
"--remote_ipv4", addressInSubnet(dutAddr, *testNet.Subnet).String(),
"--local_ipv4", addressInSubnet(testbenchAddr, *testNet.Subnet).String(),
"--remote_ipv6", remoteIPv6.String(),
"--remote_mac", remoteMAC.String(),
"--remote_interface_id", fmt.Sprintf("%d", dutDeviceInfo.ID),
"--device", testNetDev,
"--dut_type", *dutPlatform,
)
testbenchLogs, err := testbench.Exec(ctx, dockerutil.ExecOpts{}, testArgs...)
if (err != nil) != *expectFailure {
var dutLogs string
if logs, err := dut.Logs(ctx); err != nil {
dutLogs = fmt.Sprintf("failed to fetch DUT logs: %s", err)
} else {
dutLogs = logs
}
t.Errorf(`test error: %v, expect failure: %t
====== Begin of DUT Logs ======
%s
====== End of DUT Logs ======
====== Begin of Testbench Logs ======
%s
====== End of Testbench Logs ======`,
err, *expectFailure, dutLogs, testbenchLogs)
}
}
func addNetworks(ctx context.Context, d *dockerutil.Container, addr net.IP, networks []*dockerutil.Network) error {
for _, dn := range networks {
ip := addressInSubnet(addr, *dn.Subnet)
// Connect to the network with the specified IP address.
if err := dn.Connect(ctx, d, ip.String(), ""); err != nil {
return fmt.Errorf("unable to connect container %s to network %s: %w", d.Name, dn.Name, err)
}
}
return nil
}
// addressInSubnet combines the subnet provided with the address and returns a
// new address. The return address bits come from the subnet where the mask is 1
// and from the ip address where the mask is 0.
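// For example (illustrative values only): with subnet 203.0.113.0/24 and
// addr 0.0.0.10, the subnet supplies the masked bits 203.0.113 and the address
// supplies the host bits .10, giving 203.0.113.10.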
func addressInSubnet(addr net.IP, subnet net.IPNet) net.IP {
var octets []byte
for i := 0; i < 4; i++ {
octets = append(octets, (subnet.IP.To4()[i]&subnet.Mask[i])+(addr.To4()[i]&(^subnet.Mask[i])))
}
return net.IP(octets)
}
// createDockerNetwork makes a randomly-named network that will start with the
// namePrefix. The network will be a random /24 subnet.
func createDockerNetwork(ctx context.Context, n *dockerutil.Network) error {
randSource := rand.NewSource(time.Now().UnixNano())
r1 := rand.New(randSource)
// Class C, 192.0.0.0 to 223.255.255.255, transitionally has mask 24.
ip := net.IPv4(byte(r1.Intn(224-192)+192), byte(r1.Intn(256)), byte(r1.Intn(256)), 0)
n.Subnet = &net.IPNet{
IP: ip,
Mask: ip.DefaultMask(),
}
return n.Create(ctx)
}
// deviceByIP finds a deviceInfo and device name from an IP address.
func deviceByIP(ctx context.Context, d *dockerutil.Container, ip net.IP) (string, netdevs.DeviceInfo, error) {
out, err := d.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "show")
if err != nil {
return "", netdevs.DeviceInfo{}, fmt.Errorf("listing devices on %s container: %w", d.Name, err)
}
devs, err := netdevs.ParseDevices(out)
if err != nil {
return "", netdevs.DeviceInfo{}, fmt.Errorf("parsing devices from %s container: %w", d.Name, err)
}
testDevice, deviceInfo, err := netdevs.FindDeviceByIP(ip, devs)
if err != nil {
return "", netdevs.DeviceInfo{}, fmt.Errorf("can't find deviceInfo for container %s: %w", d.Name, err)
}
return testDevice, deviceInfo, nil
}
| [
"\"TEST_UNDECLARED_OUTPUTS_DIR\""
]
| []
| [
"TEST_UNDECLARED_OUTPUTS_DIR"
]
| [] | ["TEST_UNDECLARED_OUTPUTS_DIR"] | go | 1 | 0 | |
test.py | import argparse
import scipy
from scipy import ndimage
import cv2
import numpy as np
import sys
import json
import torch
from torch.autograd import Variable
import torchvision.models as models
import torch.nn.functional as F
from torch.utils import data
from networks.ccnet import Res_Deeplab
from dataset.datasets import CSDataTestSet
from collections import OrderedDict
import os
import scipy.ndimage as nd
from math import ceil
from PIL import Image as PILImage
import torch.nn as nn
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
DATA_DIRECTORY = 'cityscapes'
DATA_LIST_PATH = './dataset/list/cityscapes/test.lst'
IGNORE_LABEL = 255
NUM_CLASSES = 19
INPUT_SIZE = '769,769'
RESTORE_FROM = './deeplab_resnet.ckpt'
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="DeepLabLFOV Network")
parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the PASCAL VOC dataset.")
parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
help="Path to the file listing the images in the dataset.")
parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
help="The index of the label to ignore during the training.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--gpu", type=str, default='0',
help="choose gpu device.")
parser.add_argument("--recurrence", type=int, default=1,
help="choose the number of recurrence.")
parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
help="Comma-separated string with height and width of images.")
parser.add_argument("--whole", type=bool, default=False,
help="use whole input size.")
return parser.parse_args()
def get_palette(num_cls):
""" Returns the color map for visualizing the segmentation mask.
Args:
num_cls: Number of classes
Returns:
The color map
"""
n = num_cls
palette = [0] * (n * 3)
for j in range(0, n):
lab = j
palette[j * 3 + 0] = 0
palette[j * 3 + 1] = 0
palette[j * 3 + 2] = 0
i = 0
while lab:
palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
i += 1
lab >>= 3
return palette
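# For illustration: with this bit-spreading scheme, class 1 maps to RGB (128, 0, 0),
# class 2 to (0, 128, 0) and class 3 to (128, 128, 0), i.e. the standard
# PASCAL-VOC-style colour map.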
def pad_image(img, target_size):
"""Pad an image up to the target size."""
rows_missing = target_size[0] - img.shape[2]
cols_missing = target_size[1] - img.shape[3]
padded_img = np.pad(img, ((0, 0), (0, 0), (0, rows_missing), (0, cols_missing)), 'constant')
return padded_img
def predict_sliding(net, image, tile_size, classes, flip_evaluation, recurrence):
interp = nn.Upsample(size=tile_size, mode='bilinear', align_corners=True)
image_size = image.shape
overlap = 1/3
stride = ceil(tile_size[0] * (1 - overlap))
tile_rows = int(ceil((image_size[2] - tile_size[0]) / stride) + 1) # strided convolution formula
tile_cols = int(ceil((image_size[3] - tile_size[1]) / stride) + 1)
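    # Worked example (numbers assume the defaults used in this script): with a
    # 1024x2048 input, 769x769 tiles and 1/3 overlap, stride = ceil(769 * 2/3) = 513,
    # so tile_rows = ceil((1024-769)/513) + 1 = 2 and tile_cols = ceil((2048-769)/513) + 1 = 4,
    # i.e. 8 overlapping tiles are predicted and averaged.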
print("Need %i x %i prediction tiles @ stride %i px" % (tile_cols, tile_rows, stride))
full_probs = np.zeros((image_size[2], image_size[3], classes))
count_predictions = np.zeros((image_size[2], image_size[3], classes))
tile_counter = 0
for row in range(tile_rows):
for col in range(tile_cols):
x1 = int(col * stride)
y1 = int(row * stride)
x2 = min(x1 + tile_size[1], image_size[3])
y2 = min(y1 + tile_size[0], image_size[2])
x1 = max(int(x2 - tile_size[1]), 0) # for portrait images the x1 underflows sometimes
y1 = max(int(y2 - tile_size[0]), 0) # for very few rows y1 underflows
img = image[:, :, y1:y2, x1:x2]
padded_img = pad_image(img, tile_size)
# plt.imshow(padded_img)
# plt.show()
tile_counter += 1
print("Predicting tile %i" % tile_counter)
padded_prediction = net(Variable(torch.from_numpy(padded_img), volatile=True).cuda(), recurrence)
if isinstance(padded_prediction, list):
padded_prediction = padded_prediction[0]
padded_prediction = interp(padded_prediction).cpu().data[0].numpy().transpose(1,2,0)
prediction = padded_prediction[0:img.shape[2], 0:img.shape[3], :]
count_predictions[y1:y2, x1:x2] += 1
full_probs[y1:y2, x1:x2] += prediction # accumulate the predictions also in the overlapping regions
# average the predictions in the overlapping regions
full_probs /= count_predictions
# visualize normalization Weights
# plt.imshow(np.mean(count_predictions, axis=2))
# plt.show()
return full_probs
def predict_whole(net, image, tile_size, flip_evaluation, recurrence):
interp = nn.Upsample(size=tile_size, mode='bilinear', align_corners=True)
prediction = net(image.cuda(), recurrence)
if isinstance(prediction, list):
prediction = prediction[0]
prediction = interp(prediction).cpu().data[0].numpy().transpose(1,2,0)
return prediction
def id2trainId(label, id_to_trainid, reverse=False):
label_copy = label.copy()
if reverse:
for v, k in id_to_trainid.items():
label_copy[label == k] = v
else:
for k, v in id_to_trainid.items():
label_copy[label == k] = v
return label_copy
def main():
"""Create the model and start the evaluation process."""
args = get_arguments()
# gpu0 = args.gpu
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
h, w = map(int, args.input_size.split(','))
if args.whole:
input_size = (1024, 2048)
else:
input_size = (h, w)
ignore_label= args.ignore_label
model = Res_Deeplab(num_classes=args.num_classes)
saved_state_dict = torch.load(args.restore_from)
model.load_state_dict(saved_state_dict)
model.eval()
model.cuda()
testloader = data.DataLoader(CSDataTestSet(args.data_dir, args.data_list, crop_size=(1024, 2048), mean=IMG_MEAN),
batch_size=1, shuffle=False, pin_memory=True)
data_list = []
confusion_matrix = np.zeros((args.num_classes,args.num_classes))
palette = get_palette(256)
id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
interp = nn.Upsample(size=(1024, 2048), mode='bilinear', align_corners=True)
if not os.path.exists('outputs'):
os.makedirs('outputs')
for index, batch in enumerate(testloader):
if index % 100 == 0:
print('%d processd'%(index))
image, size, name = batch
size = size[0].numpy()
with torch.no_grad():
if args.whole:
output = predict_whole(model, image, input_size, True, args.recurrence)
else:
output = predict_sliding(model, image.numpy(), input_size, args.num_classes, True, args.recurrence)
seg_pred = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
seg_pred = id2trainId(seg_pred, id_to_trainid, reverse=True)
output_im = PILImage.fromarray(seg_pred)
output_im.putpalette(palette)
output_im.save('outputs/'+name[0]+'.png')
if __name__ == '__main__':
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
rconweb/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rconweb.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/runtime/vendor/github.com/containerd/containerd/archive/tar.go | /*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"archive/tar"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/containerd/containerd/log"
"github.com/containerd/continuity/fs"
"github.com/pkg/errors"
)
var bufPool = &sync.Pool{
New: func() interface{} {
buffer := make([]byte, 32*1024)
return &buffer
},
}
var errInvalidArchive = errors.New("invalid archive")
// Diff returns a tar stream of the computed filesystem
// difference between the provided directories.
//
// Produces a tar using OCI style file markers for deletions. Deleted
// files will be prepended with the prefix ".wh.". This style is
// based off AUFS whiteouts.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
func Diff(ctx context.Context, a, b string) io.ReadCloser {
r, w := io.Pipe()
go func() {
err := WriteDiff(ctx, w, a, b)
if err = w.CloseWithError(err); err != nil {
log.G(ctx).WithError(err).Debugf("closing tar pipe failed")
}
}()
return r
}
// WriteDiff writes a tar stream of the computed difference between the
// provided directories.
//
// Produces a tar using OCI style file markers for deletions. Deleted
// files will be prepended with the prefix ".wh.". This style is
// based off AUFS whiteouts.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
func WriteDiff(ctx context.Context, w io.Writer, a, b string) error {
cw := newChangeWriter(w, b)
err := fs.Changes(ctx, a, b, cw.HandleChange)
if err != nil {
return errors.Wrap(err, "failed to create diff tar stream")
}
return cw.Close()
}
const (
// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
whiteoutPrefix = ".wh."
// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
whiteoutLinkDir = whiteoutMetaPrefix + "plnk"
// whiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
paxSchilyXattr = "SCHILY.xattrs."
)
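// Illustrative encoding (an example, not additional API surface): deleting
// "dir/file" in the upper layer is represented by a zero-size entry named
// "dir/.wh.file", and a directory made opaque is marked by an entry named
// "dir/.wh..wh..opq".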
// Apply applies a tar stream of an OCI style diff tar.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int64, error) {
root = filepath.Clean(root)
var options ApplyOptions
for _, opt := range opts {
if err := opt(&options); err != nil {
return 0, errors.Wrap(err, "failed to apply option")
}
}
if options.Filter == nil {
options.Filter = all
}
return apply(ctx, root, tar.NewReader(r), options)
}
// applyNaive applies a tar stream of an OCI style diff tar.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
var (
dirs []*tar.Header
// Used for handling opaque directory markers which
// may occur out of order
unpackedPaths = make(map[string]struct{})
// Used for aufs plink directory
aufsTempdir = ""
aufsHardlinks = make(map[string]*tar.Header)
)
// Iterate through the files in the archive.
for {
select {
case <-ctx.Done():
return 0, ctx.Err()
default:
}
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
accept, err := options.Filter(hdr)
if err != nil {
return 0, err
}
if !accept {
continue
}
if skipFile(hdr) {
log.G(ctx).Warnf("file %q ignored: archive may not be supported on system", hdr.Name)
continue
}
// Split name and resolve symlinks for root directory.
ppath, base := filepath.Split(hdr.Name)
ppath, err = fs.RootPath(root, ppath)
if err != nil {
return 0, errors.Wrap(err, "failed to get root path")
}
// Join to root before joining to parent path to ensure relative links are
// already resolved based on the root before adding to parent.
path := filepath.Join(ppath, filepath.Join("/", base))
if path == root {
log.G(ctx).Debugf("file %q ignored: resolved to root", hdr.Name)
continue
}
// If file is not directly under root, ensure parent directory
// exists or is created.
if ppath != root {
parentPath := ppath
if base == "" {
parentPath = filepath.Dir(path)
}
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = mkdirAll(parentPath, 0700)
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, whiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in them so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, whiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
p, err := fs.RootPath(aufsTempdir, basename)
if err != nil {
return 0, err
}
if err := createTarFile(ctx, p, root, hdr, tr); err != nil {
return 0, err
}
}
if hdr.Name != whiteoutOpaqueDir {
continue
}
}
if strings.HasPrefix(base, whiteoutPrefix) {
dir := filepath.Dir(path)
if base == whiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
continue
}
originalBase := base[len(whiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
// Ensure originalPath is under dir
if dir[len(dir)-1] != filepath.Separator {
dir += string(filepath.Separator)
}
if !strings.HasPrefix(originalPath, dir) {
return 0, errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
}
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
continue
}
		// If path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
srcData := io.Reader(tr)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), whiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("invalid aufs hardlink")
}
p, err := fs.RootPath(aufsTempdir, linkBasename)
if err != nil {
return 0, err
}
tmpFile, err := os.Open(p)
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them to modify the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
for _, hdr := range dirs {
path, err := fs.RootPath(root, hdr.Name)
if err != nil {
return 0, err
}
if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil {
return 0, err
}
}
return size, nil
}
func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader) error {
// hdr.Mode is in linux format, which we can use for syscalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
hdrInfo := hdr.FileInfo()
switch hdr.Typeflag {
case tar.TypeDir:
// Create directory unless it exists as a directory already.
// In that case we just want to merge the two
if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
if err := mkdir(path, hdrInfo.Mode()); err != nil {
return err
}
}
case tar.TypeReg, tar.TypeRegA:
file, err := openFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdrInfo.Mode())
if err != nil {
return err
}
_, err = copyBuffered(ctx, file, reader)
if err1 := file.Close(); err == nil {
err = err1
}
if err != nil {
return err
}
case tar.TypeBlock, tar.TypeChar:
		// Handle this in an OS-specific way
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
return err
}
case tar.TypeFifo:
		// Handle this in an OS-specific way
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
return err
}
case tar.TypeLink:
targetPath, err := hardlinkRootPath(extractDir, hdr.Linkname)
if err != nil {
return err
}
if err := os.Link(targetPath, path); err != nil {
return err
}
case tar.TypeSymlink:
if err := os.Symlink(hdr.Linkname, path); err != nil {
return err
}
case tar.TypeXGlobalHeader:
log.G(ctx).Debug("PAX Global Extended Headers found and ignored")
return nil
default:
return errors.Errorf("unhandled tar header type %d\n", hdr.Typeflag)
}
// Lchown is not supported on Windows.
if runtime.GOOS != "windows" {
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
return err
}
}
for key, value := range hdr.PAXRecords {
if strings.HasPrefix(key, paxSchilyXattr) {
key = key[len(paxSchilyXattr):]
if err := setxattr(path, key, value); err != nil {
if errors.Cause(err) == syscall.ENOTSUP {
log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key)
continue
}
return err
}
}
}
// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
if err := handleLChmod(hdr, path, hdrInfo); err != nil {
return err
}
return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime))
}
type changeWriter struct {
tw *tar.Writer
source string
whiteoutT time.Time
inodeSrc map[uint64]string
inodeRefs map[uint64][]string
addedDirs map[string]struct{}
}
func newChangeWriter(w io.Writer, source string) *changeWriter {
return &changeWriter{
tw: tar.NewWriter(w),
source: source,
whiteoutT: time.Now(),
inodeSrc: map[uint64]string{},
inodeRefs: map[uint64][]string{},
addedDirs: map[string]struct{}{},
}
}
func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
if err != nil {
return err
}
if k == fs.ChangeKindDelete {
whiteOutDir := filepath.Dir(p)
whiteOutBase := filepath.Base(p)
whiteOut := filepath.Join(whiteOutDir, whiteoutPrefix+whiteOutBase)
hdr := &tar.Header{
Typeflag: tar.TypeReg,
Name: whiteOut[1:],
Size: 0,
ModTime: cw.whiteoutT,
AccessTime: cw.whiteoutT,
ChangeTime: cw.whiteoutT,
}
if err := cw.includeParents(hdr); err != nil {
return err
}
if err := cw.tw.WriteHeader(hdr); err != nil {
return errors.Wrap(err, "failed to write whiteout header")
}
} else {
var (
link string
err error
source = filepath.Join(cw.source, p)
)
switch {
case f.Mode()&os.ModeSocket != 0:
return nil // ignore sockets
case f.Mode()&os.ModeSymlink != 0:
if link, err = os.Readlink(source); err != nil {
return err
}
}
hdr, err := tar.FileInfoHeader(f, link)
if err != nil {
return err
}
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
name := p
if strings.HasPrefix(name, string(filepath.Separator)) {
name, err = filepath.Rel(string(filepath.Separator), name)
if err != nil {
return errors.Wrap(err, "failed to make path relative")
}
}
name, err = tarName(name)
if err != nil {
return errors.Wrap(err, "cannot canonicalize path")
}
// suffix with '/' for directories
if f.IsDir() && !strings.HasSuffix(name, "/") {
name += "/"
}
hdr.Name = name
if err := setHeaderForSpecialDevice(hdr, name, f); err != nil {
return errors.Wrap(err, "failed to set device headers")
}
// additionalLinks stores file names which must be linked to
// this file when this file is added
var additionalLinks []string
inode, isHardlink := fs.GetLinkInfo(f)
if isHardlink {
// If the inode has a source, always link to it
if source, ok := cw.inodeSrc[inode]; ok {
hdr.Typeflag = tar.TypeLink
hdr.Linkname = source
hdr.Size = 0
} else {
if k == fs.ChangeKindUnmodified {
cw.inodeRefs[inode] = append(cw.inodeRefs[inode], name)
return nil
}
cw.inodeSrc[inode] = name
additionalLinks = cw.inodeRefs[inode]
delete(cw.inodeRefs, inode)
}
} else if k == fs.ChangeKindUnmodified {
// Nothing to write to diff
return nil
}
if capability, err := getxattr(source, "security.capability"); err != nil {
return errors.Wrap(err, "failed to get capabilities xattr")
} else if capability != nil {
if hdr.PAXRecords == nil {
hdr.PAXRecords = map[string]string{}
}
hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability)
}
if err := cw.includeParents(hdr); err != nil {
return err
}
if err := cw.tw.WriteHeader(hdr); err != nil {
return errors.Wrap(err, "failed to write file header")
}
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
file, err := open(source)
if err != nil {
return errors.Wrapf(err, "failed to open path: %v", source)
}
defer file.Close()
n, err := copyBuffered(context.TODO(), cw.tw, file)
if err != nil {
return errors.Wrap(err, "failed to copy")
}
if n != hdr.Size {
return errors.New("short write copying file")
}
}
if additionalLinks != nil {
source = hdr.Name
for _, extra := range additionalLinks {
hdr.Name = extra
hdr.Typeflag = tar.TypeLink
hdr.Linkname = source
hdr.Size = 0
if err := cw.includeParents(hdr); err != nil {
return err
}
if err := cw.tw.WriteHeader(hdr); err != nil {
return errors.Wrap(err, "failed to write file header")
}
}
}
}
return nil
}
func (cw *changeWriter) Close() error {
if err := cw.tw.Close(); err != nil {
return errors.Wrap(err, "failed to close tar writer")
}
return nil
}
func (cw *changeWriter) includeParents(hdr *tar.Header) error {
name := strings.TrimRight(hdr.Name, "/")
fname := filepath.Join(cw.source, name)
parent := filepath.Dir(name)
pname := filepath.Join(cw.source, parent)
// Do not include root directory as parent
if fname != cw.source && pname != cw.source {
_, ok := cw.addedDirs[parent]
if !ok {
cw.addedDirs[parent] = struct{}{}
fi, err := os.Stat(pname)
if err != nil {
return err
}
if err := cw.HandleChange(fs.ChangeKindModify, parent, fi, nil); err != nil {
return err
}
}
}
if hdr.Typeflag == tar.TypeDir {
cw.addedDirs[name] = struct{}{}
}
return nil
}
func copyBuffered(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) {
buf := bufPool.Get().(*[]byte)
defer bufPool.Put(buf)
for {
select {
case <-ctx.Done():
err = ctx.Err()
return
default:
}
nr, er := src.Read(*buf)
if nr > 0 {
nw, ew := dst.Write((*buf)[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er != nil {
if er != io.EOF {
err = er
}
break
}
}
return written, err
}
// hardlinkRootPath returns target linkname, evaluating and bounding any
// symlink to the parent directory.
//
// NOTE: Allow hardlink to the softlink, not the real one. For example,
//
// touch /tmp/zzz
// ln -s /tmp/zzz /tmp/xxx
// ln /tmp/xxx /tmp/yyy
//
//	/tmp/yyy should be a softlink, the same as /tmp/xxx, not /tmp/zzz.
func hardlinkRootPath(root, linkname string) (string, error) {
ppath, base := filepath.Split(linkname)
ppath, err := fs.RootPath(root, ppath)
if err != nil {
return "", err
}
targetPath := filepath.Join(ppath, base)
if !strings.HasPrefix(targetPath, root) {
targetPath = root
}
return targetPath, nil
}
| [
"\"XDG_RUNTIME_DIR\""
]
| []
| [
"XDG_RUNTIME_DIR"
]
| [] | ["XDG_RUNTIME_DIR"] | go | 1 | 0 | |
test/test_repo_template/setup.py | import os
from setuptools import setup
# This file is a template, and will be rendered before executed.
# So the double curly brackets will become single after rendering, and
# when executed, this will work as expected
content = 'env = {{}}\n'.format(repr(dict(os.environ))) # noqa W291 see above comment
with open('asv_test_repo/build_time_env.py', 'w') as f:
f.write(content)
setup(name='asv_test_repo',
version="{version}",
packages=['asv_test_repo'],
# The following forces setuptools to generate .egg-info directory,
# which causes problems in test_environment.py:test_install_success
include_package_data=True,
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
example/remove_sn_url.py | # Remove "http://sn" login_url
import logging
# logger.basicConfig(filename=f"{__name__}.log")
logger = logging.getLogger(__name__)
sHandler = logging.StreamHandler()
sHandler.setLevel(logging.INFO)
logger.addHandler(sHandler)
logfilenode = __file__.rsplit('.')[0]
handler = logging.FileHandler(f"{logfilenode}.log")
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
import sys
import os
sys.path.append("..")
from ycommander import api, params
if __name__ == '__main__':
try:
user = sys.argv[1]
except IndexError:
try:
user = os.environ['user']
except KeyError:
user = input("User:")
try:
password = sys.argv[2]
except IndexError:
try:
password = os.environ['password']
except KeyError:
from getpass import getpass
password = getpass('Password:')
# config = {'user': user, 'password': password}
params = params.KeeperParams() # config=config)
params.user = user
params.password = password
session_token = api.login(params)
TABLE_NAME = 'sn_url'
sn_url = 'http://sn'
params.sync_data = True # to update
MAX_REPEAT = 999
logger.setLevel(logging.INFO)
for repeat, uid in enumerate(params.record_cache):
if repeat >= MAX_REPEAT:
            logger.info(f"Exiting because the repeat limit was reached at record {repeat}")
break
rec = api.get_record(params, uid)
if rec.login_url == sn_url:
rec.login_url = '' # set string empty
api.update_record(params, rec)
logger.info(f"sn_url is erased at {uid} : {rec.title}")
exit(0) # to suppress warning of 'Exit without exit code' | []
| []
| [
"user",
"password"
]
| [] | ["user", "password"] | python | 2 | 0 | |
share/lib/python/neuron/crxd/rxd.py | from neuron import h, nrn, nrn_dll_sym
from . import species, node, section1d, region
from .nodelist import NodeList
import weakref
import numpy
import ctypes
import atexit
from . import options
from .rxdException import RxDException
from . import initializer
import collections
import os
from distutils import sysconfig
import uuid
import sys
import itertools
from numpy.ctypeslib import ndpointer
import re
import platform
# aliases to avoid repeatedly doing multiple hash-table lookups
_numpy_array = numpy.array
_numpy_zeros = numpy.zeros
_species_get_all_species = species._get_all_species
_node_get_states = node._get_states
_section1d_transfer_to_legacy = section1d._transfer_to_legacy
_ctypes_c_int = ctypes.c_int
_weakref_ref = weakref.ref
_external_solver = None
_external_solver_initialized = False
_windows_dll_files = []
_windows_dll = []
make_time_ptr = nrn_dll_sym('make_time_ptr')
make_time_ptr.argtypes = [ctypes.py_object, ctypes.py_object]
make_time_ptr(h._ref_dt, h._ref_t)
_double_ptr = ctypes.POINTER(ctypes.c_double)
_int_ptr = ctypes.POINTER(_ctypes_c_int)
_long_ptr = ctypes.POINTER(ctypes.c_long)
fptr_prototype = ctypes.CFUNCTYPE(None)
set_nonvint_block = nrn_dll_sym('set_nonvint_block')
set_nonvint_block(nrn_dll_sym('rxd_nonvint_block'))
set_setup = nrn_dll_sym('set_setup')
set_setup.argtypes = [fptr_prototype]
set_initialize = nrn_dll_sym('set_initialize')
set_initialize.argtypes = [fptr_prototype]
scatter_concentrations = nrn_dll_sym('scatter_concentrations')
# Transfer extracellular concentrations to NEURON
_fih_transfer_ecs = h.FInitializeHandler(1, scatter_concentrations)
rxd_set_no_diffusion = nrn_dll_sym('rxd_set_no_diffusion')
setup_solver = nrn_dll_sym('setup_solver')
setup_solver.argtypes = [ndpointer(ctypes.c_double), ctypes.c_int, numpy.ctypeslib.ndpointer(numpy.int_, flags='contiguous'), ctypes.c_int, ctypes.py_object, ctypes.py_object]
#states = None
_set_num_threads = nrn_dll_sym('set_num_threads')
_set_num_threads.argtypes = [ctypes.c_int]
_get_num_threads = nrn_dll_sym('get_num_threads')
_get_num_threads.restype = ctypes.c_int
clear_rates = nrn_dll_sym('clear_rates')
register_rate = nrn_dll_sym('register_rate')
register_rate.argtypes = [
ctypes.c_int, #num species
ctypes.c_int, #num regions
ctypes.c_int, #num seg
numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #species ids
ctypes.c_int, numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #num ecs species
numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #ecs species ids
ctypes.c_int, #num multicompartment reactions
numpy.ctypeslib.ndpointer(ctypes.c_double, flags='contiguous'), #multicompartment multipliers
] #Reaction rate function
setup_currents = nrn_dll_sym('setup_currents')
setup_currents.argtypes = [
ctypes.c_int, #number of membrane currents
ctypes.c_int, #number induced currents
ctypes.c_int, #number of nodes with membrane currents
_int_ptr, #number of species involved in each membrane current
_int_ptr, #charges of the species involved in each membrane current
_int_ptr, #node indices
_int_ptr, #node indices
_double_ptr, #scaling (areas) of the fluxes
_int_ptr, #charges for each species in each reation
ctypes.POINTER(ctypes.py_object), #hoc pointers
_int_ptr, #maps for membrane fluxes
_int_ptr #maps for ecs fluxes
]
set_reaction_indices = nrn_dll_sym('set_reaction_indices')
set_reaction_indices.argtypes = [ctypes.c_int, _int_ptr, _int_ptr, _int_ptr,
_int_ptr,_int_ptr,_double_ptr, ctypes.c_int, _int_ptr, _int_ptr, _int_ptr,
_int_ptr]
ecs_register_reaction = nrn_dll_sym('ecs_register_reaction')
ecs_register_reaction.argtype = [ctypes.c_int, ctypes.c_int, _int_ptr, fptr_prototype]
set_euler_matrix = nrn_dll_sym('rxd_set_euler_matrix')
set_euler_matrix.argtypes = [
ctypes.c_int,
ctypes.c_int,
_long_ptr,
_long_ptr,
_double_ptr,
numpy.ctypeslib.ndpointer(numpy.int_, flags='contiguous'),
ctypes.c_int,
numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),
]
rxd_setup_curr_ptrs = nrn_dll_sym('rxd_setup_curr_ptrs')
rxd_setup_curr_ptrs.argtypes = [
ctypes.c_int,
_int_ptr,
numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),
ctypes.POINTER(ctypes.py_object),
]
rxd_setup_conc_ptrs = nrn_dll_sym('rxd_setup_conc_ptrs')
rxd_setup_conc_ptrs.argtypes = [
ctypes.c_int,
_int_ptr,
ctypes.POINTER(ctypes.py_object)
]
_c_headers = """#include <math.h>
/*Some functions supported by numpy that aren't included in math.h
* names and arguments match the wrappers used in rxdmath.py
*/
double factorial(const double);
double degrees(const double);
void radians(const double, double*);
double log1p(const double);
"""
def _list_to_cint_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.c_int * len(data))(*tuple(data))
def _list_to_cdouble_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.c_double * len(data))(*tuple(data))
def _list_to_clong_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.c_long * len(data))(*tuple(data))
def _list_to_pyobject_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.py_object * len(data))(*tuple(data))
def byeworld():
# needed to prevent a seg-fault error at shutdown in at least some
# combinations of NEURON and Python, which I think is due to objects
# getting deleted out-of-order
global _react_matrix_solver
try:
del _react_matrix_solver
except NameError:
# # if it already didn't exist, that's fine
pass
_windows_remove_dlls()
atexit.register(byeworld)
# Faraday's constant (store to reduce number of lookups)
FARADAY = h.FARADAY
# converting from mM um^3 to molecules
# = 6.02214129e23 * 1000. / 1.e18 / 1000
# = avogadro * (L / m^3) * (m^3 / um^3) * (mM / M)
# value for Avogadro's constant from NIST webpage, accessed 25 April 2012:
# http://physics.nist.gov/cgi-bin/cuu/Value?na
_conversion_factor = 602214.129
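# sanity check on the value above: 1 mM in a 1 um^3 volume is
# 1e-3 mol/L * 1e-15 L = 1e-18 mol, i.e. about 602214 molecules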
_cvode_object = h.CVode()
last_diam_change_cnt = None
last_structure_change_cnt = None
_linmodadd_c = None
_diffusion_matrix = None
_curr_scales = None
_curr_ptrs = None
_curr_indices = None
_all_reactions = []
_zero_volume_indices = numpy.ndarray(0, dtype=numpy.int_)
_nonzero_volume_indices = []
nrn_tree_solve = nrn_dll_sym('nrn_tree_solve')
nrn_tree_solve.restype = None
_dptr = _double_ptr
_dimensions = collections.defaultdict(lambda: 1)
_default_dx = 0.25
_default_method = 'deterministic'
#CRxD
_diffusion_d = None
_diffusion_a = None
_diffusion_b = None
_diffusion_p = None
_cur_node_indices = None
_diffusion_a_ptr, _diffusion_b_ptr, _diffusion_p_ptr = None, None, None
def set_solve_type(domain=None, dimension=None, dx=None, nsubseg=None, method=None):
"""Specify the numerical discretization and solver options.
domain -- a section or Python iterable of sections"""
setting_default = False
if domain is None:
domain = h.allsec()
setting_default = True
elif isinstance(domain, nrn.Section):
domain = [domain]
# NOTE: These attributes are set on a per-nrn.Section basis; they cannot
# assume Section1D objects exist because they might be specified before
# those objects are created
# domain is now always an iterable (or invalid)
if method is not None:
raise RxDException('using set_solve_type to specify method is not yet implemented')
if dimension is not None:
if dimension not in (1, 3):
raise RxDException('invalid option to set_solve_type: dimension must be 1 or 3')
factory = lambda: dimension
if setting_default:
_dimensions.default_factory = factory
for sec in domain:
_dimensions[sec] = dimension
if dx is not None:
raise RxDException('using set_solve_type to specify dx is not yet implemented')
if nsubseg is not None:
raise RxDException('using set_solve_type to specify nsubseg is not yet implemented')
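# Illustrative calls (section names are hypothetical):
#   set_solve_type(dimension=1)                 # 1D discretization everywhere
#   set_solve_type([soma, dend], dimension=3)   # 3D discretization for selected sections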
def _unregister_reaction(r):
global _all_reactions
for i, r2 in enumerate(_all_reactions):
if r2() == r:
del _all_reactions[i]
break
def _register_reaction(r):
# TODO: should we search to make sure that (a weakref to) r hasn't already been added?
global _all_reactions, _external_solver_initialized
_all_reactions.append(_weakref_ref(r))
_external_solver_initialized = False
def _after_advance():
global last_diam_change_cnt
last_diam_change_cnt = _diam_change_count.value
def re_init():
"""reinitializes all rxd concentrations to match HOC values, updates matrices"""
global _external_solver_initialized
h.define_shape()
if not species._has_3d:
# TODO: if we do have 3D, make sure that we do the necessary parts of this
# update current pointers
section1d._purge_cptrs()
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None:
s._register_cptrs()
# update matrix equations
_setup_matrices()
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None: s.re_init()
# TODO: is this safe?
_cvode_object.re_init()
_external_solver_initialized = False
def _invalidate_matrices():
# TODO: make a separate variable for this?
global _diffusion_matrix, _external_solver_initialized, last_structure_change_cnt
_diffusion_matrix = None
last_structure_change_cnt = None
_external_solver_initialized = False
_rxd_offset = None
def _atolscale(y):
real_index_lookup = {item: index for index, item in enumerate(_nonzero_volume_indices)}
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None:
shifted_i = [real_index_lookup[i] + _rxd_offset for i in s.indices() if i in real_index_lookup]
y[shifted_i] *= s._atolscale
def _ode_count(offset):
global _rxd_offset, last_structure_change_cnt, _structure_change_count
initializer._do_init()
_rxd_offset = offset - len(_nonzero_volume_indices)
if _diffusion_matrix is None or last_structure_change_cnt != _structure_change_count.value: _setup_matrices()
last_structure_change_cnt = _structure_change_count.value
return len(_nonzero_volume_indices)
def _ode_reinit(y):
y[_rxd_offset : _rxd_offset + len(_nonzero_volume_indices)] = _node_get_states()[_nonzero_volume_indices]
def _ode_fun(t, y, ydot):
initializer.assert_initialized()
lo = _rxd_offset
hi = lo + len(_nonzero_volume_indices)
if lo == hi: return
states = _node_get_states().copy()
states[_nonzero_volume_indices] = y[lo : hi]
# need to fill in the zero volume states with the correct concentration
    # this assumes that the states at the zero volume indices are zero (although that
    # assumption could be easily removed)
#matrix = _scipy_sparse_dok_matrix((len(_zero_volume_indices), len(states)))
"""
for i, row in enumerate(_zero_volume_indices):
d = _diffusion_matrix[row, row]
if d:
nzj = _diffusion_matrix[row].nonzero()[1]
print 'nzj:', nzj
for j in nzj:
matrix[i, j] = -_diffusion_matrix[row, j] / d
states[_zero_volume_indices] = matrix * states
"""
if len(_zero_volume_indices):
states[_zero_volume_indices] = _mat_for_zero_volume_nodes * states
"""
for i in _zero_volume_indices:
v = _diffusion_matrix[i] * states
d = _diffusion_matrix[i, i]
if d:
states[i] = -v / d
"""
# TODO: make this so that the section1d parts use cptrs (can't do this directly for 3D because sum, but could maybe move that into the C)
# the old way: _section1d_transfer_to_legacy()
# for sr in _species_get_all_species().values():
# s = sr()
# if s is not None: s._transfer_to_legacy()
if ydot is not None:
# diffusion_matrix = - jacobian
ydot[lo : hi] = (_rxd_reaction(states) - _diffusion_matrix * states)[_nonzero_volume_indices]
states[_zero_volume_indices] = 0
_rxd_induced_currents = None
_memb_cur_ptrs= []
def _setup_memb_currents():
global _memb_cur_ptrs
initializer._do_init()
# setup membrane fluxes from our stuff
# TODO: cache the memb_cur_ptrs, memb_cur_charges, memb_net_charges, memb_cur_mapped
    # because they won't change very often
# need this; think it's because of initialization of mod files
if _curr_indices is None: return
SPECIES_ABSENT = -1
# TODO: change so that this is only called when there are in fact currents
rxd_memb_scales = []
_memb_cur_ptrs = []
memb_cur_charges = []
memb_net_charges = []
memb_cur_mapped = []
memb_cur_mapped_ecs = []
for rptr in _all_reactions:
r = rptr()
if r and r._membrane_flux:
scales = r._memb_scales
rxd_memb_scales.extend(scales)
_memb_cur_ptrs += r._cur_ptrs
memb_cur_mapped += r._cur_mapped
memb_cur_mapped_ecs += r._cur_mapped_ecs
memb_cur_charges += [r._cur_charges] * len(scales)
memb_net_charges += [r._net_charges] * len(scales)
ecs_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped_ecs)))]
ics_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped)))]
if _memb_cur_ptrs:
cur_counts = [len(x) for x in memb_cur_mapped]
num_currents = numpy.array(cur_counts).sum()
setup_currents(len(_memb_cur_ptrs),
num_currents,
len(_curr_indices), # num_currents == len(_curr_indices) if no Extracellular
_list_to_cint_array(cur_counts),
_list_to_cint_array(memb_net_charges),
_list_to_cint_array(_curr_indices),
_list_to_cint_array(_cur_node_indices),
_list_to_cdouble_array(rxd_memb_scales),
_list_to_cint_array(list(itertools.chain.from_iterable(memb_cur_charges))),
_list_to_pyobject_array(list(itertools.chain.from_iterable(_memb_cur_ptrs))),
_list_to_cint_array(ics_map),
_list_to_cint_array(ecs_map))
def _currents(rhs):
return
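    # NOTE: the unconditional return above makes the block below unreachable;
    # membrane currents appear to be applied by the compiled solver configured
    # in _setup_memb_currents instead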
if rxd_memb_flux:
# TODO: remove the asserts when this is verified to work
assert(len(rxd_memb_flux) == len(_cur_node_indices))
assert(len(rxd_memb_flux) == len(memb_cur_ptrs))
assert(len(rxd_memb_flux) == len(memb_cur_charges))
assert(len(rxd_memb_flux) == len(memb_net_charges))
for flux, cur_ptrs, cur_charges, net_charge, i, cur_maps in zip(rxd_memb_flux, memb_cur_ptrs, memb_cur_charges, memb_net_charges, _cur_node_indices, memb_cur_mapped):
rhs[i] -= net_charge * flux
#import sys
#sys.exit()
# TODO: remove this assert when more thoroughly tested
assert(len(cur_ptrs) == len(cur_maps))
for ptr, charge, cur_map_i in zip(cur_ptrs, cur_charges, cur_maps):
# this has the opposite sign of the above because positive
# currents lower the membrane potential
cur = charge * flux
ptr[0] += cur
for c in cur_map_i:
_rxd_induced_currents[c] += cur
#for sign, c in zip([-1, 1], cur_maps):
# if c is not None:
# _rxd_induced_currents[c] += sign * cur
_last_m = None
_last_preconditioner = None
_fixed_step_count = 0
def _rxd_reaction(states):
# TODO: this probably shouldn't be here
# TODO: this was included in the 3d, probably shouldn't be there either
# TODO: if its None and there is 3D... should we do anything special?
if _diffusion_matrix is None and not species._has_3d: _setup_matrices()
b = _numpy_zeros(len(states))
if _curr_ptr_vector is not None:
_curr_ptr_vector.gather(_curr_ptr_storage_nrn)
b[_curr_indices] = _curr_scales * (_curr_ptr_storage - _rxd_induced_currents)
b[_curr_indices] = _curr_scales * [ptr[0] for ptr in _curr_ptrs]
# TODO: store weak references to the r._evaluate in addition to r so no
# repeated lookups
#for rptr in _all_reactions:
# r = rptr()
# if r:
# indices, mult, rate = r._evaluate(states)
# we split this in parts to allow for multiplicities and to allow stochastic to make the same changes in different places
# for i, m in zip(indices, mult):
# b[i] += m * rate
node._apply_node_fluxes(b)
return b
_last_preconditioner_dt = 0
_last_dt = None
_last_m = None
_diffusion_d = None
_diffusion_a = None
_diffusion_b = None
_diffusion_p = None
_cur_node_indices = None
_diffusion_a_ptr, _diffusion_b_ptr, _diffusion_p_ptr = None, None, None
def _setup():
initializer._do_init()
# TODO: this is when I should resetup matrices (structure changed event)
global _last_dt, _external_solver_initialized
_last_dt = None
_external_solver_initialized = False
# Using C-code for reactions
options.use_reaction_contribution_to_jacobian = False
def _find_librxdmath():
import glob
base_path = os.path.join(h.neuronhome(), "..", "..", platform.machine(), "lib", "librxdmath")
success = False
for extension in ['', '.dll', '.so', '.dylib']:
dll = base_path + extension
try:
success = os.path.exists(dll)
except:
pass
if success: break
if not success:
if sys.platform.lower().startswith("win"):
dll = os.path.join(h.neuronhome(), 'bin', 'librxdmath.dll')
success = os.path.exists(dll)
if not success:
raise RxDException('unable to connect to the librxdmath library')
return dll
def _c_compile(formula):
filename = 'rxddll' + str(uuid.uuid1())
with open(filename + '.c', 'w') as f:
f.write(formula)
math_library = '-lm'
fpic = '-fPIC'
try:
gcc = os.environ["CC"]
except:
        # when running on Windows, try to use the gcc included with NEURON
if sys.platform.lower().startswith("win"):
math_library = ''
fpic = ''
gcc = os.path.join(h.neuronhome(),"mingw","mingw64","bin","x86_64-w64-mingw32-gcc.exe")
if not os.path.isfile(gcc):
raise RxDException("unable to locate a C compiler. Please `set CC=<path to C compiler>`")
else:
gcc = "gcc"
#TODO: Check this works on non-Linux machines
gcc_cmd = "%s -I%s -I%s " % (gcc, sysconfig.get_python_inc(), os.path.join(h.neuronhome(), "..", "..", "include", "nrn"))
gcc_cmd += "-shared %s %s.c %s " % (fpic, filename, _find_librxdmath())
gcc_cmd += "-o %s.so %s" % (filename, math_library)
if sys.platform.lower().startswith("win"):
my_path = os.getenv('PATH')
os.putenv('PATH', my_path + ';' + os.path.join(h.neuronhome(),"mingw","mingw64","bin"))
os.system(gcc_cmd)
os.putenv('PATH', my_path)
else:
os.system(gcc_cmd)
#TODO: Find a better way of letting the system locate librxdmath.so.0
rxdmath_dll = ctypes.cdll[_find_librxdmath()]
dll = ctypes.cdll['./%s.so' % filename]
reaction = dll.reaction
reaction.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double)]
reaction.restype = ctypes.c_double
os.remove(filename + '.c')
if sys.platform.lower().startswith("win"):
#cannot remove dll that are in use
_windows_dll.append(weakref.ref(dll))
_windows_dll_files.append(filename + ".so")
else:
os.remove(filename + '.so')
return reaction
def _conductance(d):
pass
def _ode_jacobian(dt, t, ypred, fpred):
#print '_ode_jacobian: dt = %g, last_dt = %r' % (dt, _last_dt)
lo = _rxd_offset
hi = lo + len(_nonzero_volume_indices)
_reaction_matrix_setup(dt, ypred[lo : hi])
_curr_ptr_vector = None
_curr_ptr_storage = None
_curr_ptr_storage_nrn = None
pinverse = None
_cur_map = None
_h_ptrvector = h.PtrVector
_h_vector = h.Vector
_structure_change_count = nrn_dll_sym('structure_change_cnt', _ctypes_c_int)
_diam_change_count = nrn_dll_sym('diam_change_cnt', _ctypes_c_int)
def _donothing(): pass
def _update_node_data(force=False):
global last_diam_change_cnt, last_structure_change_cnt, _curr_indices, _curr_scales, _curr_ptrs, _cur_map
global _curr_ptr_vector, _curr_ptr_storage, _curr_ptr_storage_nrn
if last_diam_change_cnt != _diam_change_count.value or _structure_change_count.value != last_structure_change_cnt or force:
_cur_map = {}
last_diam_change_cnt = _diam_change_count.value
last_structure_change_cnt = _structure_change_count.value
#if not species._has_3d:
# TODO: merge this with the 3d/hybrid case?
nsegs_changed = 0
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None: nsegs_changed += s._update_node_data()
if nsegs_changed:
section1d._purge_cptrs()
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None:
s._update_region_indices(True)
s._register_cptrs()
if species._has_1d and species._1d_submatrix_n():
volumes = node._get_data()[0]
_zero_volume_indices = (numpy.where(volumes == 0)[0]).astype(numpy.int_)
setup_solver(_node_get_states(), len(_node_get_states()), _zero_volume_indices, len(_zero_volume_indices), h._ref_t, h._ref_dt)
# TODO: separate compiling reactions -- so the indices can be updated without recompiling
_compile_reactions()
#end#if
for rptr in _all_reactions:
r = rptr()
if r is not None: r._update_indices()
_curr_indices = []
_curr_scales = []
_curr_ptrs = []
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None: s._setup_currents(_curr_indices, _curr_scales, _curr_ptrs, _cur_map)
num = len(_curr_ptrs)
if num:
_curr_ptr_vector = _h_ptrvector(num)
_curr_ptr_vector.ptr_update_callback(_donothing)
for i, ptr in enumerate(_curr_ptrs):
_curr_ptr_vector.pset(i, ptr)
_curr_ptr_storage_nrn = _h_vector(num)
_curr_ptr_storage = _curr_ptr_storage_nrn.as_numpy()
else:
_curr_ptr_vector = None
#_curr_scales = _numpy_array(_curr_scales)
def _matrix_to_rxd_sparse(m):
"""precondition: assumes m a numpy array"""
nonzero_i, nonzero_j = list(zip(*list(m.keys())))
nonzero_values = numpy.ascontiguousarray(list(m.values()), dtype=numpy.float64)
# number of rows
n = m.shape[1]
return n, len(nonzero_i), numpy.ascontiguousarray(nonzero_i, dtype=numpy.int_), numpy.ascontiguousarray(nonzero_j, dtype=numpy.int_), nonzero_values
_euler_matrix = None
# TODO: make sure this does the right thing when the diffusion constant changes between two neighboring nodes
def _setup_matrices():
global _curr_ptrs
global _cur_node_indices
global _zero_volume_indices
# TODO: this sometimes seems to get called twice. Figure out why and fix, if possible.
# if the shape has changed update the nodes
_update_node_data()
n = len(_node_get_states())
#TODO: Replace with ADI version
"""
if species._has_3d:
_euler_matrix = _scipy_sparse_dok_matrix((n, n), dtype=float)
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None: s._setup_matrices3d(_euler_matrix)
_diffusion_matrix = -_euler_matrix
_euler_matrix = _euler_matrix.tocsr()
_update_node_data(True)
# NOTE: if we also have 1D, this will be replaced with the correct values below
_zero_volume_indices = []
_nonzero_volume_indices = list(range(len(_node_get_states())))
"""
if species._has_1d:
n = species._1d_submatrix_n()
# TODO: initialization is slow. track down why
_last_dt = None
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None:
s._assign_parents()
_update_node_data(True)
volumes = node._get_data()[0]
_zero_volume_indices = (numpy.where(volumes == 0)[0]).astype(numpy.int_)
_nonzero_volume_indices = volumes.nonzero()[0]
# remove old linearmodeladdition
_linmodadd_cur = None
if n:
# create sparse matrix for C in cy'+gy=b
c_diagonal = numpy.zeros(n,dtype=ctypes.c_double)
# most entries are 1 except those corresponding to the 0 and 1 ends
# create the matrix G
#if not species._has_3d:
# # if we have both, then put the 1D stuff into the matrix that already exists for 3D
_diffusion_matrix = [dict() for idx in range(n)]
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None:
s._setup_diffusion_matrix(_diffusion_matrix)
s._setup_c_matrix(c_diagonal)
#print '_diffusion_matrix.shape = %r, n = %r, species._has_3d = %r' % (_diffusion_matrix.shape, n, species._has_3d)
euler_matrix_i, euler_matrix_j, euler_matrix_nonzero = [], [], []
for i in range(n):
mat_i = _diffusion_matrix[i]
euler_matrix_i.extend(itertools.repeat(i,len(mat_i)))
euler_matrix_j.extend(mat_i.keys())
euler_matrix_nonzero.extend(mat_i.values())
euler_matrix_nnonzero = len(euler_matrix_nonzero)
assert(len(euler_matrix_i) == len(euler_matrix_j) == len(euler_matrix_nonzero))
# modify C for cases where no diffusive coupling of 0, 1 ends
# TODO: is there a better way to handle no diffusion?
#for i in range(n):
# if not _diffusion_matrix[i, i]:
# _linmodadd_c[i, i] = 1
# setup for induced membrane currents
_cur_node_indices = []
for rptr in _all_reactions:
r = rptr()
if r is not None:
r._setup_membrane_fluxes(_cur_node_indices, _cur_map)
#_cvode_object.re_init()
#if species._has_3d:
# _euler_matrix = -_diffusion_matrix
#TODO: Replace this this to handle 1d/3d hybrid models
"""
if species._has_1d and species._has_3d:
# TODO: add connections to matrix; for now: find them
hybrid_neighbors = collections.defaultdict(lambda: [])
hybrid_diams = {}
dxs = set()
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None:
if s._nodes and s._secs:
# have both 1D and 3D, so find the neighbors
# for each of the 3D sections, find the parent sections
for r in s._regions:
dxs.add(r._dx)
for sec in r._secs3d:
parent_seg = sec.trueparentseg()
parent_sec = None if not parent_seg else parent_seg.sec
# are any of these a match with a 1d section?
if s._has_region_section(r, parent_sec):
# this section has a 1d section that is a parent
index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), parent_sec, h.parent_connection(sec=sec))
hybrid_neighbors[index1d] += indices3d
hybrid_diams[index1d] = parent_seg.diam
else:
for sec1d in r._secs1d:
parent_1d_seg = sec1d.trueparentseg()
parent_1d = None if not parent_seg else parent_seg.sec
if parent_1d == sec:
# it is the parent of a 1d section
index1d, indices3d = _get_node_indices(s, r, sec, h.parent_connection(sec=sec1d), sec1d, sec1d.orientation())
hybrid_neighbors[index1d] += indices3d
hybrid_diams[index1d] = parent_1d_seg.diam
break
elif parent_1d == parent_sec:
# it connects to the parent of a 1d section
index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), sec1d, sec1d.orientation())
hybrid_neighbors[index1d] += indices3d
hybrid_diams[index1d] = parent_1d_seg.diam
break
if len(dxs) > 1:
raise RxDException('currently require a unique value for dx')
dx = dxs.pop()
diffs = node._diffs
n = len(_node_get_states())
# TODO: validate that we're doing the right thing at boundaries
for index1d in list(hybrid_neighbors.keys()):
neighbors3d = set(hybrid_neighbors[index1d])
# NOTE: splitting the connection area equally across all the connecting nodes
area = (numpy.pi * 0.25 * hybrid_diams[index1d] ** 2) / len(neighbors3d)
for i in neighbors3d:
d = diffs[i]
vol = node._volumes[i]
rate = d * area / (vol * dx / 2.)
# make the connections on the 3d side
_euler_matrix[i, i] -= rate
_euler_matrix[i, index1d] += rate
# make the connections on the 1d side (scale by vol because conserving mass not volume)
_euler_matrix[index1d, index1d] -= rate * vol
_euler_matrix[index1d, i] += rate * vol
#print 'index1d row sum:', sum(_euler_matrix[index1d, j] for j in xrange(n))
#print 'index1d col sum:', sum(_euler_matrix[j, index1d] for j in xrange(n))
"""
#CRxD
if n and euler_matrix_nnonzero > 0:
_update_node_data()
section1d._transfer_to_legacy()
set_euler_matrix(n, euler_matrix_nnonzero,
_list_to_clong_array(euler_matrix_i),
_list_to_clong_array(euler_matrix_j),
_list_to_cdouble_array(euler_matrix_nonzero),
_zero_volume_indices,
len(_zero_volume_indices),
c_diagonal)
else:
rxd_set_no_diffusion()
setup_solver(_node_get_states(), len(_node_get_states()), _zero_volume_indices, len(_zero_volume_indices), h._ref_t, h._ref_dt)
if _curr_indices is not None and len(_curr_indices) > 0:
rxd_setup_curr_ptrs(len(_curr_indices), _list_to_cint_array(_curr_indices),
numpy.concatenate(_curr_scales), _list_to_pyobject_array(_curr_ptrs))
if section1d._all_cindices is not None and len(section1d._all_cindices) > 0:
rxd_setup_conc_ptrs(len(section1d._all_cindices),
_list_to_cint_array(section1d._all_cindices),
_list_to_pyobject_array(section1d._all_cptrs))
# we do this last because of performance issues with changing sparsity of csr matrices
"""
if _diffusion_matrix is not None:
_diffusion_matrix = _diffusion_matrix.tocsr()
if _euler_matrix is not None:
_euler_matrix = _euler_matrix.tocsr()
if species._has_1d:
if species._has_3d:
_diffusion_matrix = -_euler_matrix
n = species._1d_submatrix_n()
if n:
matrix = _diffusion_matrix[_zero_volume_indices].tocsr()
indptr = matrix.indptr
matrixdata = matrix.data
count = len(_zero_volume_indices)
for row, i in enumerate(_zero_volume_indices):
d = _diffusion_matrix[i, i]
if d:
matrixdata[indptr[row] : indptr[row + 1]] /= -d
matrix[row, i] = 0
else:
matrixdata[indptr[row] : indptr[row + 1]] = 0
global _mat_for_zero_volume_nodes
_mat_for_zero_volume_nodes = matrix
# TODO: _mat_for_zero_volume_nodes is used for CVode.
# Figure out if/how it has to be changed for hybrid 1D/3D sims (probably just augment with identity? or change how its used to avoid multiplying by I)
"""
"""
if pt1 in indices:
ileft = indices[pt1]
dleft = (d + diffs[ileft]) * 0.5
left = dleft * areal / (vol * dx)
euler_matrix[index, ileft] += left
euler_matrix[index, index] -= left
if pt2 in indices:
iright = indices[pt2]
dright = (d + diffs[iright]) * 0.5
right = dright * arear / (vol * dx)
euler_matrix[index, iright] += right
euler_matrix[index, index] -= right
"""
def _get_node_indices(species, region, sec3d, x3d, sec1d, x1d):
# TODO: remove need for this assumption
assert(x1d in (0, 1))
disc_indices = region._indices_from_sec_x(sec3d, x3d)
#print '%r(%g) connects to the 1d section %r(%g)' % (sec3d, x3d, sec1d, x1d)
#print 'disc indices: %r' % disc_indices
indices3d = []
for node in species._nodes:
if node._r == region:
for i, j, k in disc_indices:
if node._i == i and node._j == j and node._k == k:
indices3d.append(node._index)
#print 'found node %d with coordinates (%g, %g, %g)' % (node._index, node.x3d, node.y3d, node.z3d)
# discard duplicates...
# TODO: really, need to figure out all the 3d nodes connecting to a given 1d endpoint, then unique that
indices3d = list(set(indices3d))
#print '3d matrix indices: %r' % indices3d
# TODO: remove the need for this assertion
if x1d == sec1d.orientation():
# TODO: make this whole thing more efficient
# the parent node is the nonzero index on the first row before the diagonal
first_row = min([node._index for node in species.nodes(region)(sec1d)])
for j in range(first_row):
if _euler_matrix[first_row, j] != 0:
index_1d = j
break
else:
raise RxDException('should never get here; could not find parent')
elif x1d == 1 - sec1d.orientation():
# the ending zero-volume node is the one after the last node
# TODO: make this more efficient
index_1d = max([node._index for node in species.nodes(region)(sec1d)]) + 1
else:
raise RxDException('should never get here; _get_node_indices apparently only partly converted to allow connecting to 1d in middle')
#print '1d index is %d' % index_1d
return index_1d, indices3d
def _compile_reactions():
#clear all previous reactions (intracellular & extracellular) and the
#supporting indexes
#_windows_remove_dlls()
clear_rates()
regions_inv = dict() #regions -> reactions that occur there
species_by_region = dict()
all_species_involed = set()
location_count = 0
ecs_regions_inv = dict()
ecs_species_by_region = dict()
ecs_all_species_involed = set()
ecs_mc_species_involved = set()
from . import rate, multiCompartmentReaction
#Find sets of sections that contain the same regions
from .region import _c_region
matched_regions = [] # the different combinations of regions that arise in different sections
for nrnsec in list(section1d._rxd_sec_lookup.keys()):
set_of_regions = set() # a set of the regions that occur in a given section
for sec in section1d._rxd_sec_lookup[nrnsec]:
if sec(): set_of_regions.add(sec()._region)
if set_of_regions not in matched_regions:
matched_regions.append(set_of_regions)
region._c_region_lookup = dict()
#create a c_region instance for each of the unique sets of regions
c_region_list = []
for sets in matched_regions:
c_region_list.append(_c_region(sets))
for rptr in _all_reactions:
r = rptr()
if not r:
continue
#Find all the species involved
if isinstance(r,rate.Rate):
if not r._species():
continue
sptrs = set(list(r._involved_species) + [r._species])
else:
sptrs = set(list(r._involved_species) + r._dests + r._sources)
#Find all the regions involved
if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
if not hasattr(r._mult, 'flatten'):
r._update_indices()
react_regions = [s()._extracellular()._region for s in r._sources + r._dests if isinstance(s(),species.SpeciesOnExtracellular)] + [s()._region() for s in r._sources + r._dests if not isinstance(s(),species.SpeciesOnExtracellular)]
react_regions += [sptr()._region() for sptr in sptrs if isinstance(sptr(),species.SpeciesOnRegion)]
#if regions are specified - use those
elif hasattr(r,'_active_regions'):
react_regions = r._active_regions
#Otherwise use all the regions where the species are
else:
react_regions = set()
nsp = 0
for sp in sptrs:
s = sp()
nsp += 1
if isinstance(s,species.SpeciesOnRegion):
react_regions.add(s._region())
elif isinstance(s,species.SpeciesOnExtracellular):
react_regions.add(s._extracellular()._region)
elif isinstance(s,species._ExtracellularSpecies):
react_regions.add(s._region)
elif None not in s._regions:
[react_regions.add(reg) for reg in s._regions + s._extracellular_regions]
react_regions = list(react_regions)
#Only regions where ALL the species are present -- unless it is a membrane
#from collections import Counter
#from . import geometry as geo
#react_regions = [reg for reg, count in Counter(react_regions).iteritems() if count == nsp or isinstance(reg.geometry,geo.ScalableBorder)]
#Any intracellular regions
if not all([isinstance(x, region.Extracellular) for x in react_regions]):
species_involved = []
for sp in sptrs:
s = sp()
if not isinstance(s, species.SpeciesOnExtracellular):
all_species_involed.add(s)
species_involved.append(s)
for reg in react_regions:
if isinstance(reg, region.Extracellular):
continue
if reg in regions_inv:
regions_inv[reg].append(rptr)
else:
regions_inv[reg] = [rptr]
if reg in species_by_region:
species_by_region[reg] = species_by_region[reg].union(species_involved)
else:
species_by_region[reg] = set(species_involved)
for sec in reg._secs:
location_count += sec.nseg
#Any extracellular regions
if any([isinstance(x, region.Extracellular) for x in react_regions]):
#MultiCompartment - so can have both extracellular and intracellular regions
if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
for sp in sptrs:
s = sp()
if isinstance(s,species._ExtracellularSpecies):
ecs_mc_species_involved.add(s)
elif isinstance(s,species.SpeciesOnExtracellular):
ecs_mc_species_involved.add(s._extracellular())
for reg in react_regions:
if reg in list(ecs_species_by_region.keys()):
ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_mc_species_involved)
else:
ecs_species_by_region[reg] = set(ecs_mc_species_involved)
#Otherwise - reaction can only have extracellular regions
else:
ecs_species_involved = []
for sp in sptrs:
s = sp()
ecs_all_species_involed.add(s)
ecs_species_involved.append(s)
if any([isinstance(x, region.Region) for x in react_regions]):
raise RxDException("Error: an %s cannot have both Extracellular and Intracellular regions. Use a MultiCompartmentReaction or specify the desired region with the 'region=' keyword argument", rptr().__class__)
for reg in react_regions:
if not isinstance(reg, region.Extracellular):
continue
if reg in ecs_regions_inv:
ecs_regions_inv[reg].append(rptr)
else:
ecs_regions_inv[reg] = [rptr]
if reg in ecs_species_by_region:
ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_species_involved)
else:
ecs_species_by_region[reg] = set(ecs_species_involved)
#Create lists of indexes for intracellular reactions and rates
nseg_by_region = [] # a list of the number of segments for each region
# a table for location,species -> state index
location_index = []
for reg in regions_inv:
rptr = weakref.ref(reg)
for c_region in region._c_region_lookup[rptr]:
for react in regions_inv[reg]:
c_region.add_reaction(react,rptr)
c_region.add_species(species_by_region[reg])
if reg in ecs_species_by_region:
c_region.add_ecs_species(ecs_species_by_region[reg])
# now setup the reactions
setup_solver(_node_get_states(), len(_node_get_states()), _zero_volume_indices, len(_zero_volume_indices), h._ref_t, h._ref_dt)
#if there are no reactions
if location_count == 0 and len(ecs_regions_inv) == 0:
return None
#Setup intracellular and multicompartment reactions
if location_count > 0:
from . import rate, multiCompartmentReaction
for creg in c_region_list:
creg._initalize()
mc_mult_count = 0
mc_mult_list = []
species_ids_used = numpy.zeros((creg.num_species,creg.num_regions),bool)
flux_ids_used = numpy.zeros((creg.num_species,creg.num_regions),bool)
ecs_species_ids_used = numpy.zeros((creg.num_ecs_species,creg.num_regions),bool)
fxn_string = _c_headers
fxn_string += 'void reaction(double** species, double** rhs, double* mult, double** species_ecs, double** rhs_ecs, double** flux)\n{'
# declare the "rate" variable if any reactions (non-rates)
for rprt in list(creg._react_regions.keys()):
if not isinstance(rprt(),rate.Rate):
fxn_string += '\n\tdouble rate;'
break
for rptr in list(creg._react_regions.keys()):
r = rptr()
if isinstance(r,rate.Rate):
s = r._species()
species_id = creg._species_ids.get(s._id)
if isinstance(s,species.SpeciesOnRegion):
region_ids = [creg._region_ids.get(s._region()._id)]
else:
region_ids = creg._react_regions[rptr]
for region_id in region_ids:
rate_str = re.sub(r'species\[(\d+)\]\[(\d+)\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)
rate_str = re.sub(r'species\[(\d+)\]\[\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)
operator = '+=' if species_ids_used[species_id][region_id] else '='
fxn_string += "\n\trhs[%d][%d] %s %s;" % (species_id, region_id, operator, rate_str)
species_ids_used[species_id][region_id] = True
elif isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
#Lookup the region_id for the reaction
for sptr in r._sources + r._dests:
if isinstance(sptr(),species.SpeciesOnExtracellular):
continue
region_id = creg._region_ids.get(sptr()._region()._id)
rate_str = re.sub(r'species\[(\d+)\]\[(\d+)\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)
rate_str = re.sub(r'species\[(\d+)\]\[\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)
rate_str = re.sub(r'species_ecs\[(\d+)\]',lambda m: "species_ecs[%i][%i]" % (int(m.groups()[0]), region_id), rate_str)
fxn_string += "\n\trate = %s;" % rate_str
for sptr in r._sources + r._dests:
s = sptr()
if isinstance(s,species.SpeciesOnExtracellular):
species_id = s._extracellular()._grid_id
operator = '+=' if ecs_species_ids_used[species_id][region_id] else '='
fxn_string += "\n\trhs_ecs[%d][%d] %s mult[%d] * rate;" % (species_id, region_id, operator, mc_mult_count)
ecs_species_ids_used[species_id][region_id] = True
else:
species_id = creg._species_ids.get(s._id)
region_id = creg._region_ids.get(sptr()._region()._id)
operator = '+=' if species_ids_used[species_id][region_id] else '='
fxn_string += "\n\trhs[%d][%d] %s mult[%d] * rate;" % (species_id, region_id, operator, mc_mult_count)
species_ids_used[species_id][region_id] = True
if r._membrane_flux:
operator = '+=' if flux_ids_used[species_id][region_id] else '='
fxn_string += "\n\tif(flux) flux[%d][%d] %s rate;" % (species_id, region_id, operator)
flux_ids_used[species_id][region_id] = True
#TODO: Fix problem if the whole region isn't part of the same aggregate c_region
mc_mult_count += 1
mc_mult_list.extend(r._mult.flatten())
else:
for region_id in creg._react_regions[rptr]:
rate_str = re.sub(r'species\[(\d+)\]\[(\d+)\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)
rate_str = re.sub(r'species\[(\d+)\]\[\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)
fxn_string += "\n\trate = %s;" % rate_str
summed_mults = collections.defaultdict(lambda: 0)
for (mult, sp) in zip(r._mult, r._sources + r._dests):
summed_mults[creg._species_ids.get(sp()._id)] += mult
for idx in sorted(summed_mults.keys()):
operator = '+=' if species_ids_used[idx][region_id] else '='
species_ids_used[idx][region_id] = True
fxn_string += "\n\trhs[%d][%d] %s (%g) * rate;" % (idx, region_id, operator, summed_mults[idx])
fxn_string += "\n}\n"
register_rate(creg.num_species, creg.num_regions, creg.num_segments, creg.get_state_index(),
creg.num_ecs_species, creg.get_ecs_species_ids(), creg.get_ecs_index(),
mc_mult_count, numpy.array(mc_mult_list, dtype=ctypes.c_double),
_c_compile(fxn_string))
#Setup extracellular reactions
if len(ecs_regions_inv) > 0:
grid_ids = []
all_gids = set()
fxn_string = _c_headers
#TODO: find the nrn include path in python
        # it is needed for a couple of functions used here that are not in math.h
fxn_string += 'void reaction(double* species_ecs, double* rhs)\n{'
# declare the "rate" variable if any reactions (non-rates)
for rptr in [r for rlist in list(ecs_regions_inv.values()) for r in rlist]:
if not isinstance(rptr(),rate.Rate):
fxn_string += '\n\tdouble rate;'
break
        # get a list of all grid_ids involved
for rptr in [r for rlist in list(ecs_regions_inv.values()) for r in rlist]:
if isinstance(rptr(),rate.Rate):
for sp in [rptr()._species] + rptr()._involved_species_ecs:
s = sp()[reg]._extracellular() if isinstance(sp(), species.Species) else sp()
all_gids.add(sp()._extracellular()._grid_id if isinstance(s, species.SpeciesOnExtracellular) else s._grid_id)
else:
for sp in rptr()._sources + rptr()._dests + rptr()._involved_species_ecs:
s = sp()[reg]._extracellular() if isinstance(sp(), species.Species) else sp()
all_gids.add(sp()._extracellular()._grid_id if isinstance(s, species.SpeciesOnExtracellular) else s._grid_id)
all_gids = list(all_gids)
for reg in ecs_regions_inv:
for rptr in ecs_regions_inv[reg]:
r = rptr()
rate_str = re.sub(r'species_ecs\[(\d+)\]',lambda m: "species_ecs[%i]" % [pid for pid,gid in enumerate(all_gids) if gid == int(m.groups()[0])][0], r._rate_ecs)
if isinstance(r,rate.Rate):
s = r._species()
#Get underlying rxd._ExtracellularSpecies for the grid_id
if isinstance(s, species.Species):
s = s[reg]._extracellular()
elif isinstance(s, species.SpeciesOnExtracellular):
s = s._extracellular()
if s._grid_id in grid_ids:
operator = '+='
else:
operator = '='
grid_ids.append(s._grid_id)
pid = [pid for pid,gid in enumerate(all_gids) if gid == s._grid_id][0]
fxn_string += "\n\trhs[%d] %s %s;" % (pid, operator, rate_str)
else:
idx=0
fxn_string += "\n\trate = %s;" % rate_str
for sp in r._sources + r._dests:
s = sp()
#Get underlying rxd._ExtracellularSpecies for the grid_id
if isinstance(s, species.Species):
s = s[reg]._extracellular()
elif isinstance(s, species.SpeciesOnExtracellular):
s = s._extracellular()
if s._grid_id in grid_ids:
operator = '+='
else:
operator = '='
grid_ids.append(s._grid_id)
pid = [pid for pid,gid in enumerate(all_gids) if gid == s._grid_id][0]
fxn_string += "\n\trhs[%d] %s (%s)*rate;" % (pid, operator, r._mult[idx])
idx += 1
fxn_string += "\n}\n"
ecs_register_reaction(0, len(all_gids), _list_to_cint_array(all_gids), _c_compile(fxn_string))
def _init():
if len(species._all_species) == 0:
return None
initializer._do_init()
# TODO: check about the 0<x<1 problem alluded to in the documentation
h.define_shape()
# if the shape has changed update the nodes
_update_node_data()
if species._has_1d:
section1d._purge_cptrs()
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None:
# TODO: are there issues with hybrid or 3D here? (I don't think so, but here's a bookmark just in case)
s._register_cptrs()
s._finitialize()
_setup_matrices()
_compile_reactions()
_setup_memb_currents()
def _init_concentration():
if len(species._all_species) == 0:
return None
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None:
# TODO: are there issues with hybrid or 3D here? (I don't think so, but here's a bookmark just in case)
s._finitialize()
_has_nbs_registered = False
_nbs = None
do_setup_matrices_fptr = None
def _do_nbs_register():
global _has_nbs_registered, _nbs, _fih, _fih2, _fih3, do_setup_matrices_fptr
if not _has_nbs_registered:
#from neuron import nonvint_block_supervisor as _nbs
_has_nbs_registered = True
#_nbs.register(_callbacks) not used by crxd
#
# register the initialization handler and the ion register handler
#
_fih = h.FInitializeHandler(_init_concentration)
_fih3 = h.FInitializeHandler(3, _init)
set_setup_matrices = nrn_dll_sym('set_setup_matrices')
set_setup_matrices.argtypes = [fptr_prototype]
do_setup_matrices_fptr = fptr_prototype(_setup_matrices)
set_setup_matrices(do_setup_matrices_fptr)
_fih2 = h.FInitializeHandler(3, initializer._do_ion_register)
#
# register scatter/gather mechanisms
#
_cvode_object.extra_scatter_gather(0, _after_advance)
# register the Python callbacks
do_setup_fptr = fptr_prototype(_setup)
do_initialize_fptr = fptr_prototype(_init)
set_setup(do_setup_fptr)
set_initialize(do_initialize_fptr)
def _windows_remove_dlls():
global _windows_dll_files, _windows_dll
for (dll_ptr,filepath) in zip(_windows_dll,_windows_dll_files):
dll = dll_ptr()
if dll:
handle = dll._handle
del dll
ctypes.windll.kernel32.FreeLibrary(handle)
os.remove(filepath)
_windows_dll_files = []
_windows_dll = []
def nthread(n=None):
if(n):
_set_num_threads(n)
return _get_num_threads()
| []
| []
| [
"CC",
"PATH"
]
| [] | ["CC", "PATH"] | python | 2 | 0 | |
main.go | package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"sync"
"time"
)
type reset func()
type astronaut struct {
Id int64 `json:"id"`
Color string `json:"color"`
Power string `json:"power"`
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func main() {
maxCount := 15
mutex := &sync.Mutex{}
astronauts := []astronaut{}
colorPattern := regexp.MustCompile(`^#[A-Fa-f0-9]{6}$`)
powerPattern := regexp.MustCompile(`^(\x{1F4A5}|\x{1F496}|\x{1F4A7}|\x{1F525}|\x{2B50}|\x{1F48E})$`)
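	// e.g. a POST body of {"color": "#1a2b3c", "power": "⭐"} passes both patterns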
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
var count int
var err error
var values url.Values
values, err = url.ParseQuery(r.URL.RawQuery)
if err == nil && values.Get("count") != "" {
				count, err = strconv.Atoi(values.Get("count"))
}
if count <= 0 || err != nil {
				count = maxCount
}
			astronautsJson, err := json.Marshal(astronauts[0:min(len(astronauts), count)])
if err != nil {
log.Fatal(err)
}
			fmt.Fprint(w, string(astronautsJson))
case "POST":
decoder := json.NewDecoder(r.Body)
var astro astronaut
err := decoder.Decode(&astro)
if err != nil {
log.Print(err)
http.Error(w, "Error", 500)
}
match := colorPattern.MatchString(astro.Color) && powerPattern.MatchString(astro.Power)
if match == false {
log.Printf("Bad Astronaut %+v", astro)
http.Error(w, "Bad Astronaut", 400)
} else {
mutex.Lock()
astro.Id = time.Now().UnixNano()
astronauts = append([]astronaut{astro}, astronauts[0:min(len(astronauts), maxCount - 1)]...)
mutex.Unlock()
}
}
})
port := os.Getenv("PORT")
if port == "" {
port = "80"
}
fmt.Printf("Astronauts launching on :%s 🚀\n", port)
err := http.ListenAndServe(fmt.Sprintf(":%s", port), nil)
if err != nil {
panic(err)
}
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
thorpy/__init__.py | __version__ = "1.5.2a"
import sys
import os
# verify that pygame is on the machine
try:
import pygame
except Exception:
print("Pygame doesn't seem to be installed on this machine.")
# add thorpy folder to Windows and Python search paths
THORPY_PATH = os.path.abspath(os.path.dirname(__file__))
##THORPY_PATH = "./" #for py2exe
try:
os.environ['PATH'] = ';'.join((THORPY_PATH, os.environ['PATH']))
sys.path.append(THORPY_PATH)
except Exception:
print("Couldn't add Thor to sys.path...\nThorPy path : " + THORPY_PATH)
USEREVENT = pygame.USEREVENT + 1 # thorpy takes one event on pygame's userevents
THORPY_EVENT = pygame.USEREVENT
#import subpackages
import thorpy.elements
import thorpy.menus
import thorpy._utils
import thorpy.miscgui
import thorpy.painting as painting
import thorpy.miscgui.application as application
import thorpy.miscgui.storage as storage
import thorpy.painting.graphics as graphics
##import testmodule
from thorpy._utils.images import load_image, change_color_on_img, change_color_on_img_ip
from thorpy.elements.launchers.browserlauncher import BrowserLauncher
from thorpy.elements.launchers.dropdownlistlauncher import DropDownListLauncher
from thorpy.elements.launchers.paramsetterlauncher import ParamSetterLauncher
from thorpy.elements.launchers.colorsetterlauncher import ColorSetterLauncher
from thorpy.elements.paramsetter import ParamSetter
from thorpy.elements.background import Background
from thorpy.elements.image import Image
from thorpy.elements.box import Box, BarBox
from thorpy.elements.browserlight import BrowserLight
from thorpy.elements.browser import Browser
from thorpy.elements.checker import Checker
from thorpy.elements.clickable import Clickable
from thorpy.elements._wrappers import make_button, make_text, make_alert, make_image_button
from thorpy.elements._wrappers import launch_alert, launch_blocking_alert
from thorpy.elements._wrappers import launch_choices, launch_blocking_choices
from thorpy.elements._wrappers import make_alert
from thorpy.elements._wrappers import make_stored_ghost as make_group
from thorpy.elements._wrappers import make_font_setter, make_fontsize_setter
from thorpy.elements._wrappers import make_font_options_setter, make_display_options_setter, make_global_display_options
from thorpy.elements.colorsetter import ColorSetter
from thorpy.elements.ddlf import DropDownListFast as DropDownList
from thorpy.elements.draggable import Draggable, ClickDraggable
from thorpy.elements.element import Element
from thorpy.elements.ghost import Ghost
from thorpy.elements.hoverable import Hoverable
from thorpy.elements.hoverzone import HoverZone
from thorpy.elements.inserter import Inserter
from thorpy.elements.keypressable import KeyPressable
from thorpy.elements.keytogglable import KeyTogglable
from thorpy.elements.paramsetter import ParamSetter
from thorpy.elements.pressable import Pressable
##from thorpy.elements.text import MultilineText
from thorpy.elements.text import OneLineText, MultilineText
from thorpy.elements.slidersetter import SliderXSetter as SliderX
from thorpy.elements.togglable import Togglable
from thorpy.elements.line import Line
from thorpy.elements._makeuputils._halo import Halo
from thorpy.elements._makeuputils._shadow import StaticShadow
from thorpy.elements._makeuputils._shadow import DynamicShadow
# menus:
from thorpy.menus.tickedmenu import TickedMenu as Menu
from thorpy.menus.basicmenu import BasicMenu
# miscellaneous stuff, constants, parameters
from thorpy.miscgui.application import Application
from thorpy.miscgui.reaction import Reaction, ConstantReaction
from thorpy.miscgui import constants, functions
from thorpy.miscgui.functions import get_screen
from thorpy.miscgui.functions import get_current_application as get_application
from thorpy.miscgui.functions import get_current_menu
from thorpy.miscgui import style
from thorpy.miscgui import painterstyle
from thorpy.miscgui import parameters
from thorpy.miscgui.initializer import Initializer
from thorpy.miscgui.state import State
from thorpy.miscgui.storage import Storer, store
from thorpy.miscgui.title import Title
from thorpy.miscgui.varset import VarSet
from thorpy.miscgui import theme
from thorpy.miscgui.theme import set_theme as set_theme
from thorpy.miscgui.metadata import MetaDataManager
from thorpy.miscgui.pools import TogglablePool, RadioPool
from thorpy.miscgui.launchers.launcher import set_launcher, make_launcher, get_launcher, Launcher, make_ok_cancel_box, launch, make_ok_box
from thorpy.painting.writer import Writer
from thorpy.painting import painters
from thorpy.painting import makeup
##from thorpy.painting.painters.imageframe import ButtonImage
from thorpy.painting import mousecursor
from thorpy.gamestools import basegrid
from thorpy.gamestools.basegrid import BaseGrid
from thorpy.gamestools.grid import Grid, PygameGrid
del thorpy, pygame, os, sys
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
testutils/file/collection/data.go | // Copyright 2020 The Sodafoundation Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
This package includes a collection of fake data for testing.
*/
package collection
import (
backendModel "github.com/opensds/multi-cloud/backend/pkg/model"
fileModel "github.com/opensds/multi-cloud/file/pkg/model"
)
var (
size = int64(2000)
sizeptr = &size
isEncrypted = false
SampleGetFileShares = []fileModel.FileShare{
{
Id: "3769855c-a102-11e7-b772-17b880d2f537",
CreatedAt: "CreatedAt",
UpdatedAt: "UpdatedAt",
Name: "sample-fileshare-01",
Description: "This is first sample fileshare for testing",
UserId: "Sample-UserID",
Backend: "Sample-Backend",
BackendId: "Sample-BackendId",
Size: sizeptr,
Type: "Sample-Type",
TenantId: "Sample-TenantId",
Status: "available",
Region: "asia",
AvailabilityZone: "default",
Protocols: []string{"iscsi"},
SnapshotId: "snapshotid",
Encrypted: &isEncrypted,
EncryptionSettings: map[string]string{"foo": "bar"},
},
}
SampleFileShare1 = fileModel.FileShare{
Id: "3769855c-a102-11e7-b772-17b880d2f539",
CreatedAt: "CreatedAt",
UpdatedAt: "UpdatedAt",
Name: "sample-fileshare-01",
Description: "This is first sample fileshare for testing",
UserId: "Sample-UserID",
Backend: "Sample-Backend",
BackendId: "Sample-BackendId",
Size: sizeptr,
Type: "Sample-Type",
TenantId: "Sample-TenantId",
Status: "available",
Region: "asia",
AvailabilityZone: "default",
Protocols: []string{"iscsi"},
SnapshotId: "snapshotid",
Encrypted: &isEncrypted,
EncryptionSettings: map[string]string{"foo": "bar"},
}
SampleFileShare2 = fileModel.FileShare{
Id: "3769855c-a102-11e7-b772-17b880d2f530",
CreatedAt: "CreatedAt",
UpdatedAt: "UpdatedAt",
Name: "sample-fileshare-01",
Description: "This is first sample fileshare for testing",
UserId: "Sample-UserID",
Backend: "Sample-Backend",
BackendId: "Sample-BackendId",
Size: sizeptr,
Type: "Sample-Type",
TenantId: "Sample-TenantId",
Status: "available",
Region: "asia",
AvailabilityZone: "default",
Protocols: []string{"iscsi"},
SnapshotId: "snapshotid",
Encrypted: &isEncrypted,
EncryptionSettings: map[string]string{"foo": "bar"},
}
SampleListFileShares = []*fileModel.FileShare{
&SampleFileShare1,
&SampleFileShare2,
}
)
var (
SampleBackendDetails = []backendModel.Backend{
{
Id: "4769855c-a102-11e7-b772-17b880d2f530",
TenantId: "sample-backend-tenantID",
UserId: "sample-backend-userID",
Name: "sample-backend-name",
Type: "sample-backend-type",
Region: "sample-backend-region",
Endpoint: "sample-backend-endpoint",
BucketName: "sample-backend-bucketname",
Access: "sample-backend-access",
Security: "sample-backend-security",
},
}
)
| []
| []
| []
| [] | [] | go | null | null | null |
tests/test_decorators.py | import os
import unittest
from typing import Optional
from django.http import HttpResponse
from django.test import RequestFactory
from request_limiter import request_limiter, LimitedIntervalStrategy, \
LimitStrategy, LimitException, django_request_limiter
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_settings')
req_factory = RequestFactory()
class MockStrategy(LimitStrategy):
def __init__(self, allow: bool):
self._allow = allow
def allow(self, key: Optional[str] = None) -> bool:
return self._allow
def get_remaining(self, key: Optional[str] = None) -> float:
return 1
def clean(self):
pass
class TestRequestLimiterDecorator(unittest.TestCase):
def test_when_strategy_not_given_uses_limited_interval_strategy(self):
limiter = request_limiter()
self.assertTrue(isinstance(limiter.strategy, LimitedIntervalStrategy))
def test_when_strategy_allows_invokes_function(self):
@request_limiter(strategy=MockStrategy(allow=True))
def test_func() -> bool:
return True
self.assertTrue(test_func())
def test_when_strategy_denies_raises_exception(self):
@request_limiter(strategy=MockStrategy(allow=False))
def test_func() -> bool:
return True
self.assertRaises(LimitException, test_func)
class TestDjangoRequestLimiter(unittest.TestCase):
def test_limits_based_on_ip(self):
@django_request_limiter
@request_limiter(strategy=LimitedIntervalStrategy(requests=1))
def test_view(request):
return True
res1 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.1'))
assert res1, 'Expected first request to work'
res2 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.1'))
assert isinstance(res2, HttpResponse), 'Expected limit http response'
assert res2.status_code == 429, 'Expected 429 response code'
# change Ip
res3 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.2'))
assert res3, 'Expected different ip request to work'
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/validate-krew-manifest/main.go | // Copyright 2019 The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// validate-krew-manifest makes sure a manifest file is valid.
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/golang/glog"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/krew/pkg/constants"
"sigs.k8s.io/krew/pkg/index"
"sigs.k8s.io/krew/pkg/index/indexscanner"
)
var flManifest string
func init() {
flag.StringVar(&flManifest, "manifest", "", "path to plugin manifest file")
flag.Set("logtostderr", "true") // Set glog default to stderr
// TODO(ahmetb) iterate over glog flags and hide them (not sure if possible without using pflag)
flag.Parse()
}
func main() {
defer glog.Flush()
if flManifest == "" {
glog.Fatal("-manifest must be specified")
}
if err := validateManifestFile(flManifest); err != nil {
glog.Fatalf("%v", err) // with stack trace
}
}
func validateManifestFile(path string) error {
glog.V(4).Infof("reading file %s", path)
p, err := indexscanner.ReadPluginFile(path)
if err != nil {
return errors.Wrap(err, "failed to read plugin file")
}
filename := filepath.Base(path)
manifestExtension := filepath.Ext(filename)
if manifestExtension != constants.ManifestExtension {
return fmt.Errorf("expected manifest extension %q but found %q", constants.ManifestExtension, manifestExtension)
}
pluginNameFromFileName := strings.TrimSuffix(filename, manifestExtension)
glog.V(4).Infof("inferred plugin name as %s", pluginNameFromFileName)
// validate plugin manifest
if err := p.Validate(pluginNameFromFileName); err != nil {
return errors.Wrap(err, "plugin validation error")
}
glog.Infof("structural validation OK")
// make sure each platform matches a supported platform
for i, p := range p.Spec.Platforms {
if os, arch := findAnyMatchingPlatform(p.Selector); os == "" || arch == "" {
return errors.Errorf("spec.platform[%d]'s selector (%v) doesn't match any supported platforms", i, p.Selector)
}
}
glog.Infof("all spec.platform[] items are used")
// validate no supported <os,arch> is matching multiple platform specs
if err := isOverlappingPlatformSelectors(p.Spec.Platforms); err != nil {
return errors.Wrap(err, "overlapping platform selectors found")
}
glog.Infof("no overlapping spec.platform[].selector")
// exercise "install" for all platforms
for i, p := range p.Spec.Platforms { // TODO(ahmetb) make this a testable method
glog.Infof("installing spec.platform[%d]", i)
if err := installPlatformSpec(path, p); err != nil {
return errors.Wrapf(err, "spec.platforms[%d] failed to install", i)
}
glog.Infof("installed spec.platforms[%d]", i)
}
log.Printf("all %d spec.platforms installed fine", len(p.Spec.Platforms))
return nil
}
// isOverlappingPlatformSelectors validates if multiple platforms have selectors
// that match to a supported <os,arch> pair.
func isOverlappingPlatformSelectors(platforms []index.Platform) error {
// TODO(ahmetb) implement
for _, v := range allPlatforms() {
os, arch := v[0], v[1]
var matchIndex []int
for i, p := range platforms {
if selectorMatchesOSArch(p.Selector, os, arch) {
matchIndex = append(matchIndex, i)
}
}
if len(matchIndex) > 1 {
return errors.Errorf("multiple spec.platforms (at indexes %v) have overlapping selectors that select os=%s/arch=%s", matchIndex, os, arch)
}
}
return nil
}
// installPlatformSpec installs the plugin described by p into a temporary
// location on disk by shelling out to "kubectl krew install", to verify it installs.
func installPlatformSpec(manifestFile string, p index.Platform) error {
goos, goarch := findAnyMatchingPlatform(p.Selector)
if goos == "" || goarch == "" {
return errors.Errorf("no supported platform matched platform selector: %+v", p.Selector)
}
tmpDir, err := ioutil.TempDir(os.TempDir(), "krew-test")
if err != nil {
return errors.Wrap(err, "failed to create temp dir for plugin install")
}
defer func() {
if err := os.RemoveAll(tmpDir); err != nil {
glog.Warningf("failed to remove temp dir: %s", tmpDir)
}
}()
cmd := exec.Command("kubectl", "krew", "install", "--manifest", manifestFile, "-v=4")
cmd.Stdin = nil
cmd.Env = []string{
"KREW_ROOT=" + tmpDir,
"KREW_OS=" + goos,
"KREW_ARCH=" + goarch,
}
glog.V(2).Infof("installing plugin with: %+v", cmd.Env)
cmd.Env = append(cmd.Env, "PATH="+os.Getenv("PATH"))
b, err := cmd.CombinedOutput()
if err != nil {
output := strings.Replace(string(b), "\n", "\n\t", -1)
return errors.Wrapf(err, "plugin install command failed: %s", output)
}
return nil
}
// findAnyMatchingPlatform finds an <os,arch> pair that matches the given selector
func findAnyMatchingPlatform(selector *metav1.LabelSelector) (string, string) {
for _, p := range allPlatforms() {
if selectorMatchesOSArch(selector, p[0], p[1]) {
glog.V(4).Infof("%s MATCHED <%s,%s>", selector, p[0], p[1])
return p[0], p[1]
}
glog.V(4).Infof("%s didn't match <%s,%s>", selector, p[0], p[1])
}
return "", ""
}
func selectorMatchesOSArch(selector *metav1.LabelSelector, os, arch string) bool {
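	// For example, a selector with matchLabels {os: linux, arch: amd64} matches
	// only the <linux,amd64> pair from allPlatforms.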
sel, err := metav1.LabelSelectorAsSelector(selector)
if err != nil {
		// this should've been caught by platform.Validate() earlier
glog.Warningf("Failed to convert label selector: %+v", selector)
return false
}
return sel.Matches(labels.Set{
"os": os,
"arch": arch,
})
}
// allPlatforms returns all <os,arch> pairs recognized.
func allPlatforms() [][2]string {
// TODO(ahmetb) find a more authoritative source for this list
return [][2]string{
{"windows", "386"},
{"windows", "amd64"},
{"linux", "386"},
{"linux", "amd64"},
{"linux", "arm"},
{"darwin", "386"},
{"darwin", "amd64"},
}
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
collect_photos.py | import cv2, os, time, sys
import numpy as np
cap = cv2.VideoCapture(cv2.CAP_DSHOW)
cap.set(3, 1280)
cap.set(4, 720)
subject = sys.argv[1]
num_of_photos = int(sys.argv[2])
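# expected invocation (inferred from the argv handling above):
#   python collect_photos.py <subject_name> <num_of_photos>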
save_path = 'valid/'+subject
delay = 0.2
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
if not os.path.exists(save_path):
os.makedirs(save_path)
i = 0
while i < num_of_photos:
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100), flags = cv2.CASCADE_SCALE_IMAGE)
for (x, y, w, h) in faces:
img_slice = frame[y:y+h, x:x+w]
img_slice = cv2.resize(img_slice, (160,160), interpolation = cv2.INTER_AREA)
cv2.imwrite(save_path+'/'+subject+'_{:06d}.png'.format(i), img_slice)
i+=1
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
time.sleep(delay)
cap.release()
cv2.destroyAllWindows() | []
| []
| []
| [] | [] | python | null | null | null |
db.py | import motor.motor_asyncio
import os
import time
import uuid
from datetime import datetime, timedelta
connection_url = os.getenv('dburi')
client = motor.motor_asyncio.AsyncIOMotorClient(connection_url)
db = client.discord
member_data = db['members']
infractions_data = db['infractions']
servers_data = db['servers']
async def set_minecraft_ign(user_id, ign, uuid):
if not connection_url: return
await member_data.update_one(
{
'discord': user_id
},
{
'$set': {
'minecraft': {
'ign': ign,
'uuid': uuid
}
}
},
upsert=True
)
async def get_minecraft_data(user_id):
if not connection_url: return
data = await member_data.find_one({
'discord': user_id,
})
if data:
return data.get('minecraft')
async def set_hypixel_rank(user_id, rank):
if not connection_url: return
await member_data.update_one(
{
'discord': user_id
},
{
'$set': {
'hypixel_rank': rank
}
},
upsert=True
)
async def get_hypixel_rank(user_id):
if not connection_url: return
data = await member_data.find_one({
'discord': user_id,
})
if data:
return data.get('hypixel_rank')
async def set_mute_end(user_id, end_time, extra_data={}):
if not connection_url: return
set_data = {
'muted_until': end_time
}
for data in extra_data:
set_data[f'muted_data.{data}'] = extra_data[data]
set_data['muted'] = end_time > time.time()
await member_data.update_one(
{
'discord': user_id
},
{
'$set': set_data
},
upsert=True
)
async def get_is_muted(user_id):
if not connection_url: return
data = await member_data.find_one(
{
'discord': int(user_id)
}
)
if data:
return data.get('muted', False)
else:
return 0
async def get_mute_end(user_id):
if not connection_url: return 0
data = await member_data.find_one(
{
'discord': int(user_id)
}
)
if data:
return data.get('muted_until', 0)
else:
return 0
async def get_mute_data(user_id):
if not connection_url: return {}
data = await member_data.find_one(
{
'discord': int(user_id)
}
)
if data:
return data.get('muted_data', {})
else:
return 0
async def get_active_mutes():
if not connection_url: return
active_mutes = {}
async for member in member_data.find(
{
'muted_until': {
'$gte': time.time()
}
}
):
active_mutes[member['discord']] = member['muted_until']
return active_mutes
async def add_infraction(user_id: int, infraction_type, reason, mute_length=0):
if not connection_url: return
infraction_uuid = str(uuid.uuid4())
await infractions_data.insert_one({
'_id': infraction_uuid,
'user': user_id,
'type': infraction_type,
'reason': reason,
'date': datetime.now(),
        'length': str(mute_length)  # must be a string, otherwise MongoDB rejects very long mute lengths
})
async def get_infractions(user_id: int):
if not connection_url: return
infractions = []
async for infraction in infractions_data.find({
'user': user_id,
'date': {'$gt': datetime.now() - timedelta(days=30)}
}):
infractions.append(infraction)
return infractions
async def clear_infractions(user_id: int, date):
if not connection_url: return
r = await infractions_data.delete_many({
'user': user_id,
'date': {
'$gte': date,
'$lte': date + timedelta(days=1),
}
})
return r.deleted_count
async def clear_recent_infraction(user_id: int):
if not connection_url: return
async for infraction in infractions_data\
.find({'user': user_id})\
.sort('date', -1)\
.limit(1):
await clear_infraction(infraction['_id'])
async def clear_infraction(infraction_id):
await infractions_data.delete_one({'_id': infraction_id})
async def clear_infraction_by_partial_id(infraction_partial_id):
infraction_data = await infractions_data.find_one({'_id': {
'$regex': '^' + infraction_partial_id + '.*'
}})
if not infraction_data: return None
await infractions_data.delete_one({'_id': infraction_data['_id']})
return infraction_data
async def set_rock(user_id: int):
if not connection_url: return
await member_data.update_one(
{
'discord': user_id
},
{
'$set': {
'last_rock': time.time()
}
},
upsert=True
)
async def get_rock(user_id: int):
if not connection_url: return
data = await member_data.find_one(
{
'discord': int(user_id)
}
)
if data:
return data.get('last_rock', 0)
else:
return 0
async def add_message(user_id: int):
if not connection_url: return
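    # Bucket counts by "hours since the Unix epoch" so per-hour activity can be queried later.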
hour_id = int(time.time() / 3600)
await member_data.update_one(
{
'discord': user_id
},
{
'$inc': {
f'messages.{hour_id}': 1
}
},
upsert=True
)
async def get_active_members_from_past_hour(hoursago=1):
if not connection_url: return
hour_id = int((time.time()) / 3600) - hoursago
members = []
async for member in member_data.find(
{
f'messages.{hour_id}': {'$gte': 1}
}
):
print('bruh', member, hour_id)
member_modified = member
member_modified['hourly_messages'] = member['messages'].get(str(hour_id), 0)
del member_modified['messages']
members.append(member_modified)
return members
async def set_is_member(user_id: int):
if not connection_url: return
await member_data.update_one(
{
'discord': user_id
},
{
'$set': {
'member': True
}
},
upsert=True
)
async def get_is_member(user_id: int):
if not connection_url: return
data = await member_data.find_one(
{
'discord': int(user_id)
}
)
if data:
return data.get('member', False)
else:
return 0
async def set_counter(guild_id: int, number: int):
if not connection_url: return
await servers_data.update_one(
{
'id': guild_id
},
{
'$set': {
'counter': number
}
},
upsert=True
)
async def get_counter(guild_id: int):
if not connection_url: return
data = await servers_data.find_one({
'id': guild_id,
})
if data:
return data.get('counter', 0)
async def set_last_general_duel(guild_id: int):
if not connection_url: return
await servers_data.update_one(
{
'id': guild_id
},
{
'$set': {
'last_duel': datetime.now()
}
},
upsert=True
)
async def get_last_general_duel(guild_id: int):
if not connection_url: return
data = await servers_data.find_one({
'id': guild_id,
})
if data:
return data.get('last_duel')
async def set_bobux(user_id: int, amount: int):
if not connection_url: return
await member_data.update_one(
{
'discord': user_id
},
{
'$set': {
'bobux': amount
}
},
upsert=True
)
async def get_bobux(user_id: int):
if not connection_url: return
data = await member_data.find_one(
{
'discord': user_id
}
)
    return (data or {}).get('bobux', 0)  # guard against a missing member document
async def change_bobux(user_id: int, amount: int):
if not connection_url: return
await member_data.update_one(
{
'discord': user_id
},
{
'$inc': {
'bobux': amount
}
},
upsert=True
)
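# Note: despite the name, this marks the shop item as owned for the user (it performs an update, not a read).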
async def get_shop_item(user_id: int, shop_item_id: str):
if not connection_url: return
await member_data.update_one(
{
'discord': user_id
},
{
'$set': {
f'shop.{shop_item_id}': True
}
},
upsert=True
)
async def get_bought_shop_items(user_id: int):
if not connection_url: return
data = await member_data.find_one(
{
'discord': user_id
}
)
shop_items = set()
    for item in (data or {}).get('shop', {}):  # guard against a missing member document
if data['shop'][item]:
shop_items.add(item)
return shop_items
async def has_shop_item(user_id: int, shop_item_id: str):
if not connection_url: return
shop_items = await get_bought_shop_items(user_id)
return shop_item_id in shop_items
async def spend_shop_item(user_id: int, shop_item_id: str):
has_item = await has_shop_item(user_id, shop_item_id)
if has_item:
await lose_shop_item(user_id, shop_item_id)
return has_item
async def lose_shop_item(user_id: int, shop_item_id: str):
if not connection_url: return
await member_data.update_one(
{
'discord': user_id
},
{
'$set': {
f'shop.{shop_item_id}': False
}
},
upsert=True
)
| []
| []
| [
"dburi"
]
| [] | ["dburi"] | python | 1 | 0 | |
samples-python/datalayer.calc/main.py | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 Bosch Rexroth AG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import faulthandler
import time
import datalayer
from datalayer.variant import Result
from calculations.basic_arithmetic_operations import BasicArithmeticOperations
# Do NOT change these values
connection_ipc = "ipc://"
port_client = ":2069"
port_provider = ":2070"
addr_root = "sdk-py-calc"
# This is the connection string for TCP in the format: tcp://USER:PASSWORD@IP_ADDRESS
# Please check and change according your environment:
# - USER: Enter your user name here - default is boschrexroth
# - PASSWORD: Enter your password here - default is boschrexroth
# - IP_ADDRESS: 127.0.0.1 If you develop in WSL and you want to connect to a ctrlX CORE virtual with port forwarding
# 10.0.2.2 If you develop in a VM (Virtual Box, QEMU,...) and you want to connect to a ctrlX virtual with port forwarding
# 192.168.1.1 If you are using a ctrlX CORE or ctrlX CORE virtual with TAP adapter
connection_tcp = "tcp://boschrexroth:[email protected]"
def is_snap() -> bool:
return 'SNAP' in os.environ
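# Registers the Data Layer nodes for one arithmetic operation, retries the subscription until it succeeds and returns the operation object.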
def start_new_basic_arithmetic_operation(
provider: datalayer.provider.Provider,
client: datalayer.client.Client,
id: str,
mode: str):
basicArithmeticOperation = BasicArithmeticOperations(
provider, client, addr_root, id, mode)
basicArithmeticOperation.register_nodes()
    result = basicArithmeticOperation.subscribe()
    while result != Result.OK:
        basicArithmeticOperation.unsubscribe()
        print("WARN Starting Data Layer subscriptions for",
              addr_root + "/" + id, "failed with: " + str(result))
        print("INFO Retry in 5s")
        time.sleep(5.0)
        result = basicArithmeticOperation.subscribe()
    return basicArithmeticOperation
if __name__ == '__main__':
print()
print("===========================================================================")
print("ctrlX Application in Python:")
print("- Reads Data Layer values per subscription.")
print("- Runs a algorithmn")
print("- Provides result as Data Layer Node")
print()
print("Will be restarted by the snap system on error.")
print("===========================================================================")
print()
faulthandler.enable()
system = datalayer.system.System("")
system.start(False)
if is_snap():
connection_client = connection_ipc
connection_provider = connection_ipc
else:
connection_client = connection_tcp + port_client
connection_provider = connection_tcp + port_provider
client = system.factory().create_client(connection_client)
client.set_timeout(datalayer.client.TimeoutSetting.PING, 5000)
provider = system.factory().create_provider(connection_provider)
result = provider.start()
if result != Result.OK:
print("ERROR Starting Data Layer provider failed with: " + str(result))
sys.exit(1)
bao_plus = start_new_basic_arithmetic_operation(provider, client, "plus", "+")
# Endless loop
while client.is_connected():
time.sleep(10.0)
print("ERROR Data Layer client is disconnected - exiting application. Will be restarted automatically.")
bao_plus.unsubscribe()
client.close()
system.stop(True)
sys.exit(3)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
sonar-breaker/src/main/java/com/github/daggerok/sonarbreaker/infrastructure/Config.java | package com.github.daggerok.sonarbreaker.infrastructure;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import java.util.Arrays;
import java.util.Objects;
@NoArgsConstructor(access = AccessLevel.PRIVATE)
public class Config {
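    // Resolution order: command-line "prop=value" arguments win, then the JVM system property,
    // then the PROP_NAME environment variable (dots replaced by underscores, upper-cased), then the default value.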
public static String get(final Env env, final String... args) {
return get(env.systemProperty, env.value, args);
}
public static String get(final String prop, final String defaultValue, final String... args) {
final String envVariableName = prop.replaceAll("\\.", "_").toUpperCase();
final String envVariable = System.getenv().getOrDefault(envVariableName, defaultValue); // NOSONAR
return Arrays.stream(args)
.filter(Objects::nonNull)
.map(String::trim)
.filter(s -> !s.isEmpty())
.filter(s -> s.startsWith(prop))
.map(s -> s.split("="))
.map(pair -> pair[1])
.findFirst()
.orElse(System.getProperty(prop, envVariable));
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
utils/indigo-service/service/test/tests.py | import os
import time
import unittest
import requests
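# Smoke-test runner: wait up to 60 seconds for the front container to answer /info, then discover and run the API test suites.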
if __name__ == "__main__":
service_url = "http://front/v2"
if (
"INDIGO_SERVICE_URL" in os.environ
and len(os.environ["INDIGO_SERVICE_URL"]) > 0
):
service_url = os.environ["INDIGO_SERVICE_URL"]
start_time = time.time()
service_is_up = False
while time.time() - start_time < 60:
try:
if (
requests.get(
"{}/info".format(service_url), timeout=None
).status_code
== 200
):
service_is_up = True
break
print("Waiting for front container getting ready...")
except Exception:
pass
finally:
time.sleep(1)
if not service_is_up:
raise RuntimeError(
"Front container service seems to be down, stopping..."
)
print("Front container is ready, starting tests...")
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
ignore_pattern = ""
if (
"IGNORE_PATTERN" in os.environ
and len(os.environ["IGNORE_PATTERN"]) > 0
):
ignore_pattern = os.environ["IGNORE_PATTERN"]
for all_test_suite in unittest.defaultTestLoader.discover(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "api"),
pattern="*.py",
):
for test_suite in all_test_suite:
if not (
len(ignore_pattern) > 0
and ignore_pattern in str(test_suite)
):
suite.addTests(test_suite)
return suite
exit(unittest.main(verbosity=2, warnings="ignore"))
| []
| []
| [
"INDIGO_SERVICE_URL",
"IGNORE_PATTERN"
]
| [] | ["INDIGO_SERVICE_URL", "IGNORE_PATTERN"] | python | 2 | 0 | |
lab-day/LabDayBackend/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "LabDayBackend.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
RequestHandler.go | package seebeez
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"os"
"github.com/pkg/errors"
)
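// requestHandler wraps the HTTP calls to the Seebeez API.
// handle and checkStatus require a bearer token in the SeebeezAuth environment variable.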
type requestHandler struct{}
func (r *requestHandler) handle(s Seebeez) (response, error) {
// Stop application if no Auth Token is found
if os.Getenv("SeebeezAuth") == "" {
return response{}, errors.New("no auth token set")
}
// Prepare JSON
	obj, err := json.Marshal(s)
	if err != nil {
		return response{}, err
	}
	req, err := http.NewRequest("POST", getURL("job"), bytes.NewBuffer(obj))
	if err != nil {
		return response{}, err
	}
// Set appropriate headers
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
req.Header.Set("Authorization", "Bearer "+os.Getenv("SeebeezAuth"))
// Prepare an HTTP request
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return response{}, err
}
body, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
return response{body}, err
}
func (r *requestHandler) checkStatus(res ResInfo) (JobResponse, error) {
// Stop application if no Auth Token is found
if os.Getenv("SeebeezAuth") == "" {
return JobResponse{}, errors.New("no auth token")
}
req, err := http.NewRequest("GET", getURL("job/"+res.ID), nil)
//if err != nil {
// return JobResponse{}, err
//}
// Set appropriate headers
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
req.Header.Set("Authorization", "Bearer "+os.Getenv("SeebeezAuth"))
// Prepare an HTTP request
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return JobResponse{}, err
}
body, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
data := JobResponse{}
err = json.Unmarshal(body, &data)
return data, err
}
func (r *requestHandler) getServiceDetails(a *ServiceAPI) ([]byte, error) {
serviceJSON := struct {
Link string `json:"link"`
Format string `json:"format"`
}{a.Link, a.Format}
	obj, err := json.Marshal(serviceJSON)
	if err != nil {
		return []byte{}, err
	}
	req, err := http.NewRequest("POST", a.URL, bytes.NewBuffer(obj))
	if err != nil {
		return []byte{}, err
	}
// Set appropriate headers
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
// Prepare an HTTP request
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return []byte{}, err
}
body, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
return body, err
}
| [
"\"SeebeezAuth\"",
"\"SeebeezAuth\"",
"\"SeebeezAuth\"",
"\"SeebeezAuth\""
]
| []
| [
"SeebeezAuth"
]
| [] | ["SeebeezAuth"] | go | 1 | 0 | |
internal/uvm/create_lcow.go | //go:build windows
package uvm
import (
"context"
"encoding/base64"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/Microsoft/hcsshim/pkg/securitypolicy"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"github.com/Microsoft/hcsshim/internal/gcs"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/processorinfo"
"github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/osversion"
)
// General information about how this works at a high level.
//
// The purpose is to start an LCOW Utility VM or UVM using the Host Compute Service, an API to create and manipulate running virtual machines
// HCS takes json descriptions of the work to be done.
//
// When a pod (there is a one to one mapping of pod to UVM) is to be created various annotations and defaults are combined into an options object which is
// passed to CreateLCOW (see below) where the options are transformed into a json document to be presented to the HCS VM creation code.
//
// There are two paths in CreateLCOW to creating the json document. The most flexible case is makeLCOWDoc which is used where no specialist hardware security
// applies, then there is makeLCOWSecurityDoc which is used in the case of AMD SEV-SNP memory encryption and integrity protection. There is quite
// a lot of difference between the two paths, for example the regular path has options about the type of kernel and initrd binary whereas the AMD SEV-SNP
// path has only one file but there are many other detail differences, so the code is split for clarity.
//
// makeLCOW*Doc returns an instance of hcsschema.ComputeSystem. That is then serialised to the json string provided to the flat C api. A similar scheme is used
// for later adjustments, for example adding a network adaptor.
//
// Examples of the resulting json are inline as comments by these two functions to show the eventual effect of the code.
//
// Note that the schema files, ie the Go objects that represent the json, are generated outside of the local build process.
type PreferredRootFSType int
const (
PreferredRootFSTypeInitRd PreferredRootFSType = iota
PreferredRootFSTypeVHD
PreferredRootFSTypeNA
entropyVsockPort = 1
linuxLogVsockPort = 109
)
// OutputHandler is used to process the output from the program run in the UVM.
type OutputHandler func(io.Reader)
const (
// InitrdFile is the default file name for an initrd.img used to boot LCOW.
InitrdFile = "initrd.img"
// VhdFile is the default file name for a rootfs.vhd used to boot LCOW.
VhdFile = "rootfs.vhd"
// KernelFile is the default file name for a kernel used to boot LCOW.
KernelFile = "kernel"
// UncompressedKernelFile is the default file name for an uncompressed
// kernel used to boot LCOW with KernelDirect.
UncompressedKernelFile = "vmlinux"
// In the SNP case both the kernel (bzImage) and initrd are stored in a vmgs (VM Guest State) file
GuestStateFile = "kernelinitrd.vmgs"
)
// OptionsLCOW are the set of options passed to CreateLCOW() to create a utility vm.
type OptionsLCOW struct {
*Options
BootFilesPath string // Folder in which kernel and root file system reside. Defaults to \Program Files\Linux Containers
KernelFile string // Filename under `BootFilesPath` for the kernel. Defaults to `kernel`
KernelDirect bool // Skip UEFI and boot directly to `kernel`
RootFSFile string // Filename under `BootFilesPath` for the UVMs root file system. Defaults to `InitrdFile`
KernelBootOptions string // Additional boot options for the kernel
EnableGraphicsConsole bool // If true, enable a graphics console for the utility VM
ConsolePipe string // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe
UseGuestConnection bool // Whether the HCS should connect to the UVM's GCS. Defaults to true
ExecCommandLine string // The command line to exec from init. Defaults to GCS
ForwardStdout bool // Whether stdout will be forwarded from the executed program. Defaults to false
ForwardStderr bool // Whether stderr will be forwarded from the executed program. Defaults to true
OutputHandler OutputHandler `json:"-"` // Controls how output received over HVSocket from the UVM is handled. Defaults to parsing output as logrus messages
VPMemDeviceCount uint32 // Number of VPMem devices. Defaults to `DefaultVPMEMCount`. Limit at 128. If booting UVM from VHD, device 0 is taken.
VPMemSizeBytes uint64 // Size of the VPMem devices. Defaults to `DefaultVPMemSizeBytes`.
VPMemNoMultiMapping bool // Disables LCOW layer multi mapping
PreferredRootFSType PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD`
EnableColdDiscardHint bool // Whether the HCS should use cold discard hints. Defaults to false
VPCIEnabled bool // Whether the kernel should enable pci
EnableScratchEncryption bool // Whether the scratch should be encrypted
SecurityPolicy string // Optional security policy
SecurityPolicyEnabled bool // Set when there is a security policy to apply on actual SNP hardware, use this rathen than checking the string length
UseGuestStateFile bool // Use a vmgs file that contains a kernel and initrd, required for SNP
GuestStateFile string // The vmgs file to load
DisableTimeSyncService bool // Disables the time synchronization service
}
// defaultLCOWOSBootFilesPath returns the default path used to locate the LCOW
// OS kernel and root FS files. This default is the subdirectory
// `LinuxBootFiles` in the directory of the executable that started the current
// process; or, if it does not exist, `%ProgramFiles%\Linux Containers`.
func defaultLCOWOSBootFilesPath() string {
localDirPath := filepath.Join(filepath.Dir(os.Args[0]), "LinuxBootFiles")
if _, err := os.Stat(localDirPath); err == nil {
return localDirPath
}
return filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers")
}
// NewDefaultOptionsLCOW creates the default options for a bootable version of
// LCOW.
//
// `id` the ID of the compute system. If not passed will generate a new GUID.
//
// `owner` the owner of the compute system. If not passed will use the
// executable files name.
func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW {
// Use KernelDirect boot by default on all builds that support it.
kernelDirectSupported := osversion.Build() >= 18286
opts := &OptionsLCOW{
Options: newDefaultOptions(id, owner),
BootFilesPath: defaultLCOWOSBootFilesPath(),
KernelFile: KernelFile,
KernelDirect: kernelDirectSupported,
RootFSFile: InitrdFile,
KernelBootOptions: "",
EnableGraphicsConsole: false,
ConsolePipe: "",
UseGuestConnection: true,
ExecCommandLine: fmt.Sprintf("/bin/gcs -v4 -log-format json -loglevel %s", logrus.StandardLogger().Level.String()),
ForwardStdout: false,
ForwardStderr: true,
OutputHandler: parseLogrus(id),
VPMemDeviceCount: DefaultVPMEMCount,
VPMemSizeBytes: DefaultVPMemSizeBytes,
VPMemNoMultiMapping: osversion.Get().Build < osversion.V19H1,
PreferredRootFSType: PreferredRootFSTypeInitRd,
EnableColdDiscardHint: false,
VPCIEnabled: false,
EnableScratchEncryption: false,
SecurityPolicyEnabled: false,
SecurityPolicy: "",
GuestStateFile: "",
DisableTimeSyncService: false,
}
if _, err := os.Stat(filepath.Join(opts.BootFilesPath, VhdFile)); err == nil {
// We have a rootfs.vhd in the boot files path. Use it over an initrd.img
opts.RootFSFile = VhdFile
opts.PreferredRootFSType = PreferredRootFSTypeVHD
}
if kernelDirectSupported {
// KernelDirect supports uncompressed kernel if the kernel is present.
// Default to uncompressed if on box. NOTE: If `kernel` is already
// uncompressed and simply named 'kernel' it will still be used
// uncompressed automatically.
if _, err := os.Stat(filepath.Join(opts.BootFilesPath, UncompressedKernelFile)); err == nil {
opts.KernelFile = UncompressedKernelFile
}
}
return opts
}
// Get an acceptable number of processors given option and actual constraints.
func fetchProcessor(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (*hcsschema.Processor2, error) {
processorTopology, err := processorinfo.HostProcessorInfo(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get host processor information: %s", err)
}
// To maintain compatibility with Docker we need to automatically downgrade
// a user CPU count if the setting is not possible.
uvm.processorCount = uvm.normalizeProcessorCount(ctx, opts.ProcessorCount, processorTopology)
processor := &hcsschema.Processor2{
Count: uvm.processorCount,
Limit: opts.ProcessorLimit,
Weight: opts.ProcessorWeight,
}
// We can set a cpu group for the VM at creation time in recent builds.
if opts.CPUGroupID != "" {
if osversion.Build() < osversion.V21H1 {
return nil, errCPUGroupCreateNotSupported
}
processor.CpuGroup = &hcsschema.CpuGroup{Id: opts.CPUGroupID}
}
return processor, nil
}
/*
Example JSON document produced once the hcsschema.ComputeSystem returned by makeLCOWSecurityDoc is serialised:
{
"Owner": "containerd-shim-runhcs-v1.exe",
"SchemaVersion": {
"Major": 2,
"Minor": 5
},
"ShouldTerminateOnLastHandleClosed": true,
"VirtualMachine": {
"Chipset": {
"Uefi": {
"ApplySecureBootTemplate": "Apply",
"SecureBootTemplateId": "1734c6e8-3154-4dda-ba5f-a874cc483422"
}
},
"ComputeTopology": {
"Memory": {
"SizeInMB": 1024
},
"Processor": {
"Count": 2
}
},
"Devices": {
"Scsi" : { "0" : {} },
"HvSocket": {
"HvSocketConfig": {
"DefaultBindSecurityDescriptor": "D:P(A;;FA;;;WD)",
"DefaultConnectSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
"ServiceTable" : {
"00000808-facb-11e6-bd58-64006a7986d3" : {
"AllowWildcardBinds" : true,
"BindSecurityDescriptor": "D:P(A;;FA;;;WD)",
"ConnectSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)"
},
"0000006d-facb-11e6-bd58-64006a7986d3" : {
"AllowWildcardBinds" : true,
"BindSecurityDescriptor": "D:P(A;;FA;;;WD)",
"ConnectSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)"
},
"00000001-facb-11e6-bd58-64006a7986d3" : {
"AllowWildcardBinds" : true,
"BindSecurityDescriptor": "D:P(A;;FA;;;WD)",
"ConnectSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)"
},
"40000000-facb-11e6-bd58-64006a7986d3" : {
"AllowWildcardBinds" : true,
"BindSecurityDescriptor": "D:P(A;;FA;;;WD)",
"ConnectSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)"
}
}
}
},
"Plan9": {}
},
"GuestState": {
"GuestStateFilePath": "d:\\ken\\aug27\\gcsinitnew.vmgs",
"GuestStateFileType": "FileMode",
"ForceTransientState": true
},
"SecuritySettings": {
"Isolation": {
"IsolationType": "SecureNestedPaging",
"LaunchData": "kBifgKNijdHjxdSUshmavrNofo2B01LiIi1cr8R4ytI="
}
},
"Version": {
"Major": 254,
"Minor": 0
}
}
}
*/
// A large part of the difference between the SNP case and the usual kernel+option+initrd case is to do with booting
// from a VMGS file. The VMGS part may be used other than with SNP so is split out here.
// Make an hcsschema.ComputeSystem with the parts that target booting from a VMGS file
func makeLCOWVMGSDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcsschema.ComputeSystem, err error) {
// Kernel and initrd are combined into a single vmgs file.
vmgsFullPath := filepath.Join(opts.BootFilesPath, opts.GuestStateFile)
if _, err := os.Stat(vmgsFullPath); os.IsNotExist(err) {
return nil, fmt.Errorf("the GuestState vmgs file '%s' was not found", vmgsFullPath)
}
var processor *hcsschema.Processor2
processor, err = fetchProcessor(ctx, opts, uvm)
if err != nil {
return nil, err
}
// Align the requested memory size.
memorySizeInMB := uvm.normalizeMemorySize(ctx, opts.MemorySizeInMB)
doc := &hcsschema.ComputeSystem{
Owner: uvm.owner,
SchemaVersion: schemaversion.SchemaV25(),
ShouldTerminateOnLastHandleClosed: true,
VirtualMachine: &hcsschema.VirtualMachine{
StopOnReset: true,
Chipset: &hcsschema.Chipset{},
ComputeTopology: &hcsschema.Topology{
Memory: &hcsschema.Memory2{
SizeInMB: memorySizeInMB,
AllowOvercommit: opts.AllowOvercommit,
EnableDeferredCommit: opts.EnableDeferredCommit,
EnableColdDiscardHint: opts.EnableColdDiscardHint,
LowMMIOGapInMB: opts.LowMMIOGapInMB,
HighMMIOBaseInMB: opts.HighMMIOBaseInMB,
HighMMIOGapInMB: opts.HighMMIOGapInMB,
},
Processor: processor,
},
Devices: &hcsschema.Devices{
HvSocket: &hcsschema.HvSocket2{
HvSocketConfig: &hcsschema.HvSocketSystemConfig{
// Allow administrators and SYSTEM to bind to vsock sockets
// so that we can create a GCS log socket.
DefaultBindSecurityDescriptor: "D:P(A;;FA;;;WD)", // Differs for SNP
DefaultConnectSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
ServiceTable: make(map[string]hcsschema.HvSocketServiceConfig),
},
},
Plan9: &hcsschema.Plan9{},
},
},
}
// Set permissions for the VSock ports:
// entropyVsockPort - 1 is the entropy port,
// linuxLogVsockPort - 109 used by vsockexec to log stdout/stderr logging,
	// 0x40000000 + 1 (LinuxGcsVsockPort + 1) is the bridge (see guestconnection.go)
hvSockets := [...]uint32{entropyVsockPort, linuxLogVsockPort, gcs.LinuxGcsVsockPort, gcs.LinuxGcsVsockPort + 1}
for _, whichSocket := range hvSockets {
key := fmt.Sprintf("%08x-facb-11e6-bd58-64006a7986d3", whichSocket) // format of a linux hvsock GUID is port#-facb-11e6-bd58-64006a7986d3
doc.VirtualMachine.Devices.HvSocket.HvSocketConfig.ServiceTable[key] = hcsschema.HvSocketServiceConfig{
AllowWildcardBinds: true,
BindSecurityDescriptor: "D:P(A;;FA;;;WD)",
ConnectSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
}
}
// Handle StorageQoS if set
if opts.StorageQoSBandwidthMaximum > 0 || opts.StorageQoSIopsMaximum > 0 {
doc.VirtualMachine.StorageQoS = &hcsschema.StorageQoS{
IopsMaximum: opts.StorageQoSIopsMaximum,
BandwidthMaximum: opts.StorageQoSBandwidthMaximum,
}
}
if uvm.scsiControllerCount > 0 {
doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{}
for i := 0; i < int(uvm.scsiControllerCount); i++ {
doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{
Attachments: make(map[string]hcsschema.Attachment),
}
}
}
// The rootfs must be provided as an initrd within the VMGS file.
// Raise an error if instructed to use a particular sort of rootfs.
if opts.PreferredRootFSType != PreferredRootFSTypeNA {
return nil, fmt.Errorf("cannot override rootfs when using VMGS file")
}
// Required by HCS for the isolated boot scheme, see also https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/learn-more/generation-2-virtual-machine-security-settings-for-hyper-v
	// A complete explanation of the whys and wherefores of starting an encrypted, isolated VM is beyond the scope of these comments.
doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{
ApplySecureBootTemplate: "Apply",
SecureBootTemplateId: "1734c6e8-3154-4dda-ba5f-a874cc483422", // aka MicrosoftWindowsSecureBootTemplateGUID equivalent to "Microsoft Windows" template from Get-VMHost | select SecureBootTemplates,
}
// Point at the file that contains the linux kernel and initrd images.
doc.VirtualMachine.GuestState = &hcsschema.GuestState{
GuestStateFilePath: vmgsFullPath,
GuestStateFileType: "FileMode",
ForceTransientState: true, // tell HCS that this is just the source of the images, not ongoing state
}
return doc, nil
}
// Programmatically make the hcsschema.ComputeSystem document for the SNP case.
// This is done prior to json serialisation and sending to the HCS layer to actually do the work of creating the VM.
// Many details are quite different (see the typical JSON examples), in particular it boots from a VMGS file
// which contains both the kernel and initrd as well as kernel boot options.
func makeLCOWSecurityDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcsschema.ComputeSystem, err error) {
doc, vmgsErr := makeLCOWVMGSDoc(ctx, opts, uvm)
if vmgsErr != nil {
return nil, vmgsErr
}
// Part of the protocol to ensure that the rules in the user's Security Policy are
// respected is to provide a hash of the policy to the hardware. This is immutable
// and can be used to check that the policy used by opengcs is the required one as
// a condition of releasing secrets to the container.
policyDigest, err := securitypolicy.NewSecurityPolicyDigest(opts.SecurityPolicy)
if err != nil {
return nil, err
}
// HCS API expect a base64 encoded string as LaunchData. Internally it
// decodes it to bytes. SEV later returns the decoded byte blob as HostData
// field of the report.
hostData := base64.StdEncoding.EncodeToString(policyDigest)
// Put the measurement into the LaunchData field of the HCS creation command.
// This will end-up in HOST_DATA of SNP_LAUNCH_FINISH command the and ATTESTATION_REPORT
// retrieved by the guest later.
doc.VirtualMachine.SecuritySettings = &hcsschema.SecuritySettings{
EnableTpm: false,
Isolation: &hcsschema.IsolationSettings{
IsolationType: "SecureNestedPaging",
LaunchData: hostData,
// HclEnabled: true, /* Not available in schema 2.5 - REQUIRED when using BlockStorage in 2.6 */
},
}
return doc, nil
}
/*
Example JSON document produced once the hcsschema.ComputeSystem returned by makeLCOWDoc is serialised. Note that the boot scheme is entirely different.
{
"Owner": "containerd-shim-runhcs-v1.exe",
"SchemaVersion": {
"Major": 2,
"Minor": 1
},
"VirtualMachine": {
"StopOnReset": true,
"Chipset": {
"LinuxKernelDirect": {
"KernelFilePath": "C:\\ContainerPlat\\LinuxBootFiles\\vmlinux",
"InitRdPath": "C:\\ContainerPlat\\LinuxBootFiles\\initrd.img",
"KernelCmdLine": " 8250_core.nr_uarts=0 panic=-1 quiet pci=off nr_cpus=2 brd.rd_nr=0 pmtmr=0 -- -e 1 /bin/vsockexec -e 109 /bin/gcs -v4 -log-format json -loglevel debug"
}
},
"ComputeTopology": {
"Memory": {
"SizeInMB": 1024,
"AllowOvercommit": true
},
"Processor": {
"Count": 2
}
},
"Devices": {
"Scsi": {
"0": {}
},
"HvSocket": {
"HvSocketConfig": {
"DefaultBindSecurityDescriptor": "D:P(A;;FA;;;SY)(A;;FA;;;BA)"
}
},
"Plan9": {}
}
},
"ShouldTerminateOnLastHandleClosed": true
}
*/
// Make the ComputeSystem document object that will be serialised to json to be presented to the HCS api.
func makeLCOWDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcsschema.ComputeSystem, err error) {
logrus.Tracef("makeLCOWDoc %v\n", opts)
kernelFullPath := filepath.Join(opts.BootFilesPath, opts.KernelFile)
if _, err := os.Stat(kernelFullPath); os.IsNotExist(err) {
return nil, fmt.Errorf("kernel: '%s' not found", kernelFullPath)
}
rootfsFullPath := filepath.Join(opts.BootFilesPath, opts.RootFSFile)
if _, err := os.Stat(rootfsFullPath); os.IsNotExist(err) {
return nil, fmt.Errorf("boot file: '%s' not found", rootfsFullPath)
}
var processor *hcsschema.Processor2
processor, err = fetchProcessor(ctx, opts, uvm) // must happen after the file existence tests above.
if err != nil {
return nil, err
}
// Align the requested memory size.
memorySizeInMB := uvm.normalizeMemorySize(ctx, opts.MemorySizeInMB)
doc := &hcsschema.ComputeSystem{
Owner: uvm.owner,
SchemaVersion: schemaversion.SchemaV21(),
ShouldTerminateOnLastHandleClosed: true,
VirtualMachine: &hcsschema.VirtualMachine{
StopOnReset: true,
Chipset: &hcsschema.Chipset{},
ComputeTopology: &hcsschema.Topology{
Memory: &hcsschema.Memory2{
SizeInMB: memorySizeInMB,
AllowOvercommit: opts.AllowOvercommit,
EnableDeferredCommit: opts.EnableDeferredCommit,
EnableColdDiscardHint: opts.EnableColdDiscardHint,
LowMMIOGapInMB: opts.LowMMIOGapInMB,
HighMMIOBaseInMB: opts.HighMMIOBaseInMB,
HighMMIOGapInMB: opts.HighMMIOGapInMB,
},
Processor: processor,
},
Devices: &hcsschema.Devices{
HvSocket: &hcsschema.HvSocket2{
HvSocketConfig: &hcsschema.HvSocketSystemConfig{
// Allow administrators and SYSTEM to bind to vsock sockets
// so that we can create a GCS log socket.
DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
},
},
Plan9: &hcsschema.Plan9{},
},
},
}
// Handle StorageQoS if set
if opts.StorageQoSBandwidthMaximum > 0 || opts.StorageQoSIopsMaximum > 0 {
doc.VirtualMachine.StorageQoS = &hcsschema.StorageQoS{
IopsMaximum: opts.StorageQoSIopsMaximum,
BandwidthMaximum: opts.StorageQoSBandwidthMaximum,
}
}
if uvm.scsiControllerCount > 0 {
doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{}
for i := 0; i < int(uvm.scsiControllerCount); i++ {
doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{
Attachments: make(map[string]hcsschema.Attachment),
}
}
}
if uvm.vpmemMaxCount > 0 {
doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{
MaximumCount: uvm.vpmemMaxCount,
MaximumSizeBytes: uvm.vpmemMaxSizeBytes,
}
}
var kernelArgs string
switch opts.PreferredRootFSType {
case PreferredRootFSTypeInitRd:
if !opts.KernelDirect {
kernelArgs = "initrd=/" + opts.RootFSFile
}
case PreferredRootFSTypeVHD:
if uvm.vpmemMaxCount > 0 {
// Support for VPMem VHD(X) booting rather than initrd..
kernelArgs = "root=/dev/pmem0 ro rootwait init=/init"
imageFormat := "Vhd1"
			if strings.ToLower(filepath.Ext(opts.RootFSFile)) == ".vhdx" { // filepath.Ext includes the leading dot
imageFormat = "Vhdx"
}
doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{
"0": {
HostPath: rootfsFullPath,
ReadOnly: true,
ImageFormat: imageFormat,
},
}
if uvm.vpmemMultiMapping {
pmem := newPackedVPMemDevice()
pmem.maxMappedDeviceCount = 1
st, err := os.Stat(rootfsFullPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to stat rootfs: %q", rootfsFullPath)
}
devSize := pageAlign(uint64(st.Size()))
memReg, err := pmem.Allocate(devSize)
if err != nil {
return nil, errors.Wrap(err, "failed to allocate memory for rootfs")
}
defer func() {
if err != nil {
if err = pmem.Release(memReg); err != nil {
log.G(ctx).WithError(err).Debug("failed to release memory region")
}
}
}()
dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, memReg)
if err := pmem.mapVHDLayer(ctx, dev); err != nil {
return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device")
}
uvm.vpmemDevicesMultiMapped[0] = pmem
} else {
dev := newDefaultVPMemInfo(opts.RootFSFile, "/")
uvm.vpmemDevicesDefault[0] = dev
}
} else {
kernelArgs = "root=/dev/sda ro rootwait init=/init"
doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"] = hcsschema.Attachment{
Type_: "VirtualDisk",
Path: rootfsFullPath,
ReadOnly: true,
}
uvm.scsiLocations[0][0] = newSCSIMount(uvm, rootfsFullPath, "/", "VirtualDisk", "", 1, 0, 0, true, false)
}
}
vmDebugging := false
if opts.ConsolePipe != "" {
vmDebugging = true
kernelArgs += " 8250_core.nr_uarts=1 8250_core.skip_txen_test=1 console=ttyS0,115200"
doc.VirtualMachine.Devices.ComPorts = map[string]hcsschema.ComPort{
"0": { // Which is actually COM1
NamedPipe: opts.ConsolePipe,
},
}
} else {
kernelArgs += " 8250_core.nr_uarts=0"
}
if opts.EnableGraphicsConsole {
vmDebugging = true
kernelArgs += " console=tty"
doc.VirtualMachine.Devices.Keyboard = &hcsschema.Keyboard{}
doc.VirtualMachine.Devices.EnhancedModeVideo = &hcsschema.EnhancedModeVideo{}
doc.VirtualMachine.Devices.VideoMonitor = &hcsschema.VideoMonitor{}
}
if !vmDebugging {
// Terminate the VM if there is a kernel panic.
kernelArgs += " panic=-1 quiet"
}
// Add Kernel Boot options
if opts.KernelBootOptions != "" {
kernelArgs += " " + opts.KernelBootOptions
}
if !opts.VPCIEnabled {
kernelArgs += ` pci=off`
}
// Inject initial entropy over vsock during init launch.
entropyArgs := fmt.Sprintf("-e %d", entropyVsockPort)
// With default options, run GCS with stderr pointing to the vsock port
// created below in order to forward guest logs to logrus.
execCmdArgs := "/bin/vsockexec"
if opts.ForwardStdout {
execCmdArgs += fmt.Sprintf(" -o %d", linuxLogVsockPort)
}
if opts.ForwardStderr {
execCmdArgs += fmt.Sprintf(" -e %d", linuxLogVsockPort)
}
if opts.DisableTimeSyncService {
opts.ExecCommandLine = fmt.Sprintf("%s -disable-time-sync", opts.ExecCommandLine)
}
if log.IsScrubbingEnabled() {
opts.ExecCommandLine += " -scrub-logs"
}
execCmdArgs += " " + opts.ExecCommandLine
if opts.ProcessDumpLocation != "" {
execCmdArgs += " -core-dump-location " + opts.ProcessDumpLocation
}
initArgs := fmt.Sprintf("%s %s", entropyArgs, execCmdArgs)
if vmDebugging {
// Launch a shell on the console.
initArgs = entropyArgs + ` sh -c "` + execCmdArgs + ` & exec sh"`
}
kernelArgs += fmt.Sprintf(" nr_cpus=%d", opts.ProcessorCount)
kernelArgs += ` brd.rd_nr=0 pmtmr=0 -- ` + initArgs
if !opts.KernelDirect {
doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{
BootThis: &hcsschema.UefiBootEntry{
DevicePath: `\` + opts.KernelFile,
DeviceType: "VmbFs",
VmbFsRootPath: opts.BootFilesPath,
OptionalData: kernelArgs,
},
}
} else {
doc.VirtualMachine.Chipset.LinuxKernelDirect = &hcsschema.LinuxKernelDirect{
KernelFilePath: kernelFullPath,
KernelCmdLine: kernelArgs,
}
if opts.PreferredRootFSType == PreferredRootFSTypeInitRd {
doc.VirtualMachine.Chipset.LinuxKernelDirect.InitRdPath = rootfsFullPath
}
}
return doc, nil
}
// CreateLCOW creates an HCS compute system representing a utility VM. It
// consumes a set of options derived from various defaults and options
// expressed as annotations.
func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error) {
ctx, span := oc.StartSpan(ctx, "uvm::CreateLCOW")
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
if opts.ID == "" {
g, err := guid.NewV4()
if err != nil {
return nil, err
}
opts.ID = g.String()
}
span.AddAttributes(trace.StringAttribute(logfields.UVMID, opts.ID))
log.G(ctx).WithField("options", fmt.Sprintf("%+v", opts)).Debug("uvm::CreateLCOW options")
	// We don't serialize OutputHandler so if it is missing we need to put it back to the default.
if opts.OutputHandler == nil {
opts.OutputHandler = parseLogrus(opts.ID)
}
uvm := &UtilityVM{
id: opts.ID,
owner: opts.Owner,
operatingSystem: "linux",
scsiControllerCount: opts.SCSIControllerCount,
vpmemMaxCount: opts.VPMemDeviceCount,
vpmemMaxSizeBytes: opts.VPMemSizeBytes,
vpciDevices: make(map[VPCIDeviceKey]*VPCIDevice),
physicallyBacked: !opts.AllowOvercommit,
devicesPhysicallyBacked: opts.FullyPhysicallyBacked,
createOpts: opts,
vpmemMultiMapping: !opts.VPMemNoMultiMapping,
encryptScratch: opts.EnableScratchEncryption,
noWritableFileShares: opts.NoWritableFileShares,
}
defer func() {
if err != nil {
uvm.Close()
}
}()
// vpmemMaxCount has been set to 0 which means we are going to need multiple SCSI controllers
// to support lots of layers.
if osversion.Build() >= osversion.RS5 && uvm.vpmemMaxCount == 0 {
uvm.scsiControllerCount = 4
}
if err = verifyOptions(ctx, opts); err != nil {
return nil, errors.Wrap(err, errBadUVMOpts.Error())
}
// HCS config for SNP isolated vm is quite different to the usual case
var doc *hcsschema.ComputeSystem
if opts.SecurityPolicyEnabled {
doc, err = makeLCOWSecurityDoc(ctx, opts, uvm)
log.G(ctx).Tracef("create_lcow::CreateLCOW makeLCOWSecurityDoc result doc: %v err %v", doc, err)
} else {
doc, err = makeLCOWDoc(ctx, opts, uvm)
log.G(ctx).Tracef("create_lcow::CreateLCOW makeLCOWDoc result doc: %v err %v", doc, err)
}
if err != nil {
return nil, err
}
err = uvm.create(ctx, doc)
log.G(ctx).Tracef("create_lcow::CreateLCOW uvm.create result uvm: %v err %v", uvm, err)
if err != nil {
return nil, fmt.Errorf("error while creating the compute system: %s", err)
}
	// Create a socket to inject entropy during boot.
uvm.entropyListener, err = uvm.listenVsock(entropyVsockPort)
if err != nil {
return nil, err
}
// Create a socket that the executed program can send to. This is usually
// used by GCS to send log data.
if opts.ForwardStdout || opts.ForwardStderr {
uvm.outputHandler = opts.OutputHandler
uvm.outputProcessingDone = make(chan struct{})
uvm.outputListener, err = uvm.listenVsock(linuxLogVsockPort)
if err != nil {
return nil, err
}
}
if opts.UseGuestConnection {
log.G(ctx).WithField("vmID", uvm.runtimeID).Debug("Using external GCS bridge")
l, err := uvm.listenVsock(gcs.LinuxGcsVsockPort)
if err != nil {
return nil, err
}
uvm.gcListener = l
}
uvm.ncProxyClientAddress = opts.NetworkConfigProxy
return uvm, nil
}
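// listenVsock returns an hvsock listener bound to this utility VM's runtime ID on the given vsock port.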
func (uvm *UtilityVM) listenVsock(port uint32) (net.Listener, error) {
return winio.ListenHvsock(&winio.HvsockAddr{
VMID: uvm.runtimeID,
ServiceID: winio.VsockServiceID(port),
})
}
| [
"\"ProgramFiles\""
]
| []
| [
"ProgramFiles"
]
| [] | ["ProgramFiles"] | go | 1 | 0 | |
py/selenium/webdriver/firefox/firefox_binary.py | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from platform import system
from subprocess import Popen, STDOUT
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
import time
class FirefoxBinary:
NO_FOCUS_LIBRARY_NAME = "x_ignore_nofocus.so"
def __init__(self, firefox_path=None, log_file=None):
"""
Creates a new instance of Firefox binary.
:Args:
- firefox_path - Path to the Firefox executable. By default, it will be detected from the standard locations.
- log_file - A file object to redirect the firefox process output to. It can be sys.stdout.
Please note that with parallel run the output won't be synchronous.
By default, it will be redirected to /dev/null.
"""
self._start_cmd = firefox_path
# We used to default to subprocess.PIPE instead of /dev/null, but after
# a while the pipe would fill up and Firefox would freeze.
self._log_file = log_file or open(os.devnull, "wb")
self.command_line = None
self.platform = system().lower()
if not self._start_cmd:
self._start_cmd = self._get_firefox_start_cmd()
if not self._start_cmd.strip():
raise WebDriverException(
"Failed to find firefox binary. You can set it by specifying "
"the path to 'firefox_binary':\n\nfrom "
"selenium.webdriver.firefox.firefox_binary import "
"FirefoxBinary\n\nbinary = "
"FirefoxBinary('/path/to/binary')\ndriver = "
"webdriver.Firefox(firefox_binary=binary)")
# Rather than modifying the environment of the calling Python process
# copy it and modify as needed.
self._firefox_env = os.environ.copy()
self._firefox_env["MOZ_CRASHREPORTER_DISABLE"] = "1"
self._firefox_env["MOZ_NO_REMOTE"] = "1"
self._firefox_env["NO_EM_RESTART"] = "1"
def add_command_line_options(self, *args):
self.command_line = args
def launch_browser(self, profile, timeout=30):
"""Launches the browser for the given profile name.
It is assumed the profile already exists.
"""
self.profile = profile
self._start_from_profile_path(self.profile.path)
self._wait_until_connectable(timeout=timeout)
def kill(self):
"""Kill the browser.
This is useful when the browser is stuck.
"""
if self.process:
self.process.kill()
self.process.wait()
def _start_from_profile_path(self, path):
self._firefox_env["XRE_PROFILE_PATH"] = path
if self.platform == 'linux':
self._modify_link_library_path()
command = [self._start_cmd, "-foreground"]
if self.command_line:
for cli in self.command_line:
command.append(cli)
self.process = Popen(
command, stdout=self._log_file, stderr=STDOUT,
env=self._firefox_env)
def _wait_until_connectable(self, timeout=30):
"""Blocks until the extension is connectable in the firefox."""
count = 0
while not utils.is_connectable(self.profile.port):
if self.process.poll():
# Browser has exited
raise WebDriverException(
"The browser appears to have exited "
"before we could connect. If you specified a log_file in "
"the FirefoxBinary constructor, check it for details.")
if count >= timeout:
self.kill()
raise WebDriverException(
"Can't load the profile. Possible firefox version mismatch. "
"You must use GeckoDriver instead for Firefox 48+. Profile "
"Dir: %s If you specified a log_file in the "
"FirefoxBinary constructor, check it for details."
% (self.profile.path))
count += 1
time.sleep(1)
return True
def _find_exe_in_registry(self):
try:
from _winreg import OpenKey, QueryValue, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER
except ImportError:
from winreg import OpenKey, QueryValue, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER
import shlex
keys = (r"SOFTWARE\Classes\FirefoxHTML\shell\open\command",
r"SOFTWARE\Classes\Applications\firefox.exe\shell\open\command")
command = ""
for path in keys:
try:
key = OpenKey(HKEY_LOCAL_MACHINE, path)
command = QueryValue(key, "")
break
except OSError:
try:
key = OpenKey(HKEY_CURRENT_USER, path)
command = QueryValue(key, "")
break
except OSError:
pass
else:
return ""
if not command:
return ""
return shlex.split(command)[0]
def _get_firefox_start_cmd(self):
"""Return the command to start firefox."""
start_cmd = ""
if self.platform == "darwin": # small darwin due to lower() in self.platform
start_cmd = "/Applications/Firefox.app/Contents/MacOS/firefox-bin"
# fallback to homebrew installation for mac users
if not os.path.exists(start_cmd):
start_cmd = os.path.expanduser("~") + start_cmd
elif self.platform == "windows": # same
start_cmd = (self._find_exe_in_registry() or self._default_windows_location())
elif self.platform == 'java' and os._name == 'nt':
start_cmd = self._default_windows_location()
else:
for ffname in ["firefox", "iceweasel"]:
start_cmd = self.which(ffname)
if start_cmd:
break
else:
# couldn't find firefox on the system path
raise RuntimeError(
"Could not find firefox in your system PATH."
" Please specify the firefox binary location or install firefox")
return start_cmd
def _default_windows_location(self):
program_files = [os.getenv("PROGRAMFILES", r"C:\Program Files"),
os.getenv("PROGRAMFILES(X86)", r"C:\Program Files (x86)")]
for path in program_files:
binary_path = os.path.join(path, r"Mozilla Firefox\firefox.exe")
if os.access(binary_path, os.X_OK):
return binary_path
return ""
def _modify_link_library_path(self):
existing_ld_lib_path = os.environ.get('LD_LIBRARY_PATH', '')
new_ld_lib_path = self._extract_and_check(
self.profile, self.NO_FOCUS_LIBRARY_NAME, "x86", "amd64")
new_ld_lib_path += existing_ld_lib_path
self._firefox_env["LD_LIBRARY_PATH"] = new_ld_lib_path
self._firefox_env['LD_PRELOAD'] = self.NO_FOCUS_LIBRARY_NAME
def _extract_and_check(self, profile, no_focus_so_name, x86, amd64):
paths = [x86, amd64]
built_path = ""
for path in paths:
library_path = os.path.join(profile.path, path)
if not os.path.exists(library_path):
os.makedirs(library_path)
import shutil
shutil.copy(os.path.join(
os.path.dirname(__file__),
path,
self.NO_FOCUS_LIBRARY_NAME),
library_path)
built_path += library_path + ":"
return built_path
def which(self, fname):
"""Returns the fully qualified path by searching Path of the given
name"""
for pe in os.environ['PATH'].split(os.pathsep):
checkname = os.path.join(pe, fname)
if os.access(checkname, os.X_OK) and not os.path.isdir(checkname):
return checkname
return None
| []
| []
| [
"PROGRAMFILES(X8",
"LD_LIBRARY_PATH",
"PATH",
"PROGRAMFILES"
]
| [] | ["PROGRAMFILES(X8", "LD_LIBRARY_PATH", "PATH", "PROGRAMFILES"] | python | 4 | 0 | |
test/infrastructure/docker/exp/api/v1alpha4/dockermachinepool_types.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha4
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha4"
)
const (
// MachinePoolFinalizer allows ReconcileDockerMachinePool to clean up resources.
MachinePoolFinalizer = "dockermachinepool.infrastructure.cluster.x-k8s.io"
)
// DockerMachineTemplate defines the desired state of DockerMachine.
type DockerMachineTemplate struct {
// CustomImage allows customizing the container image that is used for
// running the machine
// +optional
CustomImage string `json:"customImage,omitempty"`
// PreLoadImages allows to pre-load images in a newly created machine. This can be used to
// speed up tests by avoiding e.g. to download CNI images on all the containers.
// +optional
PreLoadImages []string `json:"preLoadImages,omitempty"`
// ExtraMounts describes additional mount points for the node container
// These may be used to bind a hostPath
// +optional
ExtraMounts []infrav1.Mount `json:"extraMounts,omitempty"`
}
// DockerMachinePoolSpec defines the desired state of DockerMachinePool.
type DockerMachinePoolSpec struct {
// Template contains the details used to build a replica machine within the Machine Pool
// +optional
Template DockerMachineTemplate `json:"template"`
// ProviderID is the identification ID of the Machine Pool
// +optional
ProviderID string `json:"providerID,omitempty"`
// ProviderIDList is the list of identification IDs of machine instances managed by this Machine Pool
//+optional
ProviderIDList []string `json:"providerIDList,omitempty"`
}
// DockerMachinePoolStatus defines the observed state of DockerMachinePool.
type DockerMachinePoolStatus struct {
// Ready denotes that the machine pool is ready
// +optional
Ready bool `json:"ready"`
// Replicas is the most recently observed number of replicas.
// +optional
Replicas int32 `json:"replicas"`
// The generation observed by the deployment controller.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Instances contains the status for each instance in the pool
// +optional
Instances []DockerMachinePoolInstanceStatus `json:"instances,omitempty"`
// Conditions defines current service state of the DockerMachinePool.
// +optional
Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}
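// DockerMachinePoolInstanceStatus contains status information about a DockerMachinePool instance.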
type DockerMachinePoolInstanceStatus struct {
// Addresses contains the associated addresses for the docker machine.
// +optional
Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"`
// InstanceName is the identification of the Machine Instance within the Machine Pool
InstanceName string `json:"instanceName,omitempty"`
// ProviderID is the provider identification of the Machine Pool Instance
// +optional
ProviderID *string `json:"providerID,omitempty"`
// Version defines the Kubernetes version for the Machine Instance
// +optional
Version *string `json:"version,omitempty"`
// Ready denotes that the machine (docker container) is ready
// +optional
Ready bool `json:"ready"`
// Bootstrapped is true when the kubeadm bootstrapping has been run
// against this machine
// +optional
Bootstrapped bool `json:"bootstrapped,omitempty"`
}
// +kubebuilder:resource:path=dockermachinepools,scope=Namespaced,categories=cluster-api
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// DockerMachinePool is the Schema for the dockermachinepools API.
type DockerMachinePool struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec DockerMachinePoolSpec `json:"spec,omitempty"`
Status DockerMachinePoolStatus `json:"status,omitempty"`
}
// GetConditions returns the set of conditions for this object.
func (c *DockerMachinePool) GetConditions() clusterv1.Conditions {
return c.Status.Conditions
}
// SetConditions sets the conditions on this object.
func (c *DockerMachinePool) SetConditions(conditions clusterv1.Conditions) {
c.Status.Conditions = conditions
}
// +kubebuilder:object:root=true
// DockerMachinePoolList contains a list of DockerMachinePool.
type DockerMachinePoolList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []DockerMachinePool `json:"items"`
}
func init() {
SchemeBuilder.Register(&DockerMachinePool{}, &DockerMachinePoolList{})
}
| []
| []
| []
| [] | [] | go | null | null | null |
Packs/DeprecatedContent/Integrations/Cymon/Cymon.py | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
import os
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
if not demisto.params().get('useProxy', False):
    # Use pop() so a missing proxy variable doesn't raise a KeyError.
    os.environ.pop('HTTP_PROXY', None)
    os.environ.pop('HTTPS_PROXY', None)
    os.environ.pop('http_proxy', None)
    os.environ.pop('https_proxy', None)
''' GLOBAL VARS '''
SERVER_URL_V1 = 'https://www.cymon.io:443/api/nexus/v1'
SERVER_DASHBOARD_URL_V1 = 'https://www.cymon.io:443/api/dashboard/v1'
SERVER_URL_V2 = 'https://api.cymon.io/v2/ioc/search'
VERIFY_CERTIFICATES = False if demisto.params().get('unsecure') else True
DEFAULT_HEADERS = {
"Content-Type": "application/json"
}
''' HELPER FUNCTIONS '''
def cymon_says():
return_error('Cymon service discontinued. Please disable or delete the integration instance.')
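# Thin wrapper around requests: 200 -> JSON body, 204 -> quota-exceeded error, 404 -> empty dict, other error statuses raise.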
def http_request(method, url, headers):
try:
res = requests.request(method,
url,
verify=VERIFY_CERTIFICATES,
headers=headers)
if res.status_code == 200:
return res.json()
# 204 HTTP status code is returned when api rate limit has been exceeded
elif res.status_code == 204:
return_error("You've reached your API call quota.")
elif res.status_code == 404:
return {}
res.raise_for_status()
except Exception as e:
        raise  # re-raise with the original traceback
''' DOMAIN COMMAND '''
# def get_domain_full_report(domain):
# report_results = []
#
# from_param = 0
# size_param = 10
# total = None
#
# url = '{}/{}/{}?from={}&size={}'.format(SERVER_URL_V2, 'domain', domain, from_param, size_param)
#
# while total is None or total > from_param:
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# hits = response.get('hits', [])
# for hit in hits:
# timestamp = datetime.strptime(
# hit.get('timestamp', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")),
# '%Y-%m-%dT%H:%M:%S.%fZ')
#
# report_results.append({
# 'Title': hit.get('title', "").title(),
# 'Feed': hit.get('feed'),
# 'Timestamp': timestamp.strftime("%Y-%m-%d %H:%M:%S"),
# # Formatting the timestamp to human readable date and time
# 'Tags': hit.get('tags'),
# 'Hostname': hit.get('ioc', {}).get('hostname'),
# 'IP': hit.get('ioc', {}).get('ip'),
# 'Domain': hit.get('ioc', {}).get('domain'),
# 'Reported By': hit.get('reported_by'),
# 'Location': hit.get('location', {}).get('country')
# })
#
# from_param = from_param + size_param
# total = int(response.get('total', 0))
#
# url = '{}/{}/{}?from={}&size={}'.format(SERVER_URL_V2, 'domain', domain, from_param, size_param)
#
# return report_results
# def get_domain_report(domain_full_report):
# reports = {} # type:dict
#
# for report in domain_full_report:
# title = report.get('Title')
# timestamp = datetime.strptime(
# report.get('Timestamp', datetime.now().strftime("%Y-%m-%d %H:%M:%S")), '%Y-%m-%d %H:%M:%S')
#
# if (title in reports and reports.get(title).get('Timestamp') < timestamp) or title not in reports: # type: ignore
# reports.update({title: {
# 'Feed': report.get('Feed'),
# 'Timestamp': timestamp,
# 'Tags': report.get('Tags'),
# 'Hostname': report.get('Hostname'),
# 'IP': report.get('IP'),
# 'Domain': report.get('Domain'),
# 'Reported By': report.get('Reported By'),
# 'Location': report.get('Location')
# }})
#
# report_results = []
#
# for report in reports:
# report_results.append({
# 'Title': report,
# 'Feed': reports.get(report).get('Feed'), # type: ignore
# 'Timestamp': reports.get(report).get('Timestamp').strftime("%Y-%m-%d %H:%M:%S"), # type: ignore
# # Formatting the timestamp to human readable date and time
# 'Tags': reports.get(report).get('Tags'), # type: ignore
# 'Hostname': reports.get(report).get('Hostname'), # type: ignore
# 'IP': reports.get(report).get('IP'), # type: ignore
# 'Domain': reports.get(report).get('Domain'), # type: ignore
# 'Reported By': reports.get(report).get('Reported By'), # type: ignore
# 'Location': reports.get(report).get('Location') # type: ignore
# })
#
# return {
# 'reports': report_results,
# 'total': len(domain_full_report)
# }
# def create_domain_command_markdown(domain, total_hits, reports, domain_full_report, is_full_response):
# md = '## Cymon Domain report for: {}\n'.format(domain)
#
# md += '\n'
#
# md += '**Total Hits:** {}'.format(total_hits)
#
# md += '\n'
#
# md += tableToMarkdown("The following reports are the latest malicious hits resolved to the given domain:", reports,
# ['Title', 'Hostname', 'IP', 'Timestamp', 'Feed', 'Tags', 'Location', 'Reported By', 'Domain'])
#
# if is_full_response:
# md += tableToMarkdown("Full report list:", domain_full_report,
# ['Title', 'Hostname', 'IP', 'Timestamp', 'Feed', 'Tags', 'Location', 'Reported By',
# 'Domain'])
#
# return md
# def create_context_domain_command(domain, reports):
# cymon_domain_context_activities = []
# description = 'Reported suspicious activities: '
#
# for report in reports:
# cymon_domain_context_activities.append({
# 'Title': report.get('Title'),
# 'Tags': report.get('Tags'),
# 'Time': report.get('Timestamp'),
# 'Hostname': report.get('Hostname'),
# 'IP': report.get('IP')
# })
#
# description += '{}, '.format(report.get('Title'))
#
# description = description[:-2]
#
# context = {
# outputPaths['domain']: {
# 'Name': domain,
# 'Malicious': {
# 'Vendor': 'Cymon',
# 'Description': description
# }
# },
# 'Cymon': {
# 'Domain': {
# 'Activities': cymon_domain_context_activities
# }
# }
# }
#
# return context
# def get_domain_report_command():
# args = demisto.args()
#
# domain = args.get('domain')
# is_full_response = args.get('fullResponse') == 'true'
#
# domain_full_report = get_domain_full_report(domain)
# domain_summarized_report = get_domain_report(domain_full_report)
#
# if len(domain_full_report) == 0:
# return "Domain " + domain + " is not in Cymons's dataset"
#
# markdown = create_domain_command_markdown(domain, domain_summarized_report.get('total'),
# domain_summarized_report.get('reports'), domain_full_report,
# is_full_response)
# context = create_context_domain_command(domain, domain_summarized_report.get('reports'))
#
# return {
# 'Type': entryTypes['note'],
# 'Contents': domain_full_report,
# 'ContentsFormat': formats['json'],
# 'HumanReadable': markdown,
# 'EntryContext': context
# }
''' IP COMMAND '''
# def get_ip_events_sources(ip):
# url = '{}/{}/{}'.format(SERVER_URL_V1, 'ip', ip)
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# return response.get('sources', None)
# def get_ip_events(ip):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'events', 100)
# events = {} # type:dict
#
# next_link = url
#
# while next_link is not None:
# response = http_request('GET', next_link, DEFAULT_HEADERS)
#
# for event in response.get('results', []):
# tag = event.get('tag')
# date = datetime.strptime(
# event.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")), '%Y-%m-%dT%H:%M:%SZ')
#
# if (tag in events and events[tag] < date) or tag not in events:
# events.update({tag: date})
#
# next_link = response.get('next')
#
# for event in events:
# events[event] = events[event].strftime(
# "%Y-%m-%d %H:%M:%S") # Formatting the timestamp to human readable date and time
#
# return events
# def get_ip_location(ip):
# url = '{}/{}/{}'.format(SERVER_DASHBOARD_URL_V1, 'geolocation', ip)
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# lon = response.get('longitude', None)
# lat = response.get('latitude', None)
#
# if not lon or not lat:
# return {}
# else:
# return {
# 'lon': lon,
# 'lat': lat
# }
# def get_ip_domains(ip, max_len):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'domains', max_len)
# domains = []
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# for domain in response.get('results', []):
# date = datetime.strptime(
# domain.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")), '%Y-%m-%dT%H:%M:%SZ')
#
# domains.append({'Hostname': domain.get('name'),
# 'Last Resolved': date.strftime("%Y-%m-%d %H:%M:%S")})
#
# return domains
# def get_ip_urls(ip, max_len):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'urls', max_len)
# urls = {} # type:dict
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# for response_url in response.get('results', []):
# url = response_url.get('location')
# if url.endswith("/"):
# url = url[:-1]
#
# date = datetime.strptime(
# response_url.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")),
# '%Y-%m-%dT%H:%M:%SZ')
#
# if (url in urls and urls[url] < date) or url not in urls:
# urls.update({url: date})
#
# urls_result = []
# for url in urls:
# urls_result.append({'Url': url, "Last Resolved": urls[url].strftime(
# "%Y-%m-%d %H:%M:%S")}) # Formatting the timestamp to human readable date and time
#
# return urls_result
# def get_ip_asn(ip):
# url = '{}/{}/{}'.format(SERVER_DASHBOARD_URL_V1, 'ipwhois', ip)
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# asn = response.get('asn')
# asn_country_code = response.get('asn_country_code')
#
# if not asn or not asn_country_code:
# return {}
# else:
# return {
# 'asn': asn,
# 'country': asn_country_code
# }
# def create_ip_command_markdown(ip, sources, events, domains, urls, asn):
# md = '## Cymon IP report for: {}\n'.format(ip)
#
# if asn:
# md += 'ASN: **{}** ({})\n'.format(asn.get('asn'), asn.get('country'))
#
# md += '\n'
#
# if events:
# md += '### Reports\n'
# for event in events:
# md += '**{}** (Last reported on: {})\n'.format(event.title(), events[event])
#
# if sources:
# md += '#### Sources\n'
# for source in sources:
# md += '{}\n'.format(source)
#
# if domains and len(domains) > 0:
# md += tableToMarkdown("The following domains were resolved to the given IP address:", domains)
#
# if urls and len(urls) > 0:
# md += tableToMarkdown("The following urls were resolved to the given IP address:", urls)
#
# return md
# def create_ip_command_context(ip, asn, events, domains):
# if events:
# description = 'Reported suspicious activities: '
#
# for event in events:
# description += '{}, '.format(event)
#
# description = description[:-2]
# else:
# description = 'No suspicious activities were reported'
#
# asn_in_context = {} # type:dict
#
# if asn:
# asn_in_context = {
# 'ASN': asn.get('asn'),
# 'Geo': {
# 'Country': asn.get('country')
# }
# }
#
# context = {'Cymon': {
# 'IP': {
# 'Domains': domains
# }
# }, outputPaths['ip']: {
# 'Address': ip,
# 'Malicious': {
# 'Vendor': 'Cymon',
# 'Description': description
# }
# }}
#
# context[outputPaths['ip']].update(asn_in_context)
#
# return context
# def get_ip_report_command():
# args = demisto.args()
#
# full_response = args.get('fullResponse') == 'true'
#
# ip = args.get('ip')
# if not is_ip_valid(ip):
# return_error('An invalid IP was specified')
#
# sources = get_ip_events_sources(ip)
#
# if not sources:
# return "IP " + ip + " is not in Cymons's dataset"
#
# if full_response:
# max_len = 1000
# else:
# max_len = 50
#
# events = get_ip_events(ip)
# location = get_ip_location(ip)
# domains = get_ip_domains(ip, max_len)
# urls = get_ip_urls(ip, max_len)
# asn = get_ip_asn(ip)
#
# markdown = create_ip_command_markdown(ip, sources, events, domains, urls, asn)
# context = create_ip_command_context(ip, asn, events, domains)
#
# return [
# {
# 'Type': entryTypes['map'],
# 'Contents': {
# 'lat': float(location.get('lat')),
# 'lng': float(location.get('lon'))
# },
# 'ContentsFormat': formats['json']
# },
# {
# 'Type': entryTypes['note'],
# 'Contents': {
# 'events': events,
# 'sources': sources,
# 'location': location,
# 'domains': domains,
# 'urls': urls,
# 'asn': asn
# },
# 'HumanReadable': markdown,
# 'EntryContext': context,
# 'ContentsFormat': formats['json']
# }]
''' EXECUTION CODE '''
try:
command = demisto.command()
if command == 'test-module':
demisto.results('Cymon has been deprecated and is no longer in service. Please delete the instance.')
elif command == 'ip':
cymon_says()
elif command == 'domain':
cymon_says()
except Exception:
raise
| []
| []
| ["HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"]
| [] | ["HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"] | python | 4 | 0 | |
exporter/awsxrayexporter/xray_client.go | // Copyright 2019, OpenTelemetry Authors
// Portions of this file Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package awsxrayexporter
import (
"os"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/xray"
"go.opentelemetry.io/collector/component"
"go.uber.org/zap"
)
var collectorDistribution = "opentelemetry-collector-contrib"
// XRay defines X-Ray api call structure.
type XRay interface {
PutTraceSegments(input *xray.PutTraceSegmentsInput) (*xray.PutTraceSegmentsOutput, error)
PutTelemetryRecords(input *xray.PutTelemetryRecordsInput) (*xray.PutTelemetryRecordsOutput, error)
}
// XRayClient represents X-Ray client.
type XRayClient struct {
xRay *xray.XRay
}
// PutTraceSegments makes PutTraceSegments api call on X-Ray client.
func (c *XRayClient) PutTraceSegments(input *xray.PutTraceSegmentsInput) (*xray.PutTraceSegmentsOutput, error) {
return c.xRay.PutTraceSegments(input)
}
// PutTelemetryRecords makes PutTelemetryRecords api call on X-Ray client.
func (c *XRayClient) PutTelemetryRecords(input *xray.PutTelemetryRecordsInput) (*xray.PutTelemetryRecordsOutput, error) {
return c.xRay.PutTelemetryRecords(input)
}
// newXRay creates a new instance of the XRay client with an AWS configuration and session.
func newXRay(logger *zap.Logger, awsConfig *aws.Config, buildInfo component.BuildInfo, s *session.Session) XRay {
x := xray.New(s, awsConfig)
logger.Debug("Using Endpoint", zap.String("endpoint", x.Endpoint))
x.Handlers.Build.PushBackNamed(request.NamedHandler{
Name: "tracing.XRayVersionUserAgentHandler",
Fn: request.MakeAddToUserAgentHandler("xray", "1.0", os.Getenv("AWS_EXECUTION_ENV")),
})
x.Handlers.Build.PushFrontNamed(newCollectorUserAgentHandler(buildInfo))
x.Handlers.Sign.PushFrontNamed(request.NamedHandler{
Name: "tracing.TimestampHandler",
Fn: func(r *request.Request) {
r.HTTPRequest.Header.Set("X-Amzn-Xray-Timestamp",
strconv.FormatFloat(float64(time.Now().UnixNano())/float64(time.Second), 'f', 9, 64))
},
})
return &XRayClient{
xRay: x,
}
}
// IsTimeoutError checks whether error is timeout error.
func IsTimeoutError(err error) bool {
awsError, ok := err.(awserr.Error)
if ok {
if strings.Contains(awsError.Error(), "net/http: request canceled") {
return true
}
}
return false
}
func newCollectorUserAgentHandler(buildInfo component.BuildInfo) request.NamedHandler {
return request.NamedHandler{
Name: "otel.collector.UserAgentHandler",
Fn: request.MakeAddToUserAgentHandler(collectorDistribution, buildInfo.Version),
}
}
| ["\"AWS_EXECUTION_ENV\""]
| []
| ["AWS_EXECUTION_ENV"]
| [] | ["AWS_EXECUTION_ENV"] | go | 1 | 0 | |
main.go | package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/bendahl/uinput"
"github.com/godbus/dbus"
"github.com/mitchellh/go-homedir"
"github.com/muesli/streamdeck"
)
var (
deck *Deck
dbusConn *dbus.Conn
keyboard uinput.Keyboard
xorg *Xorg
recentWindows []Window
deckFile = flag.String("deck", "main.deck", "path to deck config file")
device = flag.String("device", "", "which device to use (serial number)")
brightness = flag.Uint("brightness", 80, "brightness in percent")
)
const (
longPressDuration = 350 * time.Millisecond
)
func fatal(v ...interface{}) {
fmt.Fprintln(os.Stderr, v...)
os.Exit(1)
}
func fatalf(format string, a ...interface{}) {
fmt.Fprintf(os.Stderr, format, a...)
os.Exit(1)
}
func expandPath(base, path string) (string, error) {
var err error
path, err = homedir.Expand(path)
if err != nil {
return "", err
}
if base == "" {
return path, nil
}
if !filepath.IsAbs(path) {
path = filepath.Join(base, path)
}
return filepath.Abs(path)
}
func eventLoop(dev *streamdeck.Device, tch chan interface{}) {
var keyStates sync.Map
keyTimestamps := make(map[uint8]time.Time)
kch, err := dev.ReadKeys()
if err != nil {
fatal(err)
}
for {
select {
case <-time.After(100 * time.Millisecond):
deck.updateWidgets(dev)
case k, ok := <-kch:
if !ok {
err = dev.Open()
if err != nil {
fatal(err)
}
continue
}
var state bool
if ks, ok := keyStates.Load(k.Index); ok {
state = ks.(bool)
}
keyStates.Store(k.Index, k.Pressed)
if state && !k.Pressed {
// key was released
if time.Since(keyTimestamps[k.Index]) < longPressDuration {
// fmt.Println("Triggering short action")
deck.triggerAction(dev, k.Index, false)
}
}
if !state && k.Pressed {
// key was pressed
go func() {
// launch timer to observe keystate
time.Sleep(longPressDuration)
if state, ok := keyStates.Load(k.Index); ok && state.(bool) {
// key still pressed
// fmt.Println("Triggering long action")
deck.triggerAction(dev, k.Index, true)
}
}()
}
keyTimestamps[k.Index] = time.Now()
case e := <-tch:
switch event := e.(type) {
case WindowClosedEvent:
handleWindowClosed(dev, event)
case ActiveWindowChangedEvent:
handleActiveWindowChanged(dev, event)
}
}
}
}
func initDevice() (*streamdeck.Device, error) {
d, err := streamdeck.Devices()
if err != nil {
fatal(err)
}
if len(d) == 0 {
return nil, fmt.Errorf("no Stream Deck devices found")
}
dev := d[0]
if len(*device) > 0 {
found := false
for _, v := range d {
if v.Serial == *device {
dev = v
found = true
break
}
}
if !found {
fmt.Println("Can't find device. Available devices:")
for _, v := range d {
fmt.Printf("Serial %s (%d buttons)\n", v.Serial, v.Columns*v.Rows)
}
os.Exit(1)
}
}
if err := dev.Open(); err != nil {
return nil, err
}
ver, err := dev.FirmwareVersion()
if err != nil {
return nil, err
}
fmt.Printf("Found device with serial %s (%d buttons, firmware %s)\n",
dev.Serial, dev.Columns*dev.Rows, ver)
if err := dev.Reset(); err != nil {
return nil, err
}
if *brightness > 100 {
*brightness = 100
}
if err = dev.SetBrightness(uint8(*brightness)); err != nil {
return nil, err
}
return &dev, nil
}
func main() {
flag.Parse()
// initialize device
dev, err := initDevice()
if err != nil {
fatal(err)
}
// initialize dbus connection
dbusConn, err = dbus.SessionBus()
if err != nil {
fatal(err)
}
// initialize xorg connection and track window focus
tch := make(chan interface{})
xorg, err = Connect(os.Getenv("DISPLAY"))
if err == nil {
defer xorg.Close()
xorg.TrackWindows(tch, time.Second)
} else {
fmt.Printf("Could not connect to X server: %s\n", err)
fmt.Println("Tracking window manager will be disabled!")
}
// initialize virtual keyboard
keyboard, err = uinput.CreateKeyboard("/dev/uinput", []byte("Deckmaster"))
if err != nil {
fmt.Printf("Could not create virtual input device (/dev/uinput): %s\n", err)
fmt.Println("Emulating keyboard events will be disabled!")
} else {
defer keyboard.Close() //nolint:errcheck
}
// load deck
deck, err = LoadDeck(dev, ".", *deckFile)
if err != nil {
fatal(err)
}
deck.updateWidgets(dev)
eventLoop(dev, tch)
}
| ["\"DISPLAY\""]
| []
| ["DISPLAY"]
| [] | ["DISPLAY"] | go | 1 | 0 | |
store/main.go | package main
import (
"fmt"
"net/http"
"time"
"os"
"strings"
"github.com/garyburd/redigo/redis"
"github.com/satori/go.uuid"
)
var redisConn redis.Conn
var contentDir string = "./app"
func init() {
redisAddr := os.Getenv("DATA_CACHE_HOST")
if redisAddr == "" {
redisAddr = "127.0.0.1"
}
url := fmt.Sprintf("redis://%s:6379/0", redisAddr)
var err error
redisConn, err = redis.DialURL(url)
if err != nil {
fmt.Printf("Failed to reach redis - %s\n", err)
os.Exit(1)
}
if len(os.Args) > 1 {
contentDir = os.Args[1]
}
}
func main() {
addr := "0.0.0.0:8080"
serveStore(addr)
}
func serveStore(listenAddr string) {
// '/store' for our '/store' routing
http.Handle("/store/buy", logHandler(buyHandler())) // says bought
http.Handle("/store", logHandler(http.StripPrefix("/store", http.FileServer(http.Dir(contentDir))))) // serve static files
http.Handle("/buy", logHandler(buyHandler())) // says bought
http.Handle("/", logHandler(http.FileServer(http.Dir(contentDir)))) // serve static files
// start server
fmt.Printf("Starting server on %s...\n", listenAddr)
err := http.ListenAndServe(listenAddr, nil)
if err != nil {
fmt.Printf("Failed to start server - %s\n", err)
}
}
// buy's handler (POST only)
func buyHandler() http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
if req.Method != "POST" {
rw.WriteHeader(http.StatusNotFound)
return
}
buy(rw, req)
})
}
// buy says bought
func buy(rw http.ResponseWriter, req *http.Request) {
// _, err := redisConn.Do("Set", uuid.NewV4(), "sold")
_, err := redisConn.Do("RPUSH", "sold", uuid.NewV4())
if err != nil {
rw.WriteHeader(500)
rw.Write([]byte("Something broke!\n"))
return
}
rw.WriteHeader(http.StatusOK)
rw.Write([]byte("Consider it bought!\n"))
}
// logging middleware
func logHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
start := time.Now()
writer := &LogRW{rw, 0, 0}
h.ServeHTTP(writer, req)
// if writer.status != http.StatusOK {
if writer.status == http.StatusNotFound {
writer.CustomErr(rw, req, writer.status)
}
remoteAddr := req.RemoteAddr
if fwdFor := req.Header.Get("X-Forwarded-For"); len(fwdFor) > 0 {
// get actual remote (last is oldest remote addr)
fwds := strings.Split(string(fwdFor), ",")
remoteAddr = strings.Trim(fwds[len(fwds)-1], " ")
}
fmt.Printf("%s %s %s%s %s %d(%d) - %s [User-Agent: %s] (%s)\n",
time.Now().Format(time.RFC3339), req.Method, req.Host, req.RequestURI,
req.Proto, writer.status, writer.wrote, remoteAddr,
req.Header.Get("User-Agent"), time.Since(start))
})
}
// LogRW provides the logging functionality I've always wanted, giving access
// to the number of bytes written, as well as the status. (I try to always call
// WriteHeader prior to Write, so status works fine for me.)
type LogRW struct {
http.ResponseWriter
status int
wrote int
}
// WriteHeader matches the response writer interface, and stores the status
func (n *LogRW) WriteHeader(status int) {
n.status = status
// http.FileServer and its (http.)Error() function will write text/plain headers
// which cause the browser to not render the html from our custom error page.
// write 404 page to current url rather than redirect so refreshing the page will
// work properly (if the page becomes available later)
if status != 404 {
n.ResponseWriter.WriteHeader(status)
}
}
// Write matches the response writer interface, and stores the number of bytes written
func (n *LogRW) Write(p []byte) (int, error) {
if n.status == http.StatusNotFound {
n.wrote = len(p)
return n.wrote, nil
}
wrote, err := n.ResponseWriter.Write(p)
n.wrote = wrote
return wrote, err
}
// CustomErr allows us to write a custom error file to the user. It is part of
// LogRW so we can track the bytes written.
func (n *LogRW) CustomErr(w http.ResponseWriter, r *http.Request, status int) {
if status == http.StatusNotFound {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(http.StatusNotFound)
n.wrote, _ = w.Write([]byte("Not found\n"))
}
}
| ["\"DATA_CACHE_HOST\""]
| []
| ["DATA_CACHE_HOST"]
| [] | ["DATA_CACHE_HOST"] | go | 1 | 0 | |
core/database/schema.go | package database
import (
"fmt"
"os"
"github.com/jmoiron/sqlx"
"github.com/lucasmarqs/stonks/core/database/migrations"
_ "github.com/mattn/go-sqlite3"
migrate "github.com/rubenv/sql-migrate"
)
var datasourceName = fmt.Sprintf("%s/.stonks.db", os.Getenv("HOME"))
var db *sqlx.DB
func migrateDB() error {
migrations := &migrate.MemoryMigrationSource{
Migrations: []*migrate.Migration{
&migrate.Migration{
Id: "01",
Up: migrations.Up_01_create_stocks,
},
&migrate.Migration{
Id: "02",
Up: migrations.Up_02_create_entries,
},
&migrate.Migration{
Id: "03",
Up: migrations.Up_03_create_reports,
},
},
}
if db == nil {
EstablishConnection()
}
n, err := migrate.Exec(db.DB, "sqlite3", migrations, migrate.Up)
if err != nil {
return err
}
if n > 0 {
fmt.Printf("applied %d migrations!\n", n)
}
return nil
}
// EstablishConnection attempts to create a new connection of SQLite database.
// It panics if connection or migration fail.
func EstablishConnection() *sqlx.DB {
if db != nil {
return db
}
db = sqlx.MustConnect("sqlite3", datasourceName)
if err := migrateDB(); err != nil {
panic(err)
}
return db
}
| ["\"HOME\""]
| []
| ["HOME"]
| [] | ["HOME"] | go | 1 | 0 | |
storage/s3-sdk/noxfile_config.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# We are reaching the maximum number of HMAC keys on the service account.
# We change the service account based on the value of
# RUN_TESTS_SESSION. The reason we cannot use multiple projects is
# that our new projects are enforced to have the
# 'constraints/iam.disableServiceAccountKeyCreation' policy.
def get_service_account_email():
session = os.environ.get('RUN_TESTS_SESSION')
if session == 'py-3.6':
return ('py36-storage-test@'
'python-docs-samples-tests.iam.gserviceaccount.com')
if session == 'py-3.7':
return ('py37-storage-test@'
'python-docs-samples-tests.iam.gserviceaccount.com')
if session == 'py-3.8':
return ('py38-storage-test@'
'python-docs-samples-tests.iam.gserviceaccount.com')
return os.environ['HMAC_KEY_TEST_SERVICE_ACCOUNT']
TEST_CONFIG_OVERRIDE = {
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
'envs': {
'HMAC_KEY_TEST_SERVICE_ACCOUNT': get_service_account_email(),
# Some tests cannot use multiple projects for several reasons:
# 1. The new projects are enforced to have the
# 'constraints/iam.disableServiceAccountKeyCreation' policy.
# 2. The new projects' buckets need to have a universal permission model.
# For those tests, we'll use the original project.
'MAIN_GOOGLE_CLOUD_PROJECT': 'python-docs-samples-tests'
},
}
| []
| []
| ["RUN_TESTS_SESSION", "HMAC_KEY_TEST_SERVICE_ACCOUNT"]
| [] | ["RUN_TESTS_SESSION", "HMAC_KEY_TEST_SERVICE_ACCOUNT"] | python | 2 | 0 |