filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses, 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses, 3 values) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0-0, ⌀) | sentence (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
tiny_kubernetes/kubernetes.py | # (C) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import base64
import json
import os
import tempfile
import dpath.util
import requests
import yaml
from dotmap import DotMap
CACERT_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
KUBE_CONFIG_PATH = os.environ.get('KUBECONFIG', '~/.kube/config')
KUBERNETES_SERVICE_HOST = os.environ.get('KUBERNETES_SERVICE_HOST', 'kubernetes.default')
KUBERNETES_SERVICE_PORT = os.environ.get('KUBERNETES_SERVICE_PORT', '443')
KUBERNETES_API_URL = "https://{}:{}".format(KUBERNETES_SERVICE_HOST, KUBERNETES_SERVICE_PORT)
DEFAULT_TIMEOUT = 10
TEMP_CERTS = []
def write_temp_cert(encoded_data):
temp_handle, temp_path = tempfile.mkstemp('k8s-cert', text=True)
with os.fdopen(temp_handle, 'wb') as fout:
fout.write(base64.b64decode(encoded_data))
fout.close()
TEMP_CERTS.append(temp_path)
return temp_path
def load_current_kube_credentials():
with open(os.path.expanduser(KUBE_CONFIG_PATH), 'r') as f:
config = DotMap(yaml.safe_load(f))
ctx_name = config['current-context']
ctx = next(c for c in config.contexts if c.name == ctx_name)
cluster = next(c for c in config.clusters if c.name == ctx.context.cluster).cluster
if 'certificate-authority' in cluster:
ca_cert = cluster['certificate-authority']
elif 'certificate-authority-data' in cluster:
ca_cert = write_temp_cert(cluster['certificate-authority-data'])
else:
ca_cert = None
if ctx.context.user:
user = next(u for u in config.users if u.name == ctx.context.user).user
if 'token' in user:
return cluster.server, ca_cert, None, user['token']
else:
if 'client-certificate-data' in user:
client_cert = write_temp_cert(user['client-certificate-data'])
else:
client_cert = user['client-certificate']
if 'client-key-data' in user:
client_key = write_temp_cert(user['client-key-data'])
else:
client_key = user['client-key']
return cluster.server, ca_cert, (client_cert, client_key), None
else:
return cluster.server, ca_cert, None, None
def cleanup_temp_certs():
for cert in TEMP_CERTS:
os.unlink(cert)
atexit.register(cleanup_temp_certs)
class KubernetesAPIError(Exception):
pass
class KubernetesAPIResponse(DotMap):
def __init__(self, response, decoded=None):
super(KubernetesAPIResponse, self).__init__(decoded, _dynamic=False)
self.response = response
def get(self, glob, separator="/"):
return dpath.util.get(self, glob, separator)
def search(self, glob, yielded=False, separator="/", afilter=None, dirs=True):
return dpath.util.search(self, glob, yielded, separator, afilter, dirs)
def set(self, glob, value):
return dpath.util.set(self, glob, value)
def new(self, path, value):
return dpath.util.new(self, path, value)
@property
def status_code(self):
return self.response.status_code
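# Illustrative examples of the dpath-style access that KubernetesAPIResponse
# exposes (not part of the original module); the glob paths assume a typical
# pod-list response body:
#   resp.get('items/0/metadata/name')
#   resp.search('items/*/status/phase', yielded=True)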
class KubernetesAPIClient(object):
def __init__(self, verify=True):
self.session = requests.Session()
self.session.verify = verify
self.api_url = None
def load_cluster_config(self):
self.session.verify = CACERT_PATH
# requests will purge request Content-Type headers if we hit a
# redirect, so try to avoid running into one because of an extra /
self.api_url = KUBERNETES_API_URL.rstrip('/')
with open(TOKEN_PATH, 'r') as f:
token = f.read()
self.session.headers.update({
'Authorization': 'Bearer {}'.format(token)
})
def load_kube_config(self):
server, ca_cert, cert, token = load_current_kube_credentials()
self.api_url = server.rstrip('/')
self.session.verify = ca_cert
if cert is not None:
self.session.cert = cert
elif token is not None:
self.session.headers.update({
'Authorization': 'Bearer {}'.format(token)
})
if self.api_url.endswith('/'):
self.api_url = self.api_url.rstrip('/')
def load_auto_config(self):
if os.path.exists(os.path.expanduser(KUBE_CONFIG_PATH)):
self.load_kube_config()
elif os.path.exists(TOKEN_PATH):
self.load_cluster_config()
else:
raise KubernetesAPIError('No supported API configuration found!')
def request(self, method, path, *args, **kwargs):
raise_for_status = kwargs.pop('raise_for_status', True)
decode = kwargs.pop('decode', True)
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT)
if args:
path = path.format(*args)
slash = '' if path.startswith('/') else '/'
res = self.session.request(
method,
'{}{}{}'.format(self.api_url, slash, path),
timeout=timeout,
**kwargs)
if raise_for_status:
res.raise_for_status()
if decode:
return KubernetesAPIResponse(res, res.json())
else:
return KubernetesAPIResponse(res)
def get(self, path, *args, **kwargs):
kwargs.setdefault('allow_redirects', True)
return self.request('GET', path, *args, **kwargs)
def post(self, path, *args, **kwargs):
return self.request('POST', path, *args, **kwargs)
def delete(self, path, *args, **kwargs):
return self.request('DELETE', path, *args, **kwargs)
def patch(self, path, *args, **kwargs):
return self.request('PATCH', path, *args, **kwargs)
def json_patch(self, ops, path, *args, **kwargs):
if kwargs.get('allow_redirects') is True:
raise ValueError('Patch is not compatible with redirects!')
headers = kwargs.pop('headers', None)
if headers:
headers = headers.copy()
else:
headers = {}
headers['Content-Type'] = 'application/json-patch+json'
resp = self.patch(path,
data=json.dumps(ops), headers=headers,
allow_redirects=False,
*args, **kwargs)
if 300 <= resp.status_code < 400:
raise KubernetesAPIError('Encountered a redirect while sending a '
'PATCH, failing!')
return resp
@classmethod
def from_auto_config(cls, **kwargs):
c = KubernetesAPIClient(**kwargs)
c.load_auto_config()
return c
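if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module: build a client
    # from whichever configuration is available (a local kubeconfig or the
    # in-cluster service-account files referenced by the constants above) and
    # issue a simple GET against the API server.
    client = KubernetesAPIClient.from_auto_config()
    version = client.get('/version')
    print(version.gitVersion)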
| [] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT", "KUBECONFIG"] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT", "KUBECONFIG"] | python | 3 | 0 | |
server.go | package main
import (
"os"
"strings"
"github.com/deiu/eth-auth/internal/server"
)
func main() {
conf := server.Config{}
if os.Getenv("INFURA_API_URL") != "" {
conf.InfuraURL = os.Getenv("INFURA_API_URL")
}
if os.Getenv("INFURA_API_KEY") != "" {
conf.InfuraKey = os.Getenv("INFURA_API_KEY")
}
if os.Getenv("ORIGINS") != "" {
conf.Origins = strings.Fields(os.Getenv("ORIGINS"))
}
if os.Getenv("ETH_PRIVKEY") != "" {
conf.JWTSecret = os.Getenv("ETH_PRIVKEY")
}
if os.Getenv("LOGGING") != "" {
conf.Logging = true
}
server.Listen(3000, conf)
}
| ["\"INFURA_API_URL\"", "\"INFURA_API_URL\"", "\"INFURA_API_KEY\"", "\"INFURA_API_KEY\"", "\"ORIGINS\"", "\"ORIGINS\"", "\"ETH_PRIVKEY\"", "\"ETH_PRIVKEY\"", "\"LOGGING\""] | [] | ["INFURA_API_KEY", "ETH_PRIVKEY", "ORIGINS", "INFURA_API_URL", "LOGGING"] | [] | ["INFURA_API_KEY", "ETH_PRIVKEY", "ORIGINS", "INFURA_API_URL", "LOGGING"] | go | 5 | 0 | |
torture_test.go | package govaluate_test
/*
Courtesy of abrander
ref: https://gist.github.com/abrander/fa05ae9b181b48ffe7afb12c961b6e90
*/
import (
	. "github.com/Knetic/govaluate"
"fmt"
"math/rand"
"os"
"testing"
"time"
)
var (
hello = "hello"
empty struct{}
empty2 *string
empty3 *int
values = []interface{}{
-1,
0,
12,
13,
"",
"hello",
&hello,
nil,
"nil",
empty,
empty2,
true,
false,
time.Now(),
rune('r'),
int64(34),
time.Duration(0),
"true",
"false",
"\ntrue\n",
"\nfalse\n",
"12",
"nil",
"arg1",
"arg2",
int(12),
int32(12),
int64(12),
complex(1.0, 1.0),
[]byte{0, 0, 0},
[]int{0, 0, 0},
[]string{},
"[]",
"{}",
"\"\"",
"\"12\"",
"\"hello\"",
".*",
"==",
"!=",
">",
">=",
"<",
"<=",
"=~",
"!~",
"in",
"&&",
"||",
"^",
"&",
"|",
">>",
"<<",
"+",
"-",
"*",
"/",
"%",
"**",
"-",
"!",
"~",
"?",
":",
"??",
"+",
"-",
"*",
"/",
"%",
"**",
"&",
"|",
"^",
">>",
"<<",
",",
"(",
")",
"[",
"]",
"\n",
"\000",
}
panics = 0
)
const (
ITERATIONS = 10000000
SEED = 1487873697990155515
)
func init() {
rand.Seed(SEED)
}
func TestPanics(test *testing.T) {
if os.Getenv("GOVALUATE_TORTURE_TEST") == "" {
test.Logf("'GOVALUATE_TORTURE_TEST' env var not set - skipping torture test.")
test.Skip()
return
}
fmt.Printf("Running %d torture test cases...\n", ITERATIONS)
for i := 0; i < ITERATIONS; i++ {
num := rand.Intn(3) + 2
expression := ""
for n := 0; n < num; n++ {
expression += fmt.Sprintf(" %s", getRandom(values))
}
checkPanic(expression, test)
}
test.Logf("Done. %d/%d panics.\n", panics, ITERATIONS)
if panics > 0 {
test.Fail()
}
}
func checkPanic(expression string, test *testing.T) {
parameters := make(map[string]interface{})
defer func() {
if r := recover(); r != nil {
test.Logf("Panic: \"%s\". Expression: \"%s\". Parameters: %+v\n", r, expression, parameters)
panics++
}
}()
eval, _ := NewEvaluableExpression(expression)
if eval == nil {
return
}
vars := eval.Vars()
for _, v := range vars {
parameters[v] = getRandom(values)
}
eval.Evaluate(parameters)
}
func getRandom(haystack []interface{}) interface{} {
i := rand.Intn(len(haystack))
return haystack[i]
}
| ["\"GOVALUATE_TORTURE_TEST\""] | [] | ["GOVALUATE_TORTURE_TEST"] | [] | ["GOVALUATE_TORTURE_TEST"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crud.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
arvinddhindsa/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'arvinddhindsa.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
locust/locustfile.py | import json
import random
import uuid
import time
import jwt
import urllib
import os
from locust import HttpLocust, TaskSet, task
from locust.exception import InterruptTaskSet, StopLocust
from locust.web import logger
an_endpoint_uri = "/some/uri/with/path/value/%s"
def queryGlobalMethodExample(self, value):
headers = {
"authId": self.locust.user_token,
"Content-Type": "application/json"
}
response = self.client.get(
an_endpoint_uri % value, name="/some/uri/with/path/value/[value]",
headers=headers)
if response.status_code == 200:
return response.json()
return None
class UserBehaviourTaskSet(TaskSet):
@task(1)
class ExampleOneTaskSet(TaskSet):
min_wait = 60000
max_wait = 60000
endpoint_1_uri = "/some/uri/with/path/value/%s"
def queryLocalMethodExample(self, value):
headers = {
"authId": self.locust.user_token,
"Content-Type": "application/json"
}
response = self.client.get(
self.endpoint_1_uri % value, name="/some/uri/with/path/value/[value]",
headers=headers)
if response.status_code == 200:
return response.json()
return None
@task(0) # integer defines the weight of this task in relation to the other tasks
def debug(self):
if not self.locust.user_token:
logger.warn("Aborting task - unexpected user token")
raise InterruptTaskSet(False)
@task(1)
def executeTestOne(self):
result1 = queryGlobalMethodExample(self, "some-value")
if result1 is not None:
logger.debug("result2: %s" % result1)
else:
logger.warn("Error occurred... aborting task.")
raise InterruptTaskSet(False)
@task(1)
def executeTestTwo(self):
result1 = self.queryLocalMethodExample("some-value")
if result1 is not None:
logger.debug("result1: %s" % result1)
else:
logger.warn("Unrecoverable error occurred!")
raise StopLocust("RIP...")
class UserBehaviourLocust(HttpLocust):
task_set = UserBehaviourTaskSet
user_token = None
def __init__(self, *args, **kwargs):
super(UserBehaviourLocust, self).__init__(*args, **kwargs)
self.user_token = os.environ['EXT_TOKEN']
if self.user_token is None:
raise StopLocust("Invalid token. Seppuku!!!")
logger.info("Locust [%s] is joining the swarm..." % self.user_id)
| [] | [] | ["EXT_TOKEN"] | [] | ["EXT_TOKEN"] | python | 1 | 0 | |
internal/tools/terraform/plan.go | package terraform
import (
"encoding/json"
"fmt"
"os"
"strings"
model "github.com/mattermost/mattermost-apps/model"
"github.com/pkg/errors"
)
type terraformOutput struct {
Sensitive bool `json:"sensitive"`
Type string `json:"type"`
Value interface{} `json:"value"`
}
// Init invokes terraform init.
func (c *Cmd) Init(remoteKey string) error {
_, _, err := c.run(
"init",
arg("backend-config", fmt.Sprintf("bucket=%s", c.remoteStateBucket)),
arg("backend-config", fmt.Sprintf("key=%s", remoteKey)),
arg("backend-config", "region=us-east-1"),
)
if err != nil {
return errors.Wrap(err, "failed to invoke terraform init")
}
return nil
}
// Plan invokes terraform Plan.
func (c *Cmd) Plan(function model.Function) error {
_, _, err := c.run(
"plan",
arg("input", "false"),
arg("var", fmt.Sprintf("lambda_name=%s", function.Name)),
arg("var", fmt.Sprintf("lambda_file=%s", function.ZipFile)),
arg("var", fmt.Sprintf("environment=%s", function.Environment)),
arg("var", fmt.Sprintf("bundle_name=%s", function.BundleName)),
arg("var", fmt.Sprintf("handler=%s", function.Handler)),
arg("var", fmt.Sprintf("runtime=%s", function.Runtime)),
arg("var", fmt.Sprintf("private_subnet_ids=%s", os.Getenv("PrivateSubnetIDs"))),
)
if err != nil {
return errors.Wrap(err, "failed to invoke terraform plan")
}
return nil
}
// Apply invokes terraform apply.
func (c *Cmd) Apply(function model.Function) error {
_, _, err := c.run(
"apply",
arg("input", "false"),
arg("var", fmt.Sprintf("lambda_name=%s", function.Name)),
arg("var", fmt.Sprintf("lambda_file=%s", function.ZipFile)),
arg("var", fmt.Sprintf("environment=%s", function.Environment)),
arg("var", fmt.Sprintf("bundle_name=%s", function.BundleName)),
arg("var", fmt.Sprintf("handler=%s", function.Handler)),
arg("var", fmt.Sprintf("runtime=%s", function.Runtime)),
arg("var", fmt.Sprintf("private_subnet_ids=%s", os.Getenv("PrivateSubnetIDs"))),
arg("auto-approve"),
)
if err != nil {
return errors.Wrap(err, "failed to invoke terraform apply")
}
return nil
}
// ApplyTarget invokes terraform apply with the given target.
func (c *Cmd) ApplyTarget(target string) error {
_, _, err := c.run(
"apply",
arg("input", "false"),
arg("target", target),
arg("auto-approve"),
)
if err != nil {
return errors.Wrap(err, "failed to invoke terraform apply")
}
return nil
}
// Destroy invokes terraform destroy.
func (c *Cmd) Destroy() error {
_, _, err := c.run(
"destroy",
"-auto-approve",
)
if err != nil {
return errors.Wrap(err, "failed to invoke terraform destroy")
}
return nil
}
// Output invokes terraform output and returns the named value, true if it exists, and an empty
// string and false if it does not.
func (c *Cmd) Output(variable string) (string, bool, error) {
stdout, _, err := c.run(
"output",
"-json",
)
if err != nil {
return string(stdout), false, errors.Wrap(err, "failed to invoke terraform output")
}
var outputs map[string]terraformOutput
err = json.Unmarshal(stdout, &outputs)
if err != nil {
return string(stdout), false, errors.Wrap(err, "failed to parse terraform output")
}
value, ok := outputs[variable]
return fmt.Sprintf("%s", value.Value), ok, nil
}
// Version invokes terraform version and returns the value.
func (c *Cmd) Version() (string, error) {
stdout, _, err := c.run("version")
trimmed := strings.TrimSuffix(string(stdout), "\n")
if err != nil {
return trimmed, errors.Wrap(err, "failed to invoke terraform version")
}
return trimmed, nil
}
| ["\"PrivateSubnetIDs\"", "\"PrivateSubnetIDs\""] | [] | ["PrivateSubnetIDs"] | [] | ["PrivateSubnetIDs"] | go | 1 | 0 | |
scripts/trainable/seq_modeling/elmosclstm.py |
#############################################
# USAGE
# CUDA_VISIBLE_DEVICES=1 python elmosclstm.py probword ../../data -1
# CUDA_VISIBLE_DEVICES=2 python elmosclstm.py bea40kfinetune ../../data -1
#
# CUDA_VISIBLE_DEVICES=1 python elmosclstm.py none ../../data 1
# CUDA_VISIBLE_DEVICES=1 python elmosclstm.py random ../../data 1
# CUDA_VISIBLE_DEVICES=1 python elmosclstm.py word ../../data 1
# CUDA_VISIBLE_DEVICES=1 python elmosclstm.py prob ../../data 1
# CUDA_VISIBLE_DEVICES=1 python elmosclstm.py probword ../../data 1
# CUDA_VISIBLE_DEVICES=1 python elmosclstm.py probword_v2 ../../data 1
#############################################
############################################
# TO-DO
# ----
# 1. How to set multip-gpu in torch for training
############################################
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/.")
# export CUDA_VISIBLE_DEVICES=1,2 && echo $CUDA_VISIBLE_DEVICES
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "2"
from tqdm import tqdm
import numpy as np
import re
import time
from typing import List
import torch
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
DEVICE = 'cuda:0' if torch.cuda.is_available() else "cpu"
# torch.cuda.set_device(1)
# print(torch.cuda.current_device())
from helpers import progressBar
from helpers import load_vocab_dict, save_vocab_dict
from helpers import load_data, train_validation_split, get_char_tokens, get_tokens, num_unk_tokens
from helpers import batch_iter, labelize, tokenize, char_tokenize, sclstm_tokenize
from helpers import untokenize, untokenize_without_unks, untokenize_without_unks2, untokenize_without_unks3, get_model_nparams
from helpers import batch_accuracy_func
from helpers2 import get_line_representation, get_lines
from models import ElmoSCLSTM
from allennlp.modules.elmo import batch_to_ids as elmo_batch_to_ids
from evals import get_metrics
""" NEW: reranking snippets """
# (GPT/GPT-2/CTRL/Transformer-XL/XLNet)
import torch
from torch.nn import CrossEntropyLoss
HFACE_BATCH_SIZE = 8
RERANKER = "GPT-2" # GPT/GPT-2/CTRL/Transformer-XL/XLNet
if RERANKER=="GPT":
from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel
gpt2Tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
gpt2LMHeadModel = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
gpt2Tokenizer.add_special_tokens({'pad_token':"[PAD]"})
gpt2LMHeadModel.resize_token_embeddings(len(gpt2Tokenizer))
assert gpt2Tokenizer.pad_token == '[PAD]'
elif "GPT-2":
from transformers import GPT2Tokenizer, GPT2LMHeadModel
gpt2Tokenizer = GPT2Tokenizer.from_pretrained('gpt2-medium')
gpt2LMHeadModel = GPT2LMHeadModel.from_pretrained('gpt2-medium')
gpt2Tokenizer.pad_token = gpt2Tokenizer.eos_token
elif "Transformer-XL":
from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel
txlTokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
txlLMHeadModel = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
txlTokenizer.pad_token = txlTokenizer.eos_token
else:
raise NotImplementedError
def get_losses_from_gpt_lm(this_sents: "list[str]", gpt2LMHeadModel, gpt2Tokenizer, DEVICE):
this_input_ids = gpt2Tokenizer.batch_encode_plus(this_sents, add_special_tokens=True, pad_to_max_length=True, add_space_before_punct_symbol=True)["input_ids"]
this_labels = torch.tensor([[i if i!=gpt2Tokenizer.pad_token_id else -100 for i in row] for row in this_input_ids]).to(DEVICE)
this_input_ids = torch.tensor(this_input_ids).to(DEVICE)
this_outputs = gpt2LMHeadModel(input_ids=this_input_ids)
this_lm_logits = this_outputs[0]
# Shift so that tokens < n predict n
shift_logits2 = this_lm_logits[:, :-1, :]
shift_labels2 = this_labels[:, 1:]
# Flatten the tokens
loss_fct = CrossEntropyLoss(reduction='none')
loss = loss_fct(shift_logits2.permute(0,2,1), shift_labels2)
losses = loss.sum(dim=-1).cpu().detach().numpy().tolist()
return losses
def get_losses_from_txl_lm(this_sents: "list[str]", txlLMHeadModel, txlTokenizer, DEVICE):
this_input_ids_dict = txlTokenizer.batch_encode_plus(this_sents, add_special_tokens=True, pad_to_max_length=True, add_space_before_punct_symbol=True)
this_input_ids = this_input_ids_dict["input_ids"]
chunks = [sum(val) for val in this_input_ids_dict["attention_mask"]]
chunks_cumsum = np.cumsum(chunks).tolist()
this_labels = torch.tensor([[i if i!=txlTokenizer.pad_token_id else -100 for i in row] for row in this_input_ids]).to(DEVICE)
this_input_ids = torch.tensor(this_input_ids).to(DEVICE)
this_outputs = txlLMHeadModel(input_ids=this_input_ids,labels=this_labels)
this_loss = this_outputs[0]
this_loss = this_loss.view(-1).cpu().detach().numpy()
losses = [sum(this_loss[str_pos:end_pos-1]) for str_pos,end_pos in zip([0]+chunks_cumsum[:-1],chunks_cumsum)]
return losses
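def rerank_candidates_example(candidate_sentences):
    # Illustrative helper, not part of the original pipeline: score a small list of
    # candidate corrections with the GPT-2 LM losses computed above and return the
    # lowest-loss (most fluent) candidate. This mirrors the reranking step applied
    # inside model_inference() when beam_search=True.
    gpt2LMHeadModel.to(DEVICE)
    gpt2LMHeadModel.eval()
    with torch.no_grad():
        losses = get_losses_from_gpt_lm(candidate_sentences, gpt2LMHeadModel, gpt2Tokenizer, DEVICE)
    return candidate_sentences[int(np.argmin(losses))]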
def load_model(vocab, verbose=False):
model = ElmoSCLSTM(3*len(vocab["chartoken2idx"]),vocab["token2idx"][ vocab["pad_token"] ],len(vocab["token_freq"]))
if verbose:
print(model)
print( get_model_nparams(model) )
return model
def load_pretrained(model, CHECKPOINT_PATH, optimizer=None, device='cuda'):
if torch.cuda.is_available() and device != "cpu":
map_location = lambda storage, loc: storage.cuda()
else:
map_location = 'cpu'
print(f"Loading model params from checkpoint dir: {CHECKPOINT_PATH}")
checkpoint_data = torch.load(os.path.join(CHECKPOINT_PATH, "model.pth.tar"), map_location=map_location)
# print(f"previously model saved at : {checkpoint_data['epoch_id']}")
model.load_state_dict(checkpoint_data['model_state_dict'])
if optimizer is not None:
optimizer.load_state_dict(checkpoint_data['optimizer_state_dict'])
max_dev_acc, argmax_dev_acc = checkpoint_data["max_dev_acc"], checkpoint_data["argmax_dev_acc"]
print(f"previously, max_dev_acc: {max_dev_acc:.5f} and argmax_dev_acc: {argmax_dev_acc:.5f}")
if optimizer is not None:
return model, optimizer, max_dev_acc, argmax_dev_acc
return model
def model_predictions(model, data, vocab, DEVICE, BATCH_SIZE=16, backoff="pass-through"):
"""
model: an instance of ElmoSCLSTM
data: list of tuples, with each tuple consisting of correct and incorrect
sentence string (would be split at whitespaces)
"""
topk = 1
# print("###############################################")
# inference_st_time = time.time()
final_sentences = []
VALID_BATCH_SIZE = BATCH_SIZE
# print("data size: {}".format(len(data)))
data_iter = batch_iter(data, batch_size=VALID_BATCH_SIZE, shuffle=False)
model.eval()
model.to(DEVICE)
for batch_id, (batch_clean_sentences,batch_corrupt_sentences) in enumerate(data_iter):
# set batch data
batch_labels, batch_lengths = labelize(batch_clean_sentences, vocab)
batch_idxs, batch_lengths_ = sclstm_tokenize(batch_corrupt_sentences, vocab)
assert (batch_lengths_==batch_lengths).all()==True
batch_idxs = [batch_idxs_.to(DEVICE) for batch_idxs_ in batch_idxs]
batch_lengths = batch_lengths.to(DEVICE)
batch_labels = batch_labels.to(DEVICE)
batch_elmo_inp = elmo_batch_to_ids([line.split() for line in batch_corrupt_sentences]).to(DEVICE)
# forward
with torch.no_grad():
"""
NEW: batch_predictions can now be of shape (batch_size,batch_max_seq_len,topk) if topk>1, else (batch_size,batch_max_seq_len)
"""
_, batch_predictions = model(batch_idxs, batch_lengths, batch_elmo_inp, targets=batch_labels, topk=topk)
batch_predictions = untokenize_without_unks(batch_predictions, batch_lengths, vocab, batch_clean_sentences, backoff=backoff)
final_sentences.extend(batch_predictions)
# print("total inference time for this data is: {:4f} secs".format(time.time()-inference_st_time))
return final_sentences
def model_predictions_for_ui(model, data, vocab, DEVICE, BATCH_SIZE=16, backoff="pass-through", beam_search=True, topk=3):
"""
model: an instance of ElmoSCLSTM
data: list of tuples, with each tuple consisting of correct and incorrect
sentence string (would be split at whitespaces)
"""
assert len(data)==1, print(len(data))
if beam_search:
if topk<2:
raise Exception("when using beam_search, topk must be greater than 1, topk is used as beam width")
else:
print(f":: doing BEAM SEARCH with topk:{topk} ::")
else:
assert topk==1, print("if not beam_search, topk is set to 1 for UI-website purposes")
print(f"beam_search: {beam_search} and topk: {topk}")
print("data size: {}".format(len(data)))
final_sentences = []
VALID_BATCH_SIZE = BATCH_SIZE
data_iter = batch_iter(data, batch_size=VALID_BATCH_SIZE, shuffle=False)
model.eval()
model.to(DEVICE)
for batch_id, (batch_clean_sentences,batch_corrupt_sentences) in enumerate(data_iter):
# set batch data
batch_labels, batch_lengths = labelize(batch_clean_sentences, vocab)
batch_idxs, batch_lengths_ = sclstm_tokenize(batch_corrupt_sentences, vocab)
assert (batch_lengths_==batch_lengths).all()==True
batch_idxs = [batch_idxs_.to(DEVICE) for batch_idxs_ in batch_idxs]
batch_lengths = batch_lengths.to(DEVICE)
batch_labels = batch_labels.to(DEVICE)
batch_elmo_inp = elmo_batch_to_ids([line.split() for line in batch_corrupt_sentences]).to(DEVICE)
# forward
try:
with torch.no_grad():
if not beam_search:
"""
NEW: batch_predictions can now be of shape (batch_size,batch_max_seq_len,topk) if topk>1, else (batch_size,batch_max_seq_len) if topk==1
"""
_, batch_predictions = model(batch_idxs, batch_lengths, batch_elmo_inp, targets=batch_labels, topk=topk) # topk=1 or 5
else:
"""
NEW: batch_predictions can now be of shape (batch_size,batch_max_seq_len,topk) if topk==None
"""
_, batch_predictions, batch_predictions_probs = model(batch_idxs, batch_lengths, batch_elmo_inp, targets=batch_labels, topk=topk, beam_search=True)
except RuntimeError:
print(f"batch_idxs:{len(batch_idxs)},batch_lengths:{batch_lengths.shape},batch_elmo_inp:{batch_elmo_inp.shape},batch_labels:{batch_labels.shape}")
raise Exception("")
# based on beam_search, do either greedy topk or beam search for topk
if not beam_search:
batch_predictions = untokenize_without_unks(batch_predictions, batch_lengths, vocab, batch_clean_sentences, backoff=backoff)
final_sentences = batch_predictions # a list with single answer
else:
k_batch_predictions, k_batch_predictions_probs = untokenize_without_unks3(batch_predictions, batch_predictions_probs, batch_lengths, vocab, batch_clean_sentences, topk)
final_sentences = [x[0] for x in k_batch_predictions] # a list with multiple answers
print("*&$&%^$*^*&%")
print(final_sentences)
print("*&$&%^$*^*&%")
return final_sentences
def model_inference(model, data, topk, DEVICE, BATCH_SIZE=16, beam_search=False, selected_lines_file=None, vocab_=None):
"""
model: an instance of ElmoSCLSTM
data: list of tuples, with each tuple consisting of correct and incorrect
sentence string (would be split at whitespaces)
topk: how many of the topk softmax predictions are considered for metrics calculations
DEVICE: "cuda:0" or "cpu"
BATCH_SIZE: batch size for input to the model
beam_search: if True, greedy topk will not be performed
"""
if vocab_ is not None:
vocab = vocab_
if beam_search:
if topk<2:
raise Exception("when using beam_search, topk must be greater than 1, topk is used as beam width")
else:
print(f":: doing BEAM SEARCH with topk:{topk} ::")
if selected_lines_file is not None:
raise Exception("when using beam_search, ***selected_lines_file*** arg is not used; no implementation")
# list of dicts with keys {"id":, "original":, "noised":, "predicted":, "topk":, "topk_prediction_probs":, "topk_reranker_losses":,}
results = []
line_index = 0
inference_st_time = time.time()
VALID_BATCH_SIZE = BATCH_SIZE
valid_loss, valid_acc = 0., 0.
corr2corr, corr2incorr, incorr2corr, incorr2incorr = 0, 0, 0, 0
predictions = []
print("data size: {}".format(len(data)))
data_iter = batch_iter(data, batch_size=VALID_BATCH_SIZE, shuffle=False)
model.eval()
model.to(DEVICE)
for batch_id, (batch_clean_sentences,batch_corrupt_sentences) in tqdm(enumerate(data_iter)):
torch.cuda.empty_cache()
# st_time = time.time()
# set batch data
batch_labels, batch_lengths = labelize(batch_clean_sentences, vocab)
batch_idxs, batch_lengths_ = sclstm_tokenize(batch_corrupt_sentences, vocab)
assert (batch_lengths_==batch_lengths).all()==True
batch_idxs = [batch_idxs_.to(DEVICE) for batch_idxs_ in batch_idxs]
batch_lengths = batch_lengths.to(DEVICE)
batch_labels = batch_labels.to(DEVICE)
batch_elmo_inp = elmo_batch_to_ids([line.split() for line in batch_corrupt_sentences]).to(DEVICE)
# forward
try:
with torch.no_grad():
if not beam_search:
"""
NEW: batch_predictions can now be of shape (batch_size,batch_max_seq_len,topk) if topk>1, else (batch_size,batch_max_seq_len) if topk==1
"""
batch_loss, batch_predictions = model(batch_idxs, batch_lengths, batch_elmo_inp, targets=batch_labels, topk=topk) # topk=1 or 5
else:
"""
NEW: batch_predictions can now be of shape (batch_size,batch_max_seq_len,topk) if topk==None
"""
batch_loss, batch_predictions, batch_predictions_probs = model(batch_idxs, batch_lengths, batch_elmo_inp, targets=batch_labels, topk=topk, beam_search=True)
except RuntimeError:
print(f"batch_idxs:{len(batch_idxs)},batch_lengths:{batch_lengths.shape},batch_elmo_inp:{batch_elmo_inp.shape},batch_labels:{batch_labels.shape}")
raise Exception("")
valid_loss += batch_loss
# compute accuracy in numpy
batch_labels = batch_labels.cpu().detach().numpy()
batch_lengths = batch_lengths.cpu().detach().numpy()
# based on beam_search, do either greedy topk or beam search for topk
if not beam_search:
# based on topk, obtain either strings of batch_predictions or list of tokens
if topk==1:
batch_predictions = untokenize_without_unks(batch_predictions, batch_lengths, vocab, batch_corrupt_sentences)
else:
batch_predictions = untokenize_without_unks2(batch_predictions, batch_lengths, vocab, batch_corrupt_sentences)
predictions.extend(batch_predictions)
batch_clean_sentences = [line.lower() for line in batch_clean_sentences]
batch_corrupt_sentences = [line.lower() for line in batch_corrupt_sentences]
batch_predictions = [line.lower() for line in batch_predictions]
corr2corr_, corr2incorr_, incorr2corr_, incorr2incorr_ = \
get_metrics(batch_clean_sentences,batch_corrupt_sentences,batch_predictions,check_until_topk=topk,return_mistakes=False)
corr2corr+=corr2corr_
corr2incorr+=corr2incorr_
incorr2corr+=incorr2corr_
incorr2incorr+=incorr2incorr_
for i, (a,b,c) in enumerate(zip(batch_clean_sentences,batch_corrupt_sentences,batch_predictions)):
results.append({"id":line_index+i, "original":a, "noised":b, "predicted":c, "topk":[], "topk_prediction_probs":[], "topk_reranker_losses":[]})
line_index += len(batch_clean_sentences)
else:
"""
NEW: use untokenize_without_unks3 for beam search outputs
"""
# k different lists each of type batch_predictions as in topk==1
# List[List[Strings]]
k_batch_predictions, k_batch_predictions_probs = untokenize_without_unks3(batch_predictions, batch_predictions_probs, batch_lengths, vocab, batch_corrupt_sentences, topk)
##########################################################
############## this takes top1 as-is #####################
# corr2corr_, corr2incorr_, incorr2corr_, incorr2incorr_ = \
# get_metrics(batch_clean_sentences,batch_corrupt_sentences,k_batch_predictions[0],check_until_topk=1,return_mistakes=False)
# corr2corr+=corr2corr_
# corr2incorr+=corr2incorr_
# incorr2corr+=incorr2corr_
# incorr2incorr+=incorr2incorr_
##########################################################
############### this does reranking ######################
gpt2LMHeadModel.to(DEVICE)
gpt2LMHeadModel.eval()
# txlLMHeadModel.to(DEVICE)
# txlLMHeadModel.eval()
reranked_batch_predictions = []
batch_clean_sentences_ = []
batch_corrupt_sentences_ = []
batch_losses_ = []
with torch.no_grad():
for b in range(len(batch_clean_sentences)):
losses = []
this_sents = [k_batch_predictions[k][b] for k in range(topk)]
losses = get_losses_from_gpt_lm(this_sents, gpt2LMHeadModel, gpt2Tokenizer, DEVICE)
# losses = get_losses_from_txl_lm(this_sents, txlLMHeadModel, txlTokenizer, DEVICE)
kmin = np.argmin(losses)
reranked_batch_predictions.append(k_batch_predictions[kmin][b])
batch_clean_sentences_.append(batch_clean_sentences[b])
batch_corrupt_sentences_.append(batch_corrupt_sentences[b])
batch_losses_.append(losses)
corr2corr_, corr2incorr_, incorr2corr_, incorr2incorr_ = \
get_metrics(batch_clean_sentences_,batch_corrupt_sentences_,reranked_batch_predictions,check_until_topk=1,return_mistakes=False)
corr2corr+=corr2corr_
corr2incorr+=corr2incorr_
incorr2corr+=incorr2corr_
incorr2incorr+=incorr2incorr_
batch_predictions_k = [[k_batch_predictions[j][i] for j in range(len(k_batch_predictions))] for i in range(len(k_batch_predictions[0]))]
batch_predictions_probs_k = [[k_batch_predictions_probs[j][i] for j in range(len(k_batch_predictions_probs))] for i in range(len(k_batch_predictions_probs[0]))]
for i, (a,b,c,d,e,f) in \
enumerate(zip(batch_clean_sentences_,batch_corrupt_sentences_,reranked_batch_predictions,batch_predictions_k,batch_predictions_probs_k,batch_losses_)):
results.append({"id":line_index+i, "original":a, "noised":b, "predicted":c, "topk":d, "topk_prediction_probs":e, "topk_reranker_losses":f})
line_index += len(batch_clean_sentences)
# delete
del batch_loss
del batch_predictions
del batch_labels, batch_lengths, batch_idxs, batch_lengths_, batch_elmo_inp
torch.cuda.empty_cache()
# '''
# # update progress
# progressBar(batch_id+1,
# int(np.ceil(len(data) / VALID_BATCH_SIZE)),
# ["batch_time","batch_loss","avg_batch_loss","batch_acc","avg_batch_acc"],
# [time.time()-st_time,batch_loss,valid_loss/(batch_id+1),None,None])
# '''
print(f"\nEpoch {None} valid_loss: {valid_loss/(batch_id+1)}")
print("total inference time for this data is: {:4f} secs".format(time.time()-inference_st_time))
print("###############################################")
print("total token count: {}".format(corr2corr+corr2incorr+incorr2corr+incorr2incorr))
print(f"corr2corr:{corr2corr}, corr2incorr:{corr2incorr}, incorr2corr:{incorr2corr}, incorr2incorr:{incorr2incorr}")
print(f"accuracy is {(corr2corr+incorr2corr)/(corr2corr+corr2incorr+incorr2corr+incorr2incorr)}")
print(f"word correction rate is {(incorr2corr)/(incorr2corr+incorr2incorr)}")
print("###############################################")
if not beam_search and selected_lines_file is not None:
print("evaluating only for selected lines ... ")
assert len(data)==len(predictions), print(len(data),len(predictions),"lengths mismatch")
if selected_lines_file is not None:
selected_lines = {num:"" for num in [int(line.strip()) for line in open(selected_lines_file,'r')]}
else:
selected_lines = None
clean_lines, corrupt_lines,predictions_lines = [tpl[0] for tpl in data], [tpl[1] for tpl in data], predictions
corr2corr, corr2incorr, incorr2corr, incorr2incorr, mistakes = \
get_metrics(clean_lines,corrupt_lines,predictions_lines,return_mistakes=True,selected_lines=selected_lines)
print("###############################################")
print("total token count: {}".format(corr2corr+corr2incorr+incorr2corr+incorr2incorr))
print(f"corr2corr:{corr2corr}, corr2incorr:{corr2incorr}, incorr2corr:{incorr2corr}, incorr2incorr:{incorr2incorr}")
print(f"accuracy is {(corr2corr+incorr2corr)/(corr2corr+corr2incorr+incorr2corr+incorr2incorr)}")
print(f"word correction rate is {(incorr2corr)/(incorr2corr+incorr2incorr)}")
print("###############################################")
return results
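# Illustrative invocations (not part of the original script), assuming `model`,
# `test_data` and the global `vocab` are already loaded as in the __main__ block
# below: greedy decoding vs. LM-reranked beam search over the same test split.
#   greedy_results = model_inference(model, test_data, topk=1, DEVICE=DEVICE, BATCH_SIZE=16, beam_search=False)
#   reranked_results = model_inference(model, test_data, topk=10, DEVICE=DEVICE, BATCH_SIZE=16, beam_search=True)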
if __name__=="__main__":
print("#########################"+"\n")
# "word", "prob", "probword", 'random', bea40kfinetune', 'moviereviewsfinetune'
TRAIN_NOISE_TYPE = sys.argv[1]
# "../../data"
BASE_PATH = sys.argv[2]
# -ve value for inference only; 1 for training a new model from scratch; >1 for continuing training
START_EPOCH = int(sys.argv[3])
if START_EPOCH==0:
raise Exception("START_EPOCH must be a non-zero value; If starting from scratch, use 1 instead of 0")
# :NEW: finetune now from a specific epoch of a model
# "probword"
if len(sys.argv)>4:
FINETUNE = sys.argv[4]
if FINETUNE=='probword':
SRC_CHECKPOINT_PATH = os.path.join(BASE_PATH, "checkpoints/elmoscrnn-probwordnoise")
SRC_VOCAB_PATH = os.path.join(SRC_CHECKPOINT_PATH,"vocab.pkl")
print(f"Model finetuning with arg: {FINETUNE}, and source model selected from: {SRC_CHECKPOINT_PATH}")
else:
raise Exception("only ```probword``` is now supported for finetuning")
assert os.path.exists(SRC_CHECKPOINT_PATH), print(f"{SRC_CHECKPOINT_PATH} path unavailable")
else:
FINETUNE = ""
#############################################
# environment
#############################################
# checkpoint path for this model
if TRAIN_NOISE_TYPE=="word":
CHECKPOINT_PATH = os.path.join(BASE_PATH, "checkpoints/elmoscrnn-wordnoise")
elif TRAIN_NOISE_TYPE=="prob":
CHECKPOINT_PATH = os.path.join(BASE_PATH, "checkpoints/elmoscrnn-probnoise")
elif TRAIN_NOISE_TYPE=="random":
CHECKPOINT_PATH = os.path.join(BASE_PATH, "checkpoints/elmoscrnn-randomnoise")
elif TRAIN_NOISE_TYPE=="probword":
CHECKPOINT_PATH = os.path.join(BASE_PATH, "checkpoints/elmoscrnn-probwordnoise")
elif TRAIN_NOISE_TYPE=="probword_v2":
CHECKPOINT_PATH = os.path.join(BASE_PATH, "checkpoints/elmoscrnn-probwordnoise_v2")
elif TRAIN_NOISE_TYPE=="bea40kfinetune":
CHECKPOINT_PATH = os.path.join(BASE_PATH, "checkpoints/elmoscrnn-probwordnoise-bea40kfinetune")
elif TRAIN_NOISE_TYPE=="moviereviewsfinetune":
CHECKPOINT_PATH = os.path.join(BASE_PATH, "checkpoints/elmoscrnn-probwordnoise-moviereviewsfinetune2")
elif TRAIN_NOISE_TYPE=="none":
CHECKPOINT_PATH = os.path.join(BASE_PATH, "checkpoints/elmoscrnn-none")
else:
raise Exception("invalid TRAIN_NOISE_TYPE")
if not os.path.exists(CHECKPOINT_PATH):
os.makedirs(CHECKPOINT_PATH)
VOCAB_PATH = os.path.join(CHECKPOINT_PATH,"vocab.pkl")
# settings
print("#########################"+"\n")
START_EPOCH, N_EPOCHS = START_EPOCH, 50
TRAIN_BATCH_SIZE, VALID_BATCH_SIZE = 64, 50 # 16, 16
#############################################
# load train data (if required)
#############################################
TRAIN_TEST_FILE_PATH = os.path.join(BASE_PATH, "traintest/")
if START_EPOCH>0:
if FINETUNE!="":
print("loading vocab for finetuning")
print(f"loading vocab from {SRC_VOCAB_PATH}")
vocab = load_vocab_dict(SRC_VOCAB_PATH)
save_vocab_dict(VOCAB_PATH, vocab)
# load traintest data
if TRAIN_NOISE_TYPE=="bea40kfinetune":
train_data = load_data(TRAIN_TEST_FILE_PATH, "train.bea40k", "train.bea40k.noise")
train_data, valid_data = train_validation_split(train_data, 0.90, seed=11690)
print(len(train_data),len(valid_data))
elif TRAIN_NOISE_TYPE=="moviereviewsfinetune":
#
train_data_clean = get_lines(os.path.join(TRAIN_TEST_FILE_PATH, "train.moviereviews"))
train_data_noise1 = get_line_representation(train_data_clean,rep_list=['swap','drop','add','key'], probs=[0.25,0.25,0.25,0.25])
train_data_noise2 = get_line_representation(train_data_clean,rep_list=['swap','drop','add','key'], probs=[1.00,0.00,0.00,0.00])
train_data_noise3 = get_line_representation(train_data_clean,rep_list=['swap','drop','add','key'], probs=[0.00,1.00,0.00,0.00])
train_data_noise4 = get_line_representation(train_data_clean,rep_list=['swap','drop','add','key'], probs=[0.00,0.00,1.00,0.00])
train_data_noise5 = get_line_representation(train_data_clean,rep_list=['swap','drop','add','key'], probs=[0.00,0.00,0.00,1.00])
train_data_noise = train_data_noise1+train_data_noise2+train_data_noise3+train_data_noise4+train_data_noise5
train_data_clean = train_data_clean*5
train_data = [(a,b) for a,b in zip(train_data_clean,train_data_noise)]
#
valid_data_clean = get_lines(os.path.join(TRAIN_TEST_FILE_PATH, "valid.moviereviews"))
valid_data_noise1 = get_line_representation(valid_data_clean,rep_list=['swap','drop','add','key'], probs=[0.25,0.25,0.25,0.25])
valid_data_noise2 = get_line_representation(valid_data_clean,rep_list=['swap','drop','add','key'], probs=[1.00,0.00,0.00,0.00])
valid_data_noise3 = get_line_representation(valid_data_clean,rep_list=['swap','drop','add','key'], probs=[0.00,1.00,0.00,0.00])
valid_data_noise4 = get_line_representation(valid_data_clean,rep_list=['swap','drop','add','key'], probs=[0.00,0.00,1.00,0.00])
valid_data_noise5 = get_line_representation(valid_data_clean,rep_list=['swap','drop','add','key'], probs=[0.00,0.00,0.00,1.00])
valid_data_noise = valid_data_noise1+valid_data_noise2+valid_data_noise3+valid_data_noise4+valid_data_noise5
valid_data_clean = valid_data_clean*5
valid_data = [(a,b) for a,b in zip(valid_data_clean,valid_data_noise)]
print(len(train_data),len(valid_data))
else:
raise Exception("invalid TRAIN_NOISE_TYPE in finetuning")
else:
# load traintest data
if TRAIN_NOISE_TYPE=="word":
train_data = load_data(TRAIN_TEST_FILE_PATH, "train.1blm", "train.1blm.noise.word")
train_data, valid_data = train_validation_split(train_data, 0.8, seed=11690)
print(len(train_data),len(valid_data))
elif TRAIN_NOISE_TYPE=="prob":
train_data = load_data(TRAIN_TEST_FILE_PATH, "train.1blm", "train.1blm.noise.prob")
train_data, valid_data = train_validation_split(train_data, 0.8, seed=11690)
print(len(train_data),len(valid_data))
elif TRAIN_NOISE_TYPE=="random":
train_data = load_data(TRAIN_TEST_FILE_PATH, "train.1blm", "train.1blm.noise.random")
train_data, valid_data = train_validation_split(train_data, 0.8, seed=11690)
print(len(train_data),len(valid_data))
elif TRAIN_NOISE_TYPE=="probword":
train_data1 = load_data(TRAIN_TEST_FILE_PATH, "train.1blm", "train.1blm.noise.prob")
train_data1, valid_data1 = train_validation_split(train_data1, 0.8, seed=11690)
print(len(train_data1),len(valid_data1))
train_data2 = load_data(TRAIN_TEST_FILE_PATH, "train.1blm", "train.1blm.noise.word")
train_data2, valid_data2 = train_validation_split(train_data2, 0.8, seed=11690)
print(len(train_data2),len(valid_data2))
train_data = train_data1+train_data2
valid_data = valid_data1+valid_data2
print(len(train_data),len(valid_data))
elif TRAIN_NOISE_TYPE=="probword_v2":
train_data = load_data(TRAIN_TEST_FILE_PATH, "train.1blm.v2", "train.1blm.v2.noise.probword")
train_data, valid_data = train_validation_split(train_data, 0.8, seed=11690)
print(len(train_data),len(valid_data))
elif TRAIN_NOISE_TYPE=="none":
train_data = load_data(TRAIN_TEST_FILE_PATH, "train.1blm", "train.1blm")
train_data, valid_data = train_validation_split(train_data, 0.8, seed=11690)
print(len(train_data),len(valid_data))
else:
raise Exception("invalid TRAIN_NOISE_TYPE")
#############################################
# load vocab
#############################################
if START_EPOCH!=1: # if not training from scratch or for inference
print(f"loading vocab from {VOCAB_PATH}")
vocab = load_vocab_dict(VOCAB_PATH)
else:
# load a vocab for reference
vocab_ref = {}
# opfile = open(os.path.join(BASE_PATH, "vocab/phonemedataset.txt"),"r")
# for line in opfile: vocab_ref.update( {line.strip():0} )
# opfile.close()
print(f"loading vocab from train data itself and saving it at {VOCAB_PATH}")
vocab = get_tokens([i[0] for i in train_data],
keep_simple=True,
min_max_freq=(2,float("inf")),
topk=100000,
intersect=vocab_ref,
load_char_tokens=True)
save_vocab_dict(VOCAB_PATH, vocab)
if START_EPOCH>0:
# see how many tokens in labels are going to be UNK
print ( num_unk_tokens([i[0] for i in train_data], vocab) )
print ( num_unk_tokens([i[0] for i in valid_data], vocab) )
print("")
print([*vocab.keys()])
#print(vocab["token_freq"])
#print([(idx,vocab["idx2token"][idx]) for idx in range(100)])
#############################################
# load ElmoSCLSTM
#############################################
model = load_model(vocab, verbose=False)
#############################################
# training or inference ??!
#############################################
if START_EPOCH>0:
#############################################
# training and validation
#############################################
# running stats
max_dev_acc, argmax_dev_acc = -1, -1
patience = 100
# Create an optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# model to device
model.to(DEVICE)
# load parameters if not training from scratch
if START_EPOCH>1:
# file to write progress to
progress_write_file = open(os.path.join(CHECKPOINT_PATH,f"progress_retrain_from_epoch{START_EPOCH}.txt"),'w')
# model and optimizer load_state_dict
if FINETUNE!="":
print("loading pretrained weights for finetuning")
print(f"loading pretrained weights from {SRC_CHECKPOINT_PATH}")
model, optimizer, _, _ = load_pretrained(model, SRC_CHECKPOINT_PATH, optimizer=optimizer)
progress_write_file.write(f"Training model params after loading from path: {SRC_CHECKPOINT_PATH}\n")
else:
print(f"loading pretrained weights from {CHECKPOINT_PATH}")
model, optimizer, max_dev_acc, argmax_dev_acc = load_pretrained(model, CHECKPOINT_PATH, optimizer=optimizer)
progress_write_file.write(f"Training model params after loading from path: {CHECKPOINT_PATH}\n")
else:
# file to write progress to
progress_write_file = open(os.path.join(CHECKPOINT_PATH,"progress.txt"),'w')
print(f"Training model params from scratch")
progress_write_file.write(f"Training model params from scratch\n")
progress_write_file.flush()
# train and eval
for epoch_id in range(START_EPOCH,N_EPOCHS+1):
# check for patience
if (epoch_id-argmax_dev_acc)>patience:
print("patience count reached. early stopping initiated")
print("max_dev_acc: {}, argmax_dev_acc: {}".format(max_dev_acc, argmax_dev_acc))
break
# if finetuning and the noise type is moviereviews,
# create a different train data every epoch
if TRAIN_NOISE_TYPE=="moviereviewsfinetune":
train_data_clean = get_lines(os.path.join(TRAIN_TEST_FILE_PATH, "train.moviereviews"))
train_data_noise1 = get_line_representation(train_data_clean,rep_list=['swap','drop','add','key'], probs=[0.25,0.25,0.25,0.25])
train_data_noise2 = get_line_representation(train_data_clean,rep_list=['swap','drop','add','key'], probs=[1.00,0.00,0.00,0.00])
train_data_noise3 = get_line_representation(train_data_clean,rep_list=['swap','drop','add','key'], probs=[0.00,1.00,0.00,0.00])
train_data_noise4 = get_line_representation(train_data_clean,rep_list=['swap','drop','add','key'], probs=[0.00,0.00,1.00,0.00])
train_data_noise5 = get_line_representation(train_data_clean,rep_list=['swap','drop','add','key'], probs=[0.00,0.00,0.00,1.00])
train_data_noise = train_data_noise1+train_data_noise2+train_data_noise3+train_data_noise4+train_data_noise5
train_data_clean = train_data_clean*5
train_data = [(a,b) for a,b in zip(train_data_clean,train_data_noise)]
print(f"new training instances created, train data size now: {len(train_data)}")
# print epoch
print(f"In epoch: {epoch_id}")
progress_write_file.write(f"In epoch: {epoch_id}\n")
progress_write_file.flush()
# train loss and backprop
train_loss = 0.
train_acc = 0.
train_acc_count = 0.
print("train_data size: {}".format(len(train_data)))
progress_write_file.write("train_data size: {}\n".format(len(train_data)))
progress_write_file.flush()
train_data_iter = batch_iter(train_data, batch_size=TRAIN_BATCH_SIZE, shuffle=True)
#for batch_id, (batch_labels,batch_sentences) in tqdm(enumerate(train_data_iter)):
for batch_id, (batch_labels,batch_sentences) in enumerate(train_data_iter):
optimizer.zero_grad()
st_time = time.time()
# set batch data
batch_labels, batch_lengths = labelize(batch_labels, vocab)
batch_idxs, batch_lengths_ = sclstm_tokenize(batch_sentences, vocab)
assert (batch_lengths_==batch_lengths).all()==True
batch_idxs = [batch_idxs_.to(DEVICE) for batch_idxs_ in batch_idxs]
batch_lengths = batch_lengths.to(DEVICE)
batch_labels = batch_labels.to(DEVICE)
batch_elmo_inp = elmo_batch_to_ids([line.split() for line in batch_sentences]).to(DEVICE)
# forward
model.train()
loss = model(batch_idxs, batch_lengths, batch_elmo_inp, targets=batch_labels)
batch_loss = loss.cpu().detach().numpy()
train_loss += batch_loss
# backward
loss.backward()
optimizer.step()
# compute accuracy in numpy
if batch_id%10000==0:
train_acc_count += 1
model.eval()
with torch.no_grad():
_, batch_predictions = model(batch_idxs, batch_lengths, batch_elmo_inp, targets=batch_labels)
model.train()
batch_labels = batch_labels.cpu().detach().numpy()
batch_lengths = batch_lengths.cpu().detach().numpy()
ncorr,ntotal = batch_accuracy_func(batch_predictions,batch_labels,batch_lengths)
batch_acc = ncorr/ntotal
train_acc += batch_acc
# update progress
progressBar(batch_id+1,
int(np.ceil(len(train_data) / TRAIN_BATCH_SIZE)),
["batch_time","batch_loss","avg_batch_loss","batch_acc","avg_batch_acc"],
[time.time()-st_time,batch_loss,train_loss/(batch_id+1),batch_acc,train_acc/train_acc_count])
if batch_id==0 or (batch_id+1)%5000==0:
nb = int(np.ceil(len(train_data) / TRAIN_BATCH_SIZE))
progress_write_file.write(f"{batch_id+1}/{nb}\n")
progress_write_file.write(f"batch_time: {time.time()-st_time}, avg_batch_loss: {train_loss/(batch_id+1)}, avg_batch_acc: {train_acc/(batch_id+1)}\n")
progress_write_file.flush()
print(f"\nEpoch {epoch_id} train_loss: {train_loss/(batch_id+1)}")
try:
# valid loss
valid_loss = 0.
valid_acc = 0.
print("valid_data size: {}".format(len(valid_data)))
progress_write_file.write("valid_data size: {}\n".format(len(valid_data)))
progress_write_file.flush()
valid_data_iter = batch_iter(valid_data, batch_size=VALID_BATCH_SIZE, shuffle=False)
for batch_id, (batch_labels,batch_sentences) in enumerate(valid_data_iter):
st_time = time.time()
# set batch data
batch_labels, batch_lengths = labelize(batch_labels, vocab)
batch_idxs, batch_lengths_ = sclstm_tokenize(batch_sentences, vocab)
assert (batch_lengths_==batch_lengths).all()==True
batch_idxs = [batch_idxs_.to(DEVICE) for batch_idxs_ in batch_idxs]
batch_lengths = batch_lengths.to(DEVICE)
batch_labels = batch_labels.to(DEVICE)
batch_elmo_inp = elmo_batch_to_ids([line.split() for line in batch_sentences]).to(DEVICE)
# forward
model.eval()
with torch.no_grad():
batch_loss, batch_predictions = model(batch_idxs, batch_lengths, batch_elmo_inp, targets=batch_labels)
model.train()
valid_loss += batch_loss
# compute accuracy in numpy
batch_labels = batch_labels.cpu().detach().numpy()
batch_lengths = batch_lengths.cpu().detach().numpy()
ncorr,ntotal = batch_accuracy_func(batch_predictions,batch_labels,batch_lengths)
batch_acc = ncorr/ntotal
valid_acc += batch_acc
# update progress
progressBar(batch_id+1,
int(np.ceil(len(valid_data) / VALID_BATCH_SIZE)),
["batch_time","batch_loss","avg_batch_loss","batch_acc","avg_batch_acc"],
[time.time()-st_time,batch_loss,valid_loss/(batch_id+1),batch_acc,valid_acc/(batch_id+1)])
if batch_id==0 or (batch_id+1)%2000==0:
nb = int(np.ceil(len(valid_data) / VALID_BATCH_SIZE))
progress_write_file.write(f"{batch_id}/{nb}\n")
progress_write_file.write(f"batch_time: {time.time()-st_time}, avg_batch_loss: {valid_loss/(batch_id+1)}, avg_batch_acc: {valid_acc/(batch_id+1)}\n")
progress_write_file.flush()
print(f"\nEpoch {epoch_id} valid_loss: {valid_loss/(batch_id+1)}")
# save model, optimizer and test_predictions if val_acc is improved
if valid_acc>=max_dev_acc:
# to file
#name = "model-epoch{}.pth.tar".format(epoch_id)
name = "model.pth.tar".format(epoch_id)
torch.save({
'epoch_id': epoch_id,
'previous_max_dev_acc': max_dev_acc,
'previous_argmax_dev_acc': argmax_dev_acc,
'max_dev_acc': valid_acc,
'argmax_dev_acc': epoch_id,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()},
os.path.join(CHECKPOINT_PATH,name))
print("Model saved at {} in epoch {}".format(os.path.join(CHECKPOINT_PATH,name),epoch_id))
# re-assign
max_dev_acc, argmax_dev_acc = valid_acc, epoch_id
except Exception as e:
temp_folder = os.path.join(CHECKPOINT_PATH,"temp")
if not os.path.exists(temp_folder):
os.makedirs(temp_folder)
name = "model.pth.tar".format(epoch_id)
torch.save({
'epoch_id': epoch_id,
'previous_max_dev_acc': max_dev_acc,
'previous_argmax_dev_acc': argmax_dev_acc,
'max_dev_acc': valid_acc,
'argmax_dev_acc': epoch_id,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()},
os.path.join(temp_folder,name))
print("Model saved at {} in epoch {}".format(os.path.join(temp_folder,name),epoch_id))
raise Exception(e)
else:
#############################################
# inference
#############################################
# load parameters
model = load_pretrained(model, CHECKPOINT_PATH)
# infer
TRAIN_TEST_FILE_PATH1 = os.path.join(BASE_PATH, "traintest")
TRAIN_TEST_FILE_PATH2 = os.path.join(BASE_PATH, "traintest/wo_context")
'''
paths = [TRAIN_TEST_FILE_PATH1,TRAIN_TEST_FILE_PATH1,TRAIN_TEST_FILE_PATH1,TRAIN_TEST_FILE_PATH2,TRAIN_TEST_FILE_PATH2,TRAIN_TEST_FILE_PATH2]
files1 = ["test.bea60k","test.1blm","test.1blm","combined_data","aspell_big","aspell_small"]
files2 = ["test.bea60k.noise","test.1blm.noise.prob","test.1blm.noise.word","combined_data.noise","aspell_big.noise","aspell_small.noise"]
INFER_BATCH_SIZE = 16
'''
'''
paths = [TRAIN_TEST_FILE_PATH2,TRAIN_TEST_FILE_PATH2,TRAIN_TEST_FILE_PATH2]
files1 = ["combined_data","aspell_big","aspell_small"]
files2 = ["combined_data.noise","aspell_big.noise","aspell_small.noise"]
INFER_BATCH_SIZE = 1024
'''
'''
paths = [TRAIN_TEST_FILE_PATH1]
files1 = ["test.1blm","test.1blm"]
files2 = ["test.1blm.noise.prob","test.1blm.noise.word"]
INFER_BATCH_SIZE = 64 # 128
'''
'''
paths = [TRAIN_TEST_FILE_PATH1]
files1 = ["test.1blm"]
files2 = ["test.1blm.noise.prob"]
INFER_BATCH_SIZE = 20 #64 #128
ANALYSIS_DIR = f"../seq_modeling_analysis/elmoscrnn/analysis_{TRAIN_NOISE_TYPE}_probnoise"
selected_lines_file = None
'''
'''
paths = [TRAIN_TEST_FILE_PATH1]
files1 = ["test.1blm"]
files2 = ["test.1blm.noise.word"]
INFER_BATCH_SIZE = 20 #64 #128
ANALYSIS_DIR = f"../seq_modeling_analysis/elmoscrnn/analysis_{TRAIN_NOISE_TYPE}_wordnoise"
selected_lines_file = None
'''
'''
paths = [TRAIN_TEST_FILE_PATH1]
files1 = ["test.1blm"]
files2 = ["test.1blm.noise.random"]
INFER_BATCH_SIZE = 20 #20 #64 #128
ANALYSIS_DIR = f"../seq_modeling_analysis/elmoscrnn/analysis_{TRAIN_NOISE_TYPE}_randomnoise"
selected_lines_file = None
'''
'''
paths = [TRAIN_TEST_FILE_PATH1, TRAIN_TEST_FILE_PATH1]
files1 = ["test.1blm","test.1blm"]
files2 = ["test.1blm.noise.prob","test.1blm.noise.word"]
INFER_BATCH_SIZE = 32
'''
'''
paths = [TRAIN_TEST_FILE_PATH1]
files1 = ["test.bea4k",]
files2 = ["test.bea4k.noise"]
INFER_BATCH_SIZE = 8
ANALYSIS_DIR = f"../seq_modeling_analysis/elmoscrnn/analysis_{TRAIN_NOISE_TYPE}_bea4k"
selected_lines_file = None # "../gec-pseudodata/test.bea4k.lines.txt" # None
'''
'''
paths = [TRAIN_TEST_FILE_PATH1]
files1 = ["test.bea60k"]
files2 = ["test.bea60k.noise"]
INFER_BATCH_SIZE = 8
ANALYSIS_DIR = f"../seq_modeling_analysis/elmoscrnn/analysis_{TRAIN_NOISE_TYPE}_bea60k"
selected_lines_file = None # "../gec-pseudodata/test.bea60k.lines.txt" # None
'''
'''
paths = [TRAIN_TEST_FILE_PATH1]
files1 = ["test.bea60k.ambiguous_artificial"]
files2 = ["test.bea60k.ambiguous_artificial.noise"]
INFER_BATCH_SIZE = 8
ANALYSIS_DIR = f"../seq_modeling_analysis/elmoscrnn/analysis_{TRAIN_NOISE_TYPE}_bea60k_ambiguous_artificial"
selected_lines_file = None
'''
'''
paths = [TRAIN_TEST_FILE_PATH1, TRAIN_TEST_FILE_PATH1]
files1 = ["test.bea60k.ambiguous_natural_v7", "test.bea60k.ambiguous_natural_v8"]
files2 = ["test.bea60k.ambiguous_natural_v7.noise", "test.bea60k.ambiguous_natural_v8.noise"]
INFER_BATCH_SIZE = 8
ANALYSIS_DIR = f"../seq_modeling_analysis/elmoscrnn/analysis_{TRAIN_NOISE_TYPE}_bea60k_ambiguous_natural_v5"
selected_lines_file = None
'''
'''
paths = [TRAIN_TEST_FILE_PATH1]
files1 = ["test.bea20k"]
files2 = ["test.bea20k.noise"]
INFER_BATCH_SIZE = 10
ANALYSIS_DIR = f"../seq_modeling_analysis/elmoscrnn/analysis_{TRAIN_NOISE_TYPE}_bea20k"
selected_lines_file = None # "../gec-pseudodata/test.bea20k.lines.txt" # None
'''
'''
paths = [TRAIN_TEST_FILE_PATH1]
files1 = ["test.jfleg"]
files2 = ["test.jfleg.noise"]
INFER_BATCH_SIZE = 8
ANALYSIS_DIR = f"../seq_modeling_analysis/elmoscrnn/analysis_{TRAIN_NOISE_TYPE}_jfleg"
selected_lines_file = None # "../gec-pseudodata/test.jfleg.lines.txt" # None
'''
paths = [TRAIN_TEST_FILE_PATH1]
files1 = ["test.bea60k.ambiguous_natural_v7"]
files2 = ["test.bea60k.ambiguous_natural_v7.noise"]
INFER_BATCH_SIZE = 8
ANALYSIS_DIR = f"../seq_modeling_analysis/elmoscrnn/analysis_{TRAIN_NOISE_TYPE}_ambiguous_natural_v7"
selected_lines_file = None
# expect a dict as {"id":, "original":, "noised":, "predicted":, "topk":, "topk_prediction_probs":, "topk_reranker_losses":,}
for x,y,z in zip(paths,files1,files2):
print("\n\n\n\n")
print(x,y,z)
test_data = load_data(x,y,z)
print ( num_unk_tokens([i[0] for i in test_data], vocab) )
greedy_results = model_inference(model,test_data,topk=1,DEVICE=DEVICE,BATCH_SIZE=INFER_BATCH_SIZE,beam_search=False,selected_lines_file=selected_lines_file)
# beam_search_results = model_inference(model,test_data,topk=10,DEVICE=DEVICE,BATCH_SIZE=INFER_BATCH_SIZE,beam_search=True)
print(ANALYSIS_DIR)
if not os.path.exists(ANALYSIS_DIR):
os.makedirs(ANALYSIS_DIR)
import jsonlines
#
print("greedy...")
greedy_lines_fully_correct = {line["id"]:"" for line in greedy_results if line["original"]==line["predicted"]}
greedy_lines_otherwise = {line["id"]:"" for line in greedy_results if line["original"]!=line["predicted"]}
print(f'# Lines Predicted Fully Correct: {len(greedy_lines_fully_correct)}')
print(f'# Lines Otherwise: {len(greedy_lines_otherwise)}')
opfile = jsonlines.open(os.path.join(ANALYSIS_DIR,"greedy_results.jsonl"),'w')
for line in greedy_results: opfile.write(line)
opfile.close()
opfile = jsonlines.open(os.path.join(ANALYSIS_DIR,"greedy_results_corr_preds.jsonl"),'w')
for line in [line for line in greedy_results if line["original"]==line["predicted"]]: opfile.write(line)
opfile.close()
opfile = jsonlines.open(os.path.join(ANALYSIS_DIR,"greedy_results_incorr_preds.jsonl"),'w')
for line in [line for line in greedy_results if line["original"]!=line["predicted"]]: opfile.write(line)
opfile.close()
#
# for better view
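        # Layout of the *.txt dumps written below (per sentence): the line id, then the
        # original, noised and predicted token sequences on three separate lines, with any
        # token position that differs across the three versions wrapped in ** markers.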
opfile = open(os.path.join(ANALYSIS_DIR,"greedy_results.txt"),'w')
for line in greedy_results:
ls = [(o,n,p) if o==n==p else ("**"+o+"**","**"+n+"**","**"+p+"**")for o,n,p in zip(line["original"].split(),line["noised"].split(),line["predicted"].split())]
x,y,z = map(list, zip(*ls))
opfile.write(f'{line["id"]}\n{" ".join(x)}\n{" ".join(y)}\n{" ".join(z)}\n')
opfile.close()
opfile = open(os.path.join(ANALYSIS_DIR,"greedy_results_corr_preds.txt"),'w')
for line in [line for line in greedy_results if line["original"]==line["predicted"]]:
ls = [(o,n,p) if o==n==p else ("**"+o+"**","**"+n+"**","**"+p+"**")for o,n,p in zip(line["original"].split(),line["noised"].split(),line["predicted"].split())]
x,y,z = map(list, zip(*ls))
opfile.write(f'{line["id"]}\n{" ".join(x)}\n{" ".join(y)}\n{" ".join(z)}\n')
opfile.close()
opfile = open(os.path.join(ANALYSIS_DIR,"greedy_results_incorr_preds.txt"),'w')
for line in [line for line in greedy_results if line["original"]!=line["predicted"]]:
ls = [(o,n,p) if o==n==p else ("**"+o+"**","**"+n+"**","**"+p+"**")for o,n,p in zip(line["original"].split(),line["noised"].split(),line["predicted"].split())]
x,y,z = map(list, zip(*ls))
opfile.write(f'{line["id"]}\n{" ".join(x)}\n{" ".join(y)}\n{" ".join(z)}\n')
opfile.close()
# print("beam_search...")
# beam_search_lines_fully_correct = {line["id"]:"" for line in beam_search_results if line["original"]==line["predicted"]}
# beam_search_lines_otherwise = {line["id"]:"" for line in beam_search_results if line["original"]!=line["predicted"]}
# print(f'# Lines Predicted Fully Correct: {len(beam_search_lines_fully_correct)}')
# print(f'# Lines Otherwise: {len(beam_search_lines_otherwise)}')
# opfile = jsonlines.open(os.path.join(ANALYSIS_DIR,"beam_search_results.jsonl"),'w')
# for line in beam_search_results: opfile.write(line)
# opfile.close()
# opfile = jsonlines.open(os.path.join(ANALYSIS_DIR,"beam_search_results_corr_preds.jsonl"),'w')
# for line in [line for line in beam_search_results if line["original"]==line["predicted"]]: opfile.write(line)
# opfile.close()
# opfile = jsonlines.open(os.path.join(ANALYSIS_DIR,"beam_search_results_incorr_preds.jsonl"),'w')
# for line in [line for line in beam_search_results if line["original"]!=line["predicted"]]: opfile.write(line)
# opfile.close()
# #
# # confusion matrix
# corr2corr = len([k for k in greedy_lines_fully_correct if k in beam_search_lines_fully_correct])
# corr2incorr = len([k for k in greedy_lines_fully_correct if k in beam_search_lines_otherwise])
# incorr2corr = len([k for k in greedy_lines_otherwise if k in beam_search_lines_fully_correct])
# incorr2incorr = len([k for k in greedy_lines_otherwise if k in beam_search_lines_otherwise])
# print("Confusion Matrix for before and after beam search: ")
# print(f"corr2corr:{corr2corr}, corr2incorr:{corr2incorr}, incorr2corr:{incorr2corr}, incorr2incorr:{incorr2incorr}")
#########################################
# reranking snippets from past
#########################################
# if save_dir is not None:
# line_index = 0
# analysis_path = save_dir
# if not os.path.exists(analysis_path):
# os.makedirs(analysis_path)
# if beam_search:
# line_index_wrong_opfile = open(f"./{analysis_path}/beam_search_wrong.txt","w")
# line_index_right_opfile = open(f"./{analysis_path}/beam_search_right.txt","w")
# k_wrong_opfile = open(f"./{analysis_path}/beam_search_k_wrong.txt","w")
# k_right_opfile = open(f"./{analysis_path}/beam_search_k_right.txt","w")
# else:
# line_index_wrong_opfile = open(f"./{analysis_path}/greedy_wrong.txt","w")
# line_index_right_opfile = open(f"./{analysis_path}/greedy_right.txt","w")
# reranked_batch_predictions = []
# batch_clean_sentences_ = []
# batch_corrupt_sentences_ = []
# with torch.no_grad():
# for b in range(len(batch_clean_sentences)):
# try:
# losses = []
# for sent in [k_batch_predictions[k][b] for k in range(topk)]:
# if sent!="" or sent is not None:
# input_ids = torch.tensor(gpt2Tokenizer.encode(sent, add_special_tokens=True)).unsqueeze(0) # Batch size 1
# input_ids = input_ids.to(DEVICE)
# outputs = gpt2LMHeadModel(input_ids, labels=input_ids)
# loss = outputs[0].item()
# else:
# loss = 10000.0
# losses.append(loss)
# kmin = np.argmin(losses)
# reranked_batch_predictions.append(k_batch_predictions[kmin][b])
# batch_clean_sentences_.append(batch_clean_sentences[b])
# batch_corrupt_sentences_.append(batch_corrupt_sentences[b])
# except Exception as e:
# reranked_batch_predictions.append(k_batch_predictions[0][b])
# batch_clean_sentences_.append(batch_clean_sentences[b])
# batch_corrupt_sentences_.append(batch_corrupt_sentences[b])
# corr2corr_, corr2incorr_, incorr2corr_, incorr2incorr_ = \
# get_metrics(batch_clean_sentences_,batch_corrupt_sentences_,reranked_batch_predictions,check_until_topk=1,return_mistakes=False)
# corr2corr+=corr2corr_
# corr2incorr+=corr2incorr_
# incorr2corr+=incorr2corr_
# incorr2incorr+=incorr2incorr_
# this_batch = [[k_batch_predictions[k][i] for k in range(len(k_batch_predictions))] for i in range(len(k_batch_predictions[0]))]
# flat_batch = sum(this_batch,[]); # print(flat_batch); print(len(flat_batch))
# lens = [len(s) for s in this_batch]
# ii = 0
# flat_losses = []
# model.eval()
# model.to(DEVICE)
# with torch.no_grad():
# while ii<len(flat_batch):
# try:
# curr_batch = flat_batch[ii:ii+HFACE_BATCH_SIZE]
# curr_inputs = gpt2Tokenizer.batch_encode_plus(curr_batch,pad_to_max_length=True)
# curr_inputs_ids = curr_inputs["input_ids"]
# curr_inputs = {k:torch.tensor(v).to(DEVICE) for k,v in curr_inputs.items()}
# curr_outputs = gpt2LMHeadModel(input_ids=curr_inputs["input_ids"],token_type_ids=curr_inputs["token_type_ids"],attention_mask=curr_inputs["attention_mask"])
# lm_logits = curr_outputs[0]
# labels = torch.tensor([[i if i!=50256 else -100 for i in row] for row in curr_inputs_ids]).to(DEVICE)
# # Shift so that tokens < n predict n
# shift_logits = lm_logits[..., :-1, :].contiguous(); # print(shift_logits.shape)
# shift_labels = labels[..., 1:].contiguous(); # print(shift_labels.shape)
# # Flatten the tokens
# loss_fct = CrossEntropyLoss(reduction='none')
# loss = loss_fct(shift_logits.permute(0, 2, 1), shift_labels)
# flat_losses.extend(loss.sum(axis=-1).cpu().detach().numpy().tolist())
# ii += HFACE_BATCH_SIZE
# except Exception as e:
# # print(this_batch)
# raise Exception(e)
# offset = 0
# batch_losses = []
# for val in lens:
# batch_losses.append(flat_losses[offset:offset+val])
# offset += val
# print(np.array(batch_losses))
# reranked_batch_predictions = [k_batch_predictions[np.argmin(batch_losses[i])][i] for i in range(len(batch_losses))]
# print(batch_clean_sentences)
# print("")
# print(reranked_batch_predictions)
# raise Exception("debug...")
# corr2corr_, corr2incorr_, incorr2corr_, incorr2incorr_ = \
# get_metrics(batch_clean_sentences,batch_corrupt_sentences,reranked_batch_predictions,check_until_topk=1,return_mistakes=False)
# corr2corr+=corr2corr_
# corr2incorr+=corr2incorr_
# incorr2corr+=incorr2corr_
# incorr2incorr+=incorr2incorr_
##########################################################
# for i, (a,b,c,d) in enumerate(zip(batch_clean_sentences_,batch_corrupt_sentences_,reranked_batch_predictions,batch_predictions_k)):
# if a==c: # right
# line_index_right_opfile.write(f"{line_index+i}\t{a}\t{b}\t{c}\n")
# else:
# line_index_wrong_opfile.write(f"{line_index+i}\t{a}\t{b}\t{c}\n")
# line_index+=len(batch_clean_sentences_)
# line_index_right_opfile.flush()
# line_index_wrong_opfile.flush()
# __mistakes = []
# __inds = []
# for i in range(len(batch_clean_sentences)):
# if batch_clean_sentences[i].strip()!=k_batch_predictions[0][i].strip():
# __mistakes.append(f"{batch_clean_sentences[i]}\n")
# __inds.append(i)
# for k in range(topk):
# batch_predictions_probs = k_batch_predictions_probs[k]
# ii = 0
# for ind in __inds:
# __mistakes[ii]+=f"{batch_predictions_probs[ind]:.4f}\t"
# ii+=1
# batch_predictions = k_batch_predictions[k]
# ii = 0
# for ind in __inds:
# __mistakes[ii]+=f"{batch_predictions[ind]}\n"
# ii+=1
# ii=0
# for i,_ in enumerate(batch_clean_sentences):
# if i in __inds:
# __mistakes[ii]+="\n"
# ii+=1
# for mis in __mistakes:
# k_wrong_opfile.write(mis)
# __predictions = []
# for sent in batch_clean_sentences:
# __predictions.append(f"{sent}\n")
# for k in range(topk):
# batch_predictions_probs = k_batch_predictions_probs[k]
# for i,val in enumerate(batch_predictions_probs):
# __predictions[i]+=f"{val:.4f}\t"
# batch_predictions = k_batch_predictions[k]
# for i,sent in enumerate(batch_predictions):
# __predictions[i]+=f"{sent}\n"
# for i,_ in enumerate(batch_clean_sentences):
# __predictions[i]+="\n"
# for pred in __predictions:
# k_right_opfile.write(pred)
# if beam_search:
# line_index_right_opfile.close()
# line_index_wrong_opfile.close()
# k_wrong_opfile.close()
# k_right_opfile.close()
# else:
# line_index_right_opfile.close()
# line_index_wrong_opfile.close()
| []
| []
| ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
collabinn/collabinn/asgi.py | """
ASGI config for collabinn project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'collabinn.settings')
application = get_asgi_application()
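# A minimal way to serve this (illustrative, assuming an ASGI server such as uvicorn is installed):
#   uvicorn collabinn.asgi:application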
| []
| []
| []
| [] | [] | python | 0 | 0 | |
mesonbuild/backend/ninjabackend.py | # Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from enum import Enum, unique
from functools import lru_cache
from pathlib import PurePath, Path
from textwrap import dedent
import itertools
import json
import os
import pickle
import re
import shlex
import subprocess
import typing as T
from . import backends
from .. import modules
from .. import environment, mesonlib
from .. import build
from .. import mlog
from .. import compilers
from ..arglist import CompilerArgs
from ..compilers import (
Compiler, CCompiler,
FortranCompiler,
mixins,
PGICCompiler,
VisualStudioLikeCompiler,
)
from ..linkers import ArLinker, RSPFileSyntax
from ..mesonlib import (
File, LibType, MachineChoice, MesonException, OrderedSet, PerMachine,
ProgressBar, quote_arg
)
from ..mesonlib import get_compiler_for_source, has_path_sep, OptionKey
from .backends import CleanTrees
from ..build import GeneratedList, InvalidArguments, ExtractedObjects
from ..interpreter import Interpreter
if T.TYPE_CHECKING:
from .._typing import ImmutableListProtocol
from ..linkers import DynamicLinker, StaticLinker
from ..compilers.cs import CsCompiler
FORTRAN_INCLUDE_PAT = r"^\s*#?include\s*['\"](\w+\.\w+)['\"]"
FORTRAN_MODULE_PAT = r"^\s*\bmodule\b\s+(\w+)\s*(?:!+.*)*$"
FORTRAN_SUBMOD_PAT = r"^\s*\bsubmodule\b\s*\((\w+:?\w+)\)\s*(\w+)"
FORTRAN_USE_PAT = r"^\s*use,?\s*(?:non_intrinsic)?\s*(?:::)?\s*(\w+)"
def cmd_quote(s):
# see: https://docs.microsoft.com/en-us/windows/desktop/api/shellapi/nf-shellapi-commandlinetoargvw#remarks
# backslash escape any existing double quotes
# any existing backslashes preceding a quote are doubled
s = re.sub(r'(\\*)"', lambda m: '\\' * (len(m.group(1)) * 2 + 1) + '"', s)
# any terminal backslashes likewise need doubling
s = re.sub(r'(\\*)$', lambda m: '\\' * (len(m.group(1)) * 2), s)
# and double quote
s = f'"{s}"'
return s
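# Illustrative behaviour (assumed examples, shown as Python literals):
#   cmd_quote('hello world')  -> '"hello world"'
#   cmd_quote('say "hi"')     -> '"say \\"hi\\""'
#   cmd_quote('dir\\')        -> '"dir\\\\"'   # trailing backslash is doubled before quoting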
def gcc_rsp_quote(s):
# see: the function buildargv() in libiberty
#
# this differs from sh-quoting in that a backslash *always* escapes the
# following character, even inside single quotes.
s = s.replace('\\', '\\\\')
return shlex.quote(s)
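# Illustrative behaviour (assumed examples, shown as Python literals):
#   gcc_rsp_quote('a b')   -> "'a b'"
#   gcc_rsp_quote(r'a\b')  -> r"'a\\b'"   # backslash doubled, then sh-style quoted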
# How ninja executes command lines differs between Unix and Windows
# (see https://ninja-build.org/manual.html#ref_rule_command)
if mesonlib.is_windows():
quote_func = cmd_quote
execute_wrapper = ['cmd', '/c'] # unused
rmfile_prefix = ['del', '/f', '/s', '/q', '{}', '&&']
else:
quote_func = quote_arg
execute_wrapper = []
rmfile_prefix = ['rm', '-f', '{}', '&&']
def get_rsp_threshold():
'''Return a conservative estimate of the commandline size in bytes
above which a response file should be used. May be overridden for
debugging by setting environment variable MESON_RSP_THRESHOLD.'''
if mesonlib.is_windows():
# Usually 32k, but some projects might use cmd.exe,
# and that has a limit of 8k.
limit = 8192
else:
# On Linux, ninja always passes the commandline as a single
# big string to /bin/sh, and the kernel limits the size of a
# single argument; see MAX_ARG_STRLEN
limit = 131072
# Be conservative
limit = limit / 2
return int(os.environ.get('MESON_RSP_THRESHOLD', limit))
# a conservative estimate of the command-line length limit
rsp_threshold = get_rsp_threshold()
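# For example (assumed usage), regenerating the build with MESON_RSP_THRESHOLD=20000 in the
# environment makes response files kick in only for command lines longer than 20000 bytes.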
# ninja variables whose value should remain unquoted. The value of these ninja
# variables (or variables we use them in) is interpreted directly by ninja
# (e.g. the value of the depfile variable is a pathname that ninja will read
# from, etc.), so it must not be shell quoted.
raw_names = {'DEPFILE_UNQUOTED', 'DESC', 'pool', 'description', 'targetdep', 'dyndep'}
NINJA_QUOTE_BUILD_PAT = re.compile(r"[$ :\n]")
NINJA_QUOTE_VAR_PAT = re.compile(r"[$ \n]")
def ninja_quote(text: str, is_build_line=False) -> str:
if is_build_line:
quote_re = NINJA_QUOTE_BUILD_PAT
else:
quote_re = NINJA_QUOTE_VAR_PAT
# Fast path for when no quoting is necessary
if not quote_re.search(text):
return text
if '\n' in text:
errmsg = f'''Ninja does not support newlines in rules. The content was:
{text}
Please report this error with a test case to the Meson bug tracker.'''
raise MesonException(errmsg)
return quote_re.sub(r'$\g<0>', text)
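# Illustrative behaviour (assumed examples):
#   ninja_quote('foo bar')                      -> 'foo$ bar'
#   ninja_quote('out: in', is_build_line=True)  -> 'out$:$ in'
#   ninja_quote('$var')                         -> '$$var'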
class TargetDependencyScannerInfo:
def __init__(self, private_dir: str, source2object: T.Dict[str, str]):
self.private_dir = private_dir
self.source2object = source2object
@unique
class Quoting(Enum):
both = 0
notShell = 1
notNinja = 2
none = 3
class NinjaCommandArg:
def __init__(self, s, quoting = Quoting.both):
self.s = s
self.quoting = quoting
def __str__(self):
return self.s
@staticmethod
def list(l, q):
return [NinjaCommandArg(i, q) for i in l]
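    # Typical use (illustrative): NinjaCommandArg.list(['&&', 'rm', '-f'], Quoting.notShell)
    # wraps each string in a NinjaCommandArg carrying that quoting mode.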
class NinjaComment:
def __init__(self, comment):
self.comment = comment
def write(self, outfile):
for l in self.comment.split('\n'):
outfile.write('# ')
outfile.write(l)
outfile.write('\n')
outfile.write('\n')
class NinjaRule:
def __init__(self, rule, command, args, description,
rspable = False, deps = None, depfile = None, extra = None,
rspfile_quote_style: RSPFileSyntax = RSPFileSyntax.GCC):
def strToCommandArg(c):
if isinstance(c, NinjaCommandArg):
return c
# deal with common cases here, so we don't have to explicitly
# annotate the required quoting everywhere
if c == '&&':
# shell constructs shouldn't be shell quoted
return NinjaCommandArg(c, Quoting.notShell)
if c.startswith('$'):
var = re.search(r'\$\{?(\w*)\}?', c).group(1)
if var not in raw_names:
# ninja variables shouldn't be ninja quoted, and their value
# is already shell quoted
return NinjaCommandArg(c, Quoting.none)
else:
# shell quote the use of ninja variables whose value must
# not be shell quoted (as it also used by ninja)
return NinjaCommandArg(c, Quoting.notNinja)
return NinjaCommandArg(c)
self.name = rule
self.command = list(map(strToCommandArg, command)) # includes args which never go into a rspfile
self.args = list(map(strToCommandArg, args)) # args which will go into a rspfile, if used
self.description = description
self.deps = deps # depstyle 'gcc' or 'msvc'
self.depfile = depfile
self.extra = extra
self.rspable = rspable # if a rspfile can be used
self.refcount = 0
self.rsprefcount = 0
self.rspfile_quote_style = rspfile_quote_style
if self.depfile == '$DEPFILE':
self.depfile += '_UNQUOTED'
@staticmethod
def _quoter(x, qf = quote_func):
if isinstance(x, NinjaCommandArg):
if x.quoting == Quoting.none:
return x.s
elif x.quoting == Quoting.notNinja:
return qf(x.s)
elif x.quoting == Quoting.notShell:
return ninja_quote(x.s)
# fallthrough
return ninja_quote(qf(str(x)))
def write(self, outfile):
if self.rspfile_quote_style is RSPFileSyntax.MSVC:
rspfile_quote_func = cmd_quote
else:
rspfile_quote_func = gcc_rsp_quote
def rule_iter():
if self.refcount:
yield ''
if self.rsprefcount:
yield '_RSP'
for rsp in rule_iter():
outfile.write(f'rule {self.name}{rsp}\n')
if rsp == '_RSP':
outfile.write(' command = {} @$out.rsp\n'.format(' '.join([self._quoter(x) for x in self.command])))
outfile.write(' rspfile = $out.rsp\n')
outfile.write(' rspfile_content = {}\n'.format(' '.join([self._quoter(x, rspfile_quote_func) for x in self.args])))
else:
outfile.write(' command = {}\n'.format(' '.join([self._quoter(x) for x in self.command + self.args])))
if self.deps:
outfile.write(f' deps = {self.deps}\n')
if self.depfile:
outfile.write(f' depfile = {self.depfile}\n')
outfile.write(f' description = {self.description}\n')
if self.extra:
for l in self.extra.split('\n'):
outfile.write(' ')
outfile.write(l)
outfile.write('\n')
outfile.write('\n')
def length_estimate(self, infiles, outfiles, elems):
# determine variables
# this order of actions only approximates ninja's scoping rules, as
# documented at: https://ninja-build.org/manual.html#ref_scope
ninja_vars = {}
for e in elems:
(name, value) = e
ninja_vars[name] = value
ninja_vars['deps'] = self.deps
ninja_vars['depfile'] = self.depfile
ninja_vars['in'] = infiles
ninja_vars['out'] = outfiles
# expand variables in command
command = ' '.join([self._quoter(x) for x in self.command + self.args])
estimate = len(command)
for m in re.finditer(r'(\${\w+}|\$\w+)?[^$]*', command):
if m.start(1) != -1:
estimate -= m.end(1) - m.start(1) + 1
chunk = m.group(1)
if chunk[1] == '{':
chunk = chunk[2:-1]
else:
chunk = chunk[1:]
chunk = ninja_vars.get(chunk, []) # undefined ninja variables are empty
estimate += len(' '.join(chunk))
# determine command length
return estimate
class NinjaBuildElement:
def __init__(self, all_outputs, outfilenames, rulename, infilenames, implicit_outs=None):
self.implicit_outfilenames = implicit_outs or []
if isinstance(outfilenames, str):
self.outfilenames = [outfilenames]
else:
self.outfilenames = outfilenames
assert isinstance(rulename, str)
self.rulename = rulename
if isinstance(infilenames, str):
self.infilenames = [infilenames]
else:
self.infilenames = infilenames
self.deps = OrderedSet()
self.orderdeps = OrderedSet()
self.elems = []
self.all_outputs = all_outputs
def add_dep(self, dep):
if isinstance(dep, list):
self.deps.update(dep)
else:
self.deps.add(dep)
def add_orderdep(self, dep):
if isinstance(dep, list):
self.orderdeps.update(dep)
else:
self.orderdeps.add(dep)
def add_item(self, name, elems):
# Always convert from GCC-style argument naming to the naming used by the
# current compiler. Also filter system include paths, deduplicate, etc.
if isinstance(elems, CompilerArgs):
elems = elems.to_native()
if isinstance(elems, str):
elems = [elems]
self.elems.append((name, elems))
if name == 'DEPFILE':
self.elems.append((name + '_UNQUOTED', elems))
def _should_use_rspfile(self):
# 'phony' is a rule built-in to ninja
if self.rulename == 'phony':
return False
if not self.rule.rspable:
return False
infilenames = ' '.join([ninja_quote(i, True) for i in self.infilenames])
outfilenames = ' '.join([ninja_quote(i, True) for i in self.outfilenames])
return self.rule.length_estimate(infilenames,
outfilenames,
self.elems) >= rsp_threshold
def count_rule_references(self):
if self.rulename != 'phony':
if self._should_use_rspfile():
self.rule.rsprefcount += 1
else:
self.rule.refcount += 1
def write(self, outfile):
self.check_outputs()
ins = ' '.join([ninja_quote(i, True) for i in self.infilenames])
outs = ' '.join([ninja_quote(i, True) for i in self.outfilenames])
implicit_outs = ' '.join([ninja_quote(i, True) for i in self.implicit_outfilenames])
if implicit_outs:
implicit_outs = ' | ' + implicit_outs
use_rspfile = self._should_use_rspfile()
if use_rspfile:
rulename = self.rulename + '_RSP'
mlog.debug(f'Command line for building {self.outfilenames} is long, using a response file')
else:
rulename = self.rulename
line = f'build {outs}{implicit_outs}: {rulename} {ins}'
if len(self.deps) > 0:
line += ' | ' + ' '.join([ninja_quote(x, True) for x in sorted(self.deps)])
if len(self.orderdeps) > 0:
line += ' || ' + ' '.join([ninja_quote(x, True) for x in sorted(self.orderdeps)])
line += '\n'
# This is the only way I could find to make this work on all
# platforms including Windows command shell. Slash is a dir separator
# on Windows, too, so all characters are unambiguous and, more importantly,
# do not require quoting, unless explicitly specified, which is necessary for
# the csc compiler.
line = line.replace('\\', '/')
if mesonlib.is_windows():
# Support network paths as backslash, otherwise they are interpreted as
# arguments for compile/link commands when using MSVC
line = ' '.join(
(l.replace('//', '\\\\', 1) if l.startswith('//') else l)
for l in line.split(' ')
)
outfile.write(line)
if use_rspfile:
if self.rule.rspfile_quote_style is RSPFileSyntax.MSVC:
qf = cmd_quote
else:
qf = gcc_rsp_quote
else:
qf = quote_func
for e in self.elems:
(name, elems) = e
should_quote = name not in raw_names
line = f' {name} = '
newelems = []
for i in elems:
if not should_quote or i == '&&': # Hackety hack hack
newelems.append(ninja_quote(i))
else:
newelems.append(ninja_quote(qf(i)))
line += ' '.join(newelems)
line += '\n'
outfile.write(line)
outfile.write('\n')
def check_outputs(self):
for n in self.outfilenames:
if n in self.all_outputs:
raise MesonException(f'Multiple producers for Ninja target "{n}". Please rename your targets.')
self.all_outputs[n] = True
class NinjaBackend(backends.Backend):
def __init__(self, build: T.Optional[build.Build], interpreter: T.Optional[Interpreter]):
super().__init__(build, interpreter)
self.name = 'ninja'
self.ninja_filename = 'build.ninja'
self.fortran_deps = {}
self.all_outputs = {}
self.introspection_data = {}
self.created_llvm_ir_rule = PerMachine(False, False)
def create_target_alias(self, to_target):
# We need to use aliases for targets that might be used as directory
        # names to work around a Ninja bug that breaks `ninja -t clean`.
# This is used for 'reserved' targets such as 'test', 'install',
# 'benchmark', etc, and also for RunTargets.
# https://github.com/mesonbuild/meson/issues/1644
if not to_target.startswith('meson-'):
raise AssertionError(f'Invalid usage of create_target_alias with {to_target!r}')
from_target = to_target[len('meson-'):]
elem = NinjaBuildElement(self.all_outputs, from_target, 'phony', to_target)
self.add_build(elem)
def detect_vs_dep_prefix(self, tempfilename):
'''VS writes its dependency in a locale dependent format.
Detect the search prefix to use.'''
# TODO don't hard-code host
for compiler in self.environment.coredata.compilers.host.values():
# Have to detect the dependency format
# IFort on windows is MSVC like, but doesn't have /showincludes
if isinstance(compiler, FortranCompiler):
continue
if isinstance(compiler, PGICCompiler) and mesonlib.is_windows():
# for the purpose of this function, PGI doesn't act enough like MSVC
return open(tempfilename, 'a', encoding='utf-8')
if isinstance(compiler, VisualStudioLikeCompiler):
break
else:
# None of our compilers are MSVC, we're done.
return open(tempfilename, 'a', encoding='utf-8')
filename = os.path.join(self.environment.get_scratch_dir(),
'incdetect.c')
with open(filename, 'w', encoding='utf-8') as f:
f.write(dedent('''\
#include<stdio.h>
int dummy;
'''))
# The output of cl dependency information is language
# and locale dependent. Any attempt at converting it to
# Python strings leads to failure. We _must_ do this detection
# in raw byte mode and write the result in raw bytes.
pc = subprocess.Popen(compiler.get_exelist() +
['/showIncludes', '/c', 'incdetect.c'],
cwd=self.environment.get_scratch_dir(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = pc.communicate()
# We want to match 'Note: including file: ' in the line
# 'Note: including file: d:\MyDir\include\stdio.h', however
# different locales have different messages with a different
        # number of colons. Match up to the drive name 'd:\'.
# When used in cross compilation, the path separator is a
# forward slash rather than a backslash so handle both; i.e.
# the path is /MyDir/include/stdio.h.
# With certain cross compilation wrappings of MSVC, the paths
# use backslashes, but without the leading drive name, so
# allow the path to start with any path separator, i.e.
# \MyDir\include\stdio.h.
matchre = re.compile(rb"^(.*\s)([a-zA-Z]:\\|[\\\/]).*stdio.h$")
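        # Lines this is intended to match look roughly like (assumed examples):
        #   b'Note: including file: d:\\MyDir\\include\\stdio.h'
        #   b'Remarque : inclusion du fichier : /MyDir/include/stdio.h'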
def detect_prefix(out):
for line in re.split(rb'\r?\n', out):
match = matchre.match(line)
if match:
with open(tempfilename, 'ab') as binfile:
binfile.write(b'msvc_deps_prefix = ' + match.group(1) + b'\n')
return open(tempfilename, 'a', encoding='utf-8')
return None
# Some cl wrappers (e.g. Squish Coco) output dependency info
# to stderr rather than stdout
result = detect_prefix(stdout) or detect_prefix(stderr)
if result:
return result
raise MesonException('Could not determine vs dep dependency prefix string.')
def generate(self):
ninja = environment.detect_ninja_command_and_version(log=True)
if self.build.need_vsenv:
builddir = Path(self.environment.get_build_dir())
try:
# For prettier printing, reduce to a relative path. If
# impossible (e.g., because builddir and cwd are on
# different Windows drives), skip and use the full path.
builddir = builddir.relative_to(Path.cwd())
except ValueError:
pass
meson_command = mesonlib.join_args(mesonlib.get_meson_command())
mlog.log()
mlog.log('Visual Studio environment is needed to run Ninja. It is recommended to use Meson wrapper:')
mlog.log(f'{meson_command} compile -C {builddir}')
if ninja is None:
raise MesonException('Could not detect Ninja v1.8.2 or newer')
(self.ninja_command, self.ninja_version) = ninja
outfilename = os.path.join(self.environment.get_build_dir(), self.ninja_filename)
tempfilename = outfilename + '~'
with open(tempfilename, 'w', encoding='utf-8') as outfile:
outfile.write(f'# This is the build file for project "{self.build.get_project()}"\n')
outfile.write('# It is autogenerated by the Meson build system.\n')
outfile.write('# Do not edit by hand.\n\n')
outfile.write('ninja_required_version = 1.8.2\n\n')
num_pools = self.environment.coredata.options[OptionKey('backend_max_links')].value
if num_pools > 0:
outfile.write(f'''pool link_pool
depth = {num_pools}
''')
with self.detect_vs_dep_prefix(tempfilename) as outfile:
self.generate_rules()
self.build_elements = []
self.generate_phony()
self.add_build_comment(NinjaComment('Build rules for targets'))
for t in ProgressBar(self.build.get_targets().values(), desc='Generating targets'):
self.generate_target(t)
self.add_build_comment(NinjaComment('Test rules'))
self.generate_tests()
self.add_build_comment(NinjaComment('Install rules'))
self.generate_install()
self.generate_dist()
key = OptionKey('b_coverage')
if (key in self.environment.coredata.options and
self.environment.coredata.options[key].value):
gcovr_exe, gcovr_version, lcov_exe, genhtml_exe, _ = environment.find_coverage_tools()
if gcovr_exe or (lcov_exe and genhtml_exe):
self.add_build_comment(NinjaComment('Coverage rules'))
self.generate_coverage_rules(gcovr_exe, gcovr_version)
else:
# FIXME: since we explicitly opted in, should this be an error?
# The docs just say these targets will be created "if possible".
mlog.warning('Need gcovr or lcov/genhtml to generate any coverage reports')
self.add_build_comment(NinjaComment('Suffix'))
self.generate_utils()
self.generate_ending()
self.write_rules(outfile)
self.write_builds(outfile)
default = 'default all\n\n'
outfile.write(default)
# Only overwrite the old build file after the new one has been
# fully created.
os.replace(tempfilename, outfilename)
mlog.cmd_ci_include(outfilename) # For CI debugging
# Refresh Ninja's caches. https://github.com/ninja-build/ninja/pull/1685
if mesonlib.version_compare(self.ninja_version, '>=1.10.0') and os.path.exists('.ninja_deps'):
subprocess.call(self.ninja_command + ['-t', 'restat'])
subprocess.call(self.ninja_command + ['-t', 'cleandead'])
self.generate_compdb()
# http://clang.llvm.org/docs/JSONCompilationDatabase.html
def generate_compdb(self):
rules = []
# TODO: Rather than an explicit list here, rules could be marked in the
# rule store as being wanted in compdb
for for_machine in MachineChoice:
for lang in self.environment.coredata.compilers[for_machine]:
rules += [f"{rule}{ext}" for rule in [self.get_compiler_rule_name(lang, for_machine)]
for ext in ['', '_RSP']]
rules += [f"{rule}{ext}" for rule in [self.get_pch_rule_name(lang, for_machine)]
for ext in ['', '_RSP']]
compdb_options = ['-x'] if mesonlib.version_compare(self.ninja_version, '>=1.9') else []
ninja_compdb = self.ninja_command + ['-t', 'compdb'] + compdb_options + rules
builddir = self.environment.get_build_dir()
try:
jsondb = subprocess.check_output(ninja_compdb, cwd=builddir)
with open(os.path.join(builddir, 'compile_commands.json'), 'wb') as f:
f.write(jsondb)
except Exception:
mlog.warning('Could not create compilation database.')
# Get all generated headers. Any source file might need them so
# we need to add an order dependency to them.
def get_generated_headers(self, target):
if hasattr(target, 'cached_generated_headers'):
return target.cached_generated_headers
header_deps = []
# XXX: Why don't we add deps to CustomTarget headers here?
for genlist in target.get_generated_sources():
if isinstance(genlist, (build.CustomTarget, build.CustomTargetIndex)):
continue
for src in genlist.get_outputs():
if self.environment.is_header(src):
header_deps.append(self.get_target_generated_dir(target, genlist, src))
if 'vala' in target.compilers and not isinstance(target, build.Executable):
vala_header = File.from_built_file(self.get_target_dir(target), target.vala_header)
header_deps.append(vala_header)
# Recurse and find generated headers
for dep in itertools.chain(target.link_targets, target.link_whole_targets):
if isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
header_deps += self.get_generated_headers(dep)
target.cached_generated_headers = header_deps
return header_deps
def get_target_generated_sources(self, target: build.BuildTarget) -> T.MutableMapping[str, File]:
"""
        Returns a dictionary with the keys being the path to the file
        (relative to the build directory) and the value being the
        GeneratedList or CustomTarget that generated it.
"""
srcs: T.MutableMapping[str, File] = OrderedDict()
for gensrc in target.get_generated_sources():
for s in gensrc.get_outputs():
f = self.get_target_generated_dir(target, gensrc, s)
srcs[f] = s
return srcs
def get_target_sources(self, target: build.BuildTarget) -> T.MutableMapping[str, File]:
srcs: T.MutableMapping[str, File] = OrderedDict()
for s in target.get_sources():
# BuildTarget sources are always mesonlib.File files which are
# either in the source root, or generated with configure_file and
# in the build root
if not isinstance(s, File):
raise InvalidArguments(f'All sources in target {s!r} must be of type mesonlib.File')
f = s.rel_to_builddir(self.build_to_src)
srcs[f] = s
return srcs
def get_target_source_can_unity(self, target, source):
if isinstance(source, File):
source = source.fname
if self.environment.is_llvm_ir(source) or \
self.environment.is_assembly(source):
return False
suffix = os.path.splitext(source)[1][1:].lower()
for lang in backends.LANGS_CANT_UNITY:
if lang not in target.compilers:
continue
if suffix in target.compilers[lang].file_suffixes:
return False
return True
def create_target_source_introspection(self, target: build.Target, comp: compilers.Compiler, parameters, sources, generated_sources):
'''
Adds the source file introspection information for a language of a target
        Internal introspection storage format:
self.introspection_data = {
'<target ID>': {
<id tuple>: {
                        'language': 'lang',
'compiler': ['comp', 'exe', 'list'],
'parameters': ['UNIQUE', 'parameter', 'list'],
'sources': [],
'generated_sources': [],
}
}
}
'''
tid = target.get_id()
lang = comp.get_language()
tgt = self.introspection_data[tid]
# Find an existing entry or create a new one
id_hash = (lang, tuple(parameters))
src_block = tgt.get(id_hash, None)
if src_block is None:
# Convert parameters
if isinstance(parameters, CompilerArgs):
parameters = parameters.to_native(copy=True)
parameters = comp.compute_parameters_with_absolute_paths(parameters, self.build_dir)
# The new entry
src_block = {
'language': lang,
'compiler': comp.get_exelist(),
'parameters': parameters,
'sources': [],
'generated_sources': [],
}
tgt[id_hash] = src_block
# Make source files absolute
sources = [x.absolute_path(self.source_dir, self.build_dir) if isinstance(x, File) else os.path.normpath(os.path.join(self.build_dir, x))
for x in sources]
generated_sources = [x.absolute_path(self.source_dir, self.build_dir) if isinstance(x, File) else os.path.normpath(os.path.join(self.build_dir, x))
for x in generated_sources]
# Add the source files
src_block['sources'] += sources
src_block['generated_sources'] += generated_sources
def generate_target(self, target):
try:
if isinstance(target, build.BuildTarget):
os.makedirs(self.get_target_private_dir_abs(target))
except FileExistsError:
pass
if isinstance(target, build.CustomTarget):
self.generate_custom_target(target)
if isinstance(target, build.RunTarget):
self.generate_run_target(target)
compiled_sources = []
source2object = {}
name = target.get_id()
if name in self.processed_targets:
return
self.processed_targets.add(name)
# Initialize an empty introspection source list
self.introspection_data[name] = {}
# Generate rules for all dependency targets
self.process_target_dependencies(target)
self.generate_shlib_aliases(target, self.get_target_dir(target))
# If target uses a language that cannot link to C objects,
# just generate for that language and return.
if isinstance(target, build.Jar):
self.generate_jar_target(target)
return
if target.uses_rust():
self.generate_rust_target(target)
return
if 'cs' in target.compilers:
self.generate_cs_target(target)
return
if 'swift' in target.compilers:
self.generate_swift_target(target)
return
# Pre-existing target C/C++ sources to be built; dict of full path to
# source relative to build root and the original File object.
target_sources: T.MutableMapping[str, File]
# GeneratedList and CustomTarget sources to be built; dict of the full
# path to source relative to build root and the generating target/list
generated_sources: T.MutableMapping[str, File]
# List of sources that have been transpiled from a DSL (like Vala) into
        # a language that is handled below, such as C or C++
transpiled_sources: T.List[str]
if 'vala' in target.compilers:
# Sources consumed by valac are filtered out. These only contain
# C/C++ sources, objects, generated libs, and unknown sources now.
target_sources, generated_sources, \
transpiled_sources = self.generate_vala_compile(target)
elif 'cython' in target.compilers:
target_sources, generated_sources, \
transpiled_sources = self.generate_cython_transpile(target)
else:
target_sources = self.get_target_sources(target)
generated_sources = self.get_target_generated_sources(target)
transpiled_sources = []
self.scan_fortran_module_outputs(target)
# Generate rules for GeneratedLists
self.generate_generator_list_rules(target)
# Generate rules for building the remaining source files in this target
outname = self.get_target_filename(target)
obj_list = []
is_unity = self.is_unity(target)
header_deps = []
unity_src = []
unity_deps = [] # Generated sources that must be built before compiling a Unity target.
header_deps += self.get_generated_headers(target)
if is_unity:
# Warn about incompatible sources if a unity build is enabled
langs = set(target.compilers.keys())
langs_cant = langs.intersection(backends.LANGS_CANT_UNITY)
if langs_cant:
langs_are = langs = ', '.join(langs_cant).upper()
langs_are += ' are' if len(langs_cant) > 1 else ' is'
msg = f'{langs_are} not supported in Unity builds yet, so {langs} ' \
f'sources in the {target.name!r} target will be compiled normally'
mlog.log(mlog.red('FIXME'), msg)
# Get a list of all generated headers that will be needed while building
# this target's sources (generated sources and pre-existing sources).
# This will be set as dependencies of all the target's sources. At the
# same time, also deal with generated sources that need to be compiled.
generated_source_files = []
for rel_src in generated_sources.keys():
dirpart, fnamepart = os.path.split(rel_src)
raw_src = File(True, dirpart, fnamepart)
if self.environment.is_source(rel_src) and not self.environment.is_header(rel_src):
if is_unity and self.get_target_source_can_unity(target, rel_src):
unity_deps.append(raw_src)
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
unity_src.append(abs_src)
else:
generated_source_files.append(raw_src)
elif self.environment.is_object(rel_src):
obj_list.append(rel_src)
elif self.environment.is_library(rel_src) or modules.is_module_library(rel_src):
pass
else:
# Assume anything not specifically a source file is a header. This is because
# people generate files with weird suffixes (.inc, .fh) that they then include
# in their source files.
header_deps.append(raw_src)
# These are the generated source files that need to be built for use by
# this target. We create the Ninja build file elements for this here
# because we need `header_deps` to be fully generated in the above loop.
for src in generated_source_files:
if self.environment.is_llvm_ir(src):
o, s = self.generate_llvm_ir_compile(target, src)
else:
o, s = self.generate_single_compile(target, src, True,
order_deps=header_deps)
compiled_sources.append(s)
source2object[s] = o
obj_list.append(o)
use_pch = self.environment.coredata.options.get(OptionKey('b_pch'))
if use_pch and target.has_pch():
pch_objects = self.generate_pch(target, header_deps=header_deps)
else:
pch_objects = []
# Generate compilation targets for C sources generated from Vala
# sources. This can be extended to other $LANG->C compilers later if
# necessary. This needs to be separate for at least Vala
#
# Do not try to unity-build the generated c files from vala, as these
# often contain duplicate symbols and will fail to compile properly
vala_generated_source_files = []
for src in transpiled_sources:
dirpart, fnamepart = os.path.split(src)
raw_src = File(True, dirpart, fnamepart)
            # Generated targets are ordered deps because they must exist
# before the sources compiling them are used. After the first
# compile we get precise dependency info from dep files.
# This should work in all cases. If it does not, then just
# move them from orderdeps to proper deps.
if self.environment.is_header(src):
header_deps.append(raw_src)
else:
# We gather all these and generate compile rules below
# after `header_deps` (above) is fully generated
vala_generated_source_files.append(raw_src)
for src in vala_generated_source_files:
# Passing 'vala' here signifies that we want the compile
# arguments to be specialized for C code generated by
# valac. For instance, no warnings should be emitted.
o, s = self.generate_single_compile(target, src, 'vala', [], header_deps)
obj_list.append(o)
# Generate compile targets for all the pre-existing sources for this target
for src in target_sources.values():
if not self.environment.is_header(src):
if self.environment.is_llvm_ir(src):
o, s = self.generate_llvm_ir_compile(target, src)
obj_list.append(o)
elif is_unity and self.get_target_source_can_unity(target, src):
abs_src = os.path.join(self.environment.get_build_dir(),
src.rel_to_builddir(self.build_to_src))
unity_src.append(abs_src)
else:
o, s = self.generate_single_compile(target, src, False, [], header_deps)
obj_list.append(o)
compiled_sources.append(s)
source2object[s] = o
obj_list += self.flatten_object_list(target)
if is_unity:
for src in self.generate_unity_files(target, unity_src):
o, s = self.generate_single_compile(target, src, True, unity_deps + header_deps)
obj_list.append(o)
compiled_sources.append(s)
source2object[s] = o
linker, stdlib_args = self.determine_linker_and_stdlib_args(target)
if isinstance(target, build.StaticLibrary) and target.prelink:
final_obj_list = self.generate_prelink(target, obj_list)
else:
final_obj_list = obj_list
elem = self.generate_link(target, outname, final_obj_list, linker, pch_objects, stdlib_args=stdlib_args)
self.generate_dependency_scan_target(target, compiled_sources, source2object, generated_source_files)
self.add_build(elem)
def should_use_dyndeps_for_target(self, target: 'build.BuildTarget') -> bool:
if mesonlib.version_compare(self.ninja_version, '<1.10.0'):
return False
if 'fortran' in target.compilers:
return True
if 'cpp' not in target.compilers:
return False
# Currently only the preview version of Visual Studio is supported.
cpp = target.compilers['cpp']
if cpp.get_id() != 'msvc':
return False
cppversion = self.environment.coredata.options[OptionKey('std', machine=target.for_machine, lang='cpp')].value
if cppversion not in ('latest', 'c++latest', 'vc++latest'):
return False
if not mesonlib.current_vs_supports_modules():
return False
if mesonlib.version_compare(cpp.version, '<19.28.28617'):
return False
return True
def generate_dependency_scan_target(self, target, compiled_sources, source2object, generated_source_files: T.List[mesonlib.File]):
if not self.should_use_dyndeps_for_target(target):
return
depscan_file = self.get_dep_scan_file_for(target)
pickle_base = target.name + '.dat'
pickle_file = os.path.join(self.get_target_private_dir(target), pickle_base).replace('\\', '/')
pickle_abs = os.path.join(self.get_target_private_dir_abs(target), pickle_base).replace('\\', '/')
json_abs = os.path.join(self.get_target_private_dir_abs(target), f'{target.name}-deps.json').replace('\\', '/')
rule_name = 'depscan'
scan_sources = self.select_sources_to_scan(compiled_sources)
        # Dump the sources as a JSON list. This avoids potential problems where
        # the number of sources passed to depscan exceeds the limit imposed by
# the OS.
with open(json_abs, 'w', encoding='utf-8') as f:
json.dump(scan_sources, f)
elem = NinjaBuildElement(self.all_outputs, depscan_file, rule_name, json_abs)
elem.add_item('picklefile', pickle_file)
# Add any generated outputs to the order deps of the scan target, so
# that those sources are present
for g in generated_source_files:
elem.orderdeps.add(g.relative_name())
scaninfo = TargetDependencyScannerInfo(self.get_target_private_dir(target), source2object)
with open(pickle_abs, 'wb') as p:
pickle.dump(scaninfo, p)
self.add_build(elem)
def select_sources_to_scan(self, compiled_sources):
# in practice pick up C++ and Fortran files. If some other language
# requires scanning (possibly Java to deal with inner class files)
# then add them here.
all_suffixes = set(compilers.lang_suffixes['cpp']) | set(compilers.lang_suffixes['fortran'])
selected_sources = []
for source in compiled_sources:
ext = os.path.splitext(source)[1][1:].lower()
if ext in all_suffixes:
selected_sources.append(source)
return selected_sources
def process_target_dependencies(self, target):
for t in target.get_dependencies():
if t.get_id() not in self.processed_targets:
self.generate_target(t)
def custom_target_generator_inputs(self, target):
for s in target.sources:
if isinstance(s, build.GeneratedList):
self.generate_genlist_for_target(s, target)
def unwrap_dep_list(self, target):
deps = []
for i in target.get_dependencies():
# FIXME, should not grab element at zero but rather expand all.
if isinstance(i, list):
i = i[0]
# Add a dependency on all the outputs of this target
for output in i.get_outputs():
deps.append(os.path.join(self.get_target_dir(i), output))
return deps
def generate_custom_target(self, target):
self.custom_target_generator_inputs(target)
(srcs, ofilenames, cmd) = self.eval_custom_target_command(target)
deps = self.unwrap_dep_list(target)
deps += self.get_custom_target_depend_files(target)
if target.build_always_stale:
deps.append('PHONY')
if target.depfile is None:
rulename = 'CUSTOM_COMMAND'
else:
rulename = 'CUSTOM_COMMAND_DEP'
elem = NinjaBuildElement(self.all_outputs, ofilenames, rulename, srcs)
elem.add_dep(deps)
for d in target.extra_depends:
# Add a dependency on all the outputs of this target
for output in d.get_outputs():
elem.add_dep(os.path.join(self.get_target_dir(d), output))
cmd, reason = self.as_meson_exe_cmdline(target.command[0], cmd[1:],
extra_bdeps=target.get_transitive_build_target_deps(),
capture=ofilenames[0] if target.capture else None,
feed=srcs[0] if target.feed else None,
env=target.env,
verbose=target.console)
if reason:
cmd_type = f' (wrapped by meson {reason})'
else:
cmd_type = ''
if target.depfile is not None:
depfile = target.get_dep_outname(elem.infilenames)
rel_dfile = os.path.join(self.get_target_dir(target), depfile)
abs_pdir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(target))
os.makedirs(abs_pdir, exist_ok=True)
elem.add_item('DEPFILE', rel_dfile)
if target.console:
elem.add_item('pool', 'console')
full_name = Path(target.subdir, target.name).as_posix()
elem.add_item('COMMAND', cmd)
elem.add_item('description', f'Generating {full_name} with a custom command{cmd_type}')
self.add_build(elem)
self.processed_targets.add(target.get_id())
def build_run_target_name(self, target):
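        # (Illustrative) a run target 'docs' defined in subproject 'mysub' is named
        # 'mysub@@docs'; targets outside any subproject keep their plain name.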
if target.subproject != '':
subproject_prefix = f'{target.subproject}@@'
else:
subproject_prefix = ''
return f'{subproject_prefix}{target.name}'
def generate_run_target(self, target):
target_name = self.build_run_target_name(target)
if not target.command:
# This is an alias target, it has no command, it just depends on
# other targets.
elem = NinjaBuildElement(self.all_outputs, target_name, 'phony', [])
else:
target_env = self.get_run_target_env(target)
_, _, cmd = self.eval_custom_target_command(target)
meson_exe_cmd, reason = self.as_meson_exe_cmdline(target.command[0], cmd[1:],
force_serialize=True, env=target_env,
verbose=True)
cmd_type = f' (wrapped by meson {reason})'
internal_target_name = f'meson-{target_name}'
elem = NinjaBuildElement(self.all_outputs, internal_target_name, 'CUSTOM_COMMAND', [])
elem.add_item('COMMAND', meson_exe_cmd)
elem.add_item('description', f'Running external command {target.name}{cmd_type}')
elem.add_item('pool', 'console')
# Alias that runs the target defined above with the name the user specified
self.create_target_alias(internal_target_name)
deps = self.unwrap_dep_list(target)
deps += self.get_custom_target_depend_files(target)
elem.add_dep(deps)
self.add_build(elem)
self.processed_targets.add(target.get_id())
def generate_coverage_command(self, elem, outputs):
targets = self.build.get_targets().values()
use_llvm_cov = False
for target in targets:
if not hasattr(target, 'compilers'):
continue
for compiler in target.compilers.values():
if compiler.get_id() == 'clang' and not compiler.info.is_darwin():
use_llvm_cov = True
break
elem.add_item('COMMAND', self.environment.get_build_command() +
['--internal', 'coverage'] +
outputs +
[self.environment.get_source_dir(),
os.path.join(self.environment.get_source_dir(),
self.build.get_subproject_dir()),
self.environment.get_build_dir(),
self.environment.get_log_dir()] +
(['--use_llvm_cov'] if use_llvm_cov else []))
def generate_coverage_rules(self, gcovr_exe: T.Optional[str], gcovr_version: T.Optional[str]):
e = NinjaBuildElement(self.all_outputs, 'meson-coverage', 'CUSTOM_COMMAND', 'PHONY')
self.generate_coverage_command(e, [])
e.add_item('description', 'Generates coverage reports')
self.add_build(e)
# Alias that runs the target defined above
self.create_target_alias('meson-coverage')
self.generate_coverage_legacy_rules(gcovr_exe, gcovr_version)
def generate_coverage_legacy_rules(self, gcovr_exe: T.Optional[str], gcovr_version: T.Optional[str]):
e = NinjaBuildElement(self.all_outputs, 'meson-coverage-html', 'CUSTOM_COMMAND', 'PHONY')
self.generate_coverage_command(e, ['--html'])
e.add_item('description', 'Generates HTML coverage report')
self.add_build(e)
# Alias that runs the target defined above
self.create_target_alias('meson-coverage-html')
if gcovr_exe:
e = NinjaBuildElement(self.all_outputs, 'meson-coverage-xml', 'CUSTOM_COMMAND', 'PHONY')
self.generate_coverage_command(e, ['--xml'])
e.add_item('description', 'Generates XML coverage report')
self.add_build(e)
# Alias that runs the target defined above
self.create_target_alias('meson-coverage-xml')
e = NinjaBuildElement(self.all_outputs, 'meson-coverage-text', 'CUSTOM_COMMAND', 'PHONY')
self.generate_coverage_command(e, ['--text'])
e.add_item('description', 'Generates text coverage report')
self.add_build(e)
# Alias that runs the target defined above
self.create_target_alias('meson-coverage-text')
if mesonlib.version_compare(gcovr_version, '>=4.2'):
e = NinjaBuildElement(self.all_outputs, 'meson-coverage-sonarqube', 'CUSTOM_COMMAND', 'PHONY')
self.generate_coverage_command(e, ['--sonarqube'])
e.add_item('description', 'Generates Sonarqube XML coverage report')
self.add_build(e)
# Alias that runs the target defined above
self.create_target_alias('meson-coverage-sonarqube')
def generate_install(self):
self.create_install_data_files()
elem = NinjaBuildElement(self.all_outputs, 'meson-install', 'CUSTOM_COMMAND', 'PHONY')
elem.add_dep('all')
elem.add_item('DESC', 'Installing files.')
elem.add_item('COMMAND', self.environment.get_build_command() + ['install', '--no-rebuild'])
elem.add_item('pool', 'console')
self.add_build(elem)
# Alias that runs the target defined above
self.create_target_alias('meson-install')
def generate_tests(self):
self.serialize_tests()
cmd = self.environment.get_build_command(True) + ['test', '--no-rebuild']
if not self.environment.coredata.get_option(OptionKey('stdsplit')):
cmd += ['--no-stdsplit']
if self.environment.coredata.get_option(OptionKey('errorlogs')):
cmd += ['--print-errorlogs']
elem = NinjaBuildElement(self.all_outputs, 'meson-test', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running all tests.')
elem.add_item('pool', 'console')
self.add_build(elem)
# Alias that runs the above-defined meson-test target
self.create_target_alias('meson-test')
# And then benchmarks.
cmd = self.environment.get_build_command(True) + [
'test', '--benchmark', '--logbase',
'benchmarklog', '--num-processes=1', '--no-rebuild']
elem = NinjaBuildElement(self.all_outputs, 'meson-benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running benchmark suite.')
elem.add_item('pool', 'console')
self.add_build(elem)
# Alias that runs the above-defined meson-benchmark target
self.create_target_alias('meson-benchmark')
def generate_rules(self):
self.rules = []
self.ruledict = {}
self.add_rule_comment(NinjaComment('Rules for module scanning.'))
self.generate_scanner_rules()
self.add_rule_comment(NinjaComment('Rules for compiling.'))
self.generate_compile_rules()
self.add_rule_comment(NinjaComment('Rules for linking.'))
self.generate_static_link_rules()
self.generate_dynamic_link_rules()
self.add_rule_comment(NinjaComment('Other rules'))
# Ninja errors out if you have deps = gcc but no depfile, so we must
# have two rules for custom commands.
self.add_rule(NinjaRule('CUSTOM_COMMAND', ['$COMMAND'], [], '$DESC',
extra='restat = 1'))
self.add_rule(NinjaRule('CUSTOM_COMMAND_DEP', ['$COMMAND'], [], '$DESC',
deps='gcc', depfile='$DEPFILE',
extra='restat = 1'))
c = self.environment.get_build_command() + \
['--internal',
'regenerate',
self.environment.get_source_dir(),
self.environment.get_build_dir(),
'--backend',
'ninja']
self.add_rule(NinjaRule('REGENERATE_BUILD',
c, [],
'Regenerating build files.',
extra='generator = 1'))
def add_rule_comment(self, comment):
self.rules.append(comment)
def add_build_comment(self, comment):
self.build_elements.append(comment)
def add_rule(self, rule):
if rule.name in self.ruledict:
raise MesonException(f'Tried to add rule {rule.name} twice.')
self.rules.append(rule)
self.ruledict[rule.name] = rule
def add_build(self, build):
self.build_elements.append(build)
if build.rulename != 'phony':
# reference rule
if build.rulename in self.ruledict:
build.rule = self.ruledict[build.rulename]
else:
mlog.warning(f"build statement for {build.outfilenames} references non-existent rule {build.rulename}")
def write_rules(self, outfile):
for b in self.build_elements:
if isinstance(b, NinjaBuildElement):
b.count_rule_references()
for r in self.rules:
r.write(outfile)
def write_builds(self, outfile):
for b in ProgressBar(self.build_elements, desc='Writing build.ninja'):
b.write(outfile)
def generate_phony(self):
self.add_build_comment(NinjaComment('Phony build target, always out of date'))
elem = NinjaBuildElement(self.all_outputs, 'PHONY', 'phony', '')
self.add_build(elem)
def generate_jar_target(self, target):
fname = target.get_filename()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
class_list = []
compiler = target.compilers['java']
c = 'c'
m = 'm'
e = ''
f = 'f'
main_class = target.get_main_class()
if main_class != '':
e = 'e'
# Add possible java generated files to src list
generated_sources = self.get_target_generated_sources(target)
gen_src_list = []
for rel_src in generated_sources.keys():
dirpart, fnamepart = os.path.split(rel_src)
raw_src = File(True, dirpart, fnamepart)
if rel_src.endswith('.java'):
gen_src_list.append(raw_src)
compile_args = self.determine_single_java_compile_args(target, compiler)
for src in src_list + gen_src_list:
plain_class_path = self.generate_single_java_compile(src, target, compiler, compile_args)
class_list.append(plain_class_path)
class_dep_list = [os.path.join(self.get_target_private_dir(target), i) for i in class_list]
manifest_path = os.path.join(self.get_target_private_dir(target), 'META-INF', 'MANIFEST.MF')
manifest_fullpath = os.path.join(self.environment.get_build_dir(), manifest_path)
os.makedirs(os.path.dirname(manifest_fullpath), exist_ok=True)
with open(manifest_fullpath, 'w', encoding='utf-8') as manifest:
if any(target.link_targets):
manifest.write('Class-Path: ')
cp_paths = [os.path.join(self.get_target_dir(l), l.get_filename()) for l in target.link_targets]
manifest.write(' '.join(cp_paths))
manifest.write('\n')
jar_rule = 'java_LINKER'
commands = [c + m + e + f]
commands.append(manifest_path)
if e != '':
commands.append(main_class)
commands.append(self.get_target_filename(target))
# Java compilation can produce an arbitrary number of output
# class files for a single source file. Thus tell jar to just
# grab everything in the final package.
commands += ['-C', self.get_target_private_dir(target), '.']
elem = NinjaBuildElement(self.all_outputs, outname_rel, jar_rule, [])
elem.add_dep(class_dep_list)
elem.add_item('ARGS', commands)
self.add_build(elem)
# Create introspection information
self.create_target_source_introspection(target, compiler, compile_args, src_list, gen_src_list)
def generate_cs_resource_tasks(self, target):
args = []
deps = []
for r in target.resources:
rel_sourcefile = os.path.join(self.build_to_src, target.subdir, r)
if r.endswith('.resources'):
a = '-resource:' + rel_sourcefile
elif r.endswith('.txt') or r.endswith('.resx'):
ofilebase = os.path.splitext(os.path.basename(r))[0] + '.resources'
ofilename = os.path.join(self.get_target_private_dir(target), ofilebase)
elem = NinjaBuildElement(self.all_outputs, ofilename, "CUSTOM_COMMAND", rel_sourcefile)
elem.add_item('COMMAND', ['resgen', rel_sourcefile, ofilename])
elem.add_item('DESC', f'Compiling resource {rel_sourcefile}')
self.add_build(elem)
deps.append(ofilename)
a = '-resource:' + ofilename
else:
raise InvalidArguments(f'Unknown resource file {r}.')
args.append(a)
return args, deps
def generate_cs_target(self, target: build.BuildTarget):
buildtype = self.get_option_for_target(OptionKey('buildtype'), target)
fname = target.get_filename()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
compiler = target.compilers['cs']
rel_srcs = [os.path.normpath(s.rel_to_builddir(self.build_to_src)) for s in src_list]
deps = []
commands = compiler.compiler_args(target.extra_args.get('cs', []))
commands += compiler.get_buildtype_args(buildtype)
commands += compiler.get_optimization_args(self.get_option_for_target(OptionKey('optimization'), target))
commands += compiler.get_debug_args(self.get_option_for_target(OptionKey('debug'), target))
if isinstance(target, build.Executable):
commands.append('-target:exe')
elif isinstance(target, build.SharedLibrary):
commands.append('-target:library')
else:
raise MesonException('Unknown C# target type.')
(resource_args, resource_deps) = self.generate_cs_resource_tasks(target)
commands += resource_args
deps += resource_deps
commands += compiler.get_output_args(outname_rel)
for l in target.link_targets:
lname = os.path.join(self.get_target_dir(l), l.get_filename())
commands += compiler.get_link_args(lname)
deps.append(lname)
if '-g' in commands:
outputs = [outname_rel, outname_rel + '.mdb']
else:
outputs = [outname_rel]
generated_sources = self.get_target_generated_sources(target)
generated_rel_srcs = []
for rel_src in generated_sources.keys():
if rel_src.lower().endswith('.cs'):
generated_rel_srcs.append(os.path.normpath(rel_src))
deps.append(os.path.normpath(rel_src))
for dep in target.get_external_deps():
commands.extend_direct(dep.get_link_args())
commands += self.build.get_project_args(compiler, target.subproject, target.for_machine)
commands += self.build.get_global_args(compiler, target.for_machine)
elem = NinjaBuildElement(self.all_outputs, outputs, self.get_compiler_rule_name('cs', target.for_machine), rel_srcs + generated_rel_srcs)
elem.add_dep(deps)
elem.add_item('ARGS', commands)
self.add_build(elem)
self.generate_generator_list_rules(target)
self.create_target_source_introspection(target, compiler, commands, rel_srcs, generated_rel_srcs)
def determine_single_java_compile_args(self, target, compiler):
args = []
args += compiler.get_buildtype_args(self.get_option_for_target(OptionKey('buildtype'), target))
args += self.build.get_global_args(compiler, target.for_machine)
args += self.build.get_project_args(compiler, target.subproject, target.for_machine)
args += target.get_java_args()
args += compiler.get_output_args(self.get_target_private_dir(target))
args += target.get_classpath_args()
curdir = target.get_subdir()
sourcepath = os.path.join(self.build_to_src, curdir) + os.pathsep
sourcepath += os.path.normpath(curdir) + os.pathsep
for i in target.include_dirs:
for idir in i.get_incdirs():
sourcepath += os.path.join(self.build_to_src, i.curdir, idir) + os.pathsep
args += ['-sourcepath', sourcepath]
return args
def generate_single_java_compile(self, src, target, compiler, args):
deps = [os.path.join(self.get_target_dir(l), l.get_filename()) for l in target.link_targets]
generated_sources = self.get_target_generated_sources(target)
for rel_src in generated_sources.keys():
if rel_src.endswith('.java'):
deps.append(rel_src)
rel_src = src.rel_to_builddir(self.build_to_src)
plain_class_path = src.fname[:-4] + 'class'
rel_obj = os.path.join(self.get_target_private_dir(target), plain_class_path)
element = NinjaBuildElement(self.all_outputs, rel_obj, self.compiler_to_rule_name(compiler), rel_src)
element.add_dep(deps)
element.add_item('ARGS', args)
self.add_build(element)
return plain_class_path
def generate_java_link(self):
rule = 'java_LINKER'
command = ['jar', '$ARGS']
description = 'Creating JAR $out'
self.add_rule(NinjaRule(rule, command, [], description))
def determine_dep_vapis(self, target):
"""
Peek into the sources of BuildTargets we're linking with, and if any of
them was built with Vala, assume that it also generated a .vapi file of
the same name as the BuildTarget and return the path to it relative to
the build directory.
"""
result = OrderedSet()
for dep in itertools.chain(target.link_targets, target.link_whole_targets):
if not dep.is_linkable_target():
continue
for i in dep.sources:
if hasattr(i, 'fname'):
i = i.fname
if i.split('.')[-1] in compilers.lang_suffixes['vala']:
vapiname = dep.vala_vapi
fullname = os.path.join(self.get_target_dir(dep), vapiname)
result.add(fullname)
break
return list(result)
def split_vala_sources(self, t: build.BuildTarget) -> \
T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File],
T.Tuple[T.MutableMapping[str, File], T.MutableMapping]]:
"""
Splits the target's sources into .vala, .gs, .vapi, and other sources.
Handles both pre-existing and generated sources.
Returns a tuple (vala, vapi, others) each of which is a dictionary with
the keys being the path to the file (relative to the build directory)
and the value being the object that generated or represents the file.
"""
vala: T.MutableMapping[str, File] = OrderedDict()
vapi: T.MutableMapping[str, File] = OrderedDict()
others: T.MutableMapping[str, File] = OrderedDict()
othersgen: T.MutableMapping[str, File] = OrderedDict()
# Split pre-existing sources
for s in t.get_sources():
# BuildTarget sources are always mesonlib.File files which are
# either in the source root, or generated with configure_file and
# in the build root
if not isinstance(s, File):
raise InvalidArguments(f'All sources in target {t!r} must be of type mesonlib.File, not {s!r}')
f = s.rel_to_builddir(self.build_to_src)
if s.endswith(('.vala', '.gs')):
srctype = vala
elif s.endswith('.vapi'):
srctype = vapi
else:
srctype = others
srctype[f] = s
# Split generated sources
for gensrc in t.get_generated_sources():
for s in gensrc.get_outputs():
f = self.get_target_generated_dir(t, gensrc, s)
if s.endswith(('.vala', '.gs')):
srctype = vala
elif s.endswith('.vapi'):
srctype = vapi
# Generated non-Vala (C/C++) sources. Won't be used for
# generating the Vala compile rule below.
else:
srctype = othersgen
# Duplicate outputs are disastrous
if f in srctype and srctype[f] is not gensrc:
msg = 'Duplicate output {0!r} from {1!r} {2!r}; ' \
'conflicts with {0!r} from {4!r} {3!r}' \
''.format(f, type(gensrc).__name__, gensrc.name,
srctype[f].name, type(srctype[f]).__name__)
raise InvalidArguments(msg)
# Store 'somefile.vala': GeneratedList (or CustomTarget)
srctype[f] = gensrc
return vala, vapi, (others, othersgen)
def generate_vala_compile(self, target: build.BuildTarget) -> \
T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File], T.List[str]]:
"""Vala is compiled into C. Set up all necessary build steps here."""
(vala_src, vapi_src, other_src) = self.split_vala_sources(target)
extra_dep_files = []
if not vala_src:
raise InvalidArguments(f'Vala library {target.name!r} has no Vala or Genie source files.')
valac = target.compilers['vala']
c_out_dir = self.get_target_private_dir(target)
# C files generated by valac
vala_c_src: T.List[str] = []
# Files generated by valac
valac_outputs: T.List = []
# All sources that are passed to valac on the commandline
all_files = list(vapi_src)
# Passed as --basedir
srcbasedir = os.path.join(self.build_to_src, target.get_subdir())
for (vala_file, gensrc) in vala_src.items():
all_files.append(vala_file)
# Figure out where the Vala compiler will write the compiled C file
#
# If the Vala file is in a subdir of the build dir (in our case
# because it was generated/built by something else), and is also
# a subdir of --basedir (because the builddir is in the source
# tree, and the target subdir is the source root), the subdir
# components from the source root till the private builddir will be
# duplicated inside the private builddir. Otherwise, just the
# basename will be used.
#
# If the Vala file is outside the build directory, the paths from
# the --basedir till the subdir will be duplicated inside the
# private builddir.
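# Worked example (hypothetical layout): a source file
# '<target subdir>/nested/foo.vala' keeps its relative part and becomes
# 'nested/foo.c', while a file outside the target subdir is reduced to
# plain 'foo.c'; either way the result ends up under c_out_dir below.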
if isinstance(gensrc, (build.CustomTarget, build.GeneratedList)) or gensrc.is_built:
vala_c_file = os.path.splitext(os.path.basename(vala_file))[0] + '.c'
# Check if the vala file is in a subdir of --basedir
abs_srcbasedir = os.path.join(self.environment.get_source_dir(), target.get_subdir())
abs_vala_file = os.path.join(self.environment.get_build_dir(), vala_file)
if PurePath(os.path.commonpath((abs_srcbasedir, abs_vala_file))) == PurePath(abs_srcbasedir):
vala_c_subdir = PurePath(abs_vala_file).parent.relative_to(abs_srcbasedir)
vala_c_file = os.path.join(str(vala_c_subdir), vala_c_file)
else:
path_to_target = os.path.join(self.build_to_src, target.get_subdir())
if vala_file.startswith(path_to_target):
vala_c_file = os.path.splitext(os.path.relpath(vala_file, path_to_target))[0] + '.c'
else:
vala_c_file = os.path.splitext(os.path.basename(vala_file))[0] + '.c'
# All this will be placed inside the c_out_dir
vala_c_file = os.path.join(c_out_dir, vala_c_file)
vala_c_src.append(vala_c_file)
valac_outputs.append(vala_c_file)
args = self.generate_basic_compiler_args(target, valac)
args += valac.get_colorout_args(self.environment.coredata.options.get(OptionKey('b_colorout')).value)
# Tell Valac to output everything in our private directory. Sadly this
# means it will also preserve the directory components of Vala sources
# found inside the build tree (generated sources).
args += ['--directory', c_out_dir]
args += ['--basedir', srcbasedir]
if target.is_linkable_target():
# Library name
args += ['--library', target.name]
# Outputted header
hname = os.path.join(self.get_target_dir(target), target.vala_header)
args += ['--header', hname]
if self.is_unity(target):
# Without this the declarations will get duplicated in the .c
# files and cause a build failure when all of them are
# #include-d in one .c file.
# https://github.com/mesonbuild/meson/issues/1969
args += ['--use-header']
valac_outputs.append(hname)
# Outputted vapi file
vapiname = os.path.join(self.get_target_dir(target), target.vala_vapi)
# Force valac to write the vapi and gir files in the target build dir.
# Without this, it will write it inside c_out_dir
args += ['--vapi', os.path.join('..', target.vala_vapi)]
valac_outputs.append(vapiname)
target.outputs += [target.vala_header, target.vala_vapi]
target.install_tag += ['devel', 'devel']
# Install header and vapi to default locations if user requests this
if len(target.install_dir) > 1 and target.install_dir[1] is True:
target.install_dir[1] = self.environment.get_includedir()
if len(target.install_dir) > 2 and target.install_dir[2] is True:
target.install_dir[2] = os.path.join(self.environment.get_datadir(), 'vala', 'vapi')
# Generate GIR if requested
if isinstance(target.vala_gir, str):
girname = os.path.join(self.get_target_dir(target), target.vala_gir)
args += ['--gir', os.path.join('..', target.vala_gir)]
valac_outputs.append(girname)
target.outputs.append(target.vala_gir)
target.install_tag.append('devel')
# Install GIR to default location if requested by user
if len(target.install_dir) > 3 and target.install_dir[3] is True:
target.install_dir[3] = os.path.join(self.environment.get_datadir(), 'gir-1.0')
# Detect gresources and add --gresources arguments for each
for gensrc in other_src[1].values():
if isinstance(gensrc, modules.GResourceTarget):
gres_xml, = self.get_custom_target_sources(gensrc)
args += ['--gresources=' + gres_xml]
extra_args = []
for a in target.extra_args.get('vala', []):
if isinstance(a, File):
relname = a.rel_to_builddir(self.build_to_src)
extra_dep_files.append(relname)
extra_args.append(relname)
else:
extra_args.append(a)
dependency_vapis = self.determine_dep_vapis(target)
extra_dep_files += dependency_vapis
args += extra_args
element = NinjaBuildElement(self.all_outputs, valac_outputs,
self.compiler_to_rule_name(valac),
all_files + dependency_vapis)
element.add_item('ARGS', args)
element.add_dep(extra_dep_files)
self.add_build(element)
self.create_target_source_introspection(target, valac, args, all_files, [])
return other_src[0], other_src[1], vala_c_src
def generate_cython_transpile(self, target: build.BuildTarget) -> \
T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File], T.List[str]]:
"""Generate rules for transpiling Cython files to C or C++
XXX: Currently only C is handled.
"""
static_sources: T.MutableMapping[str, File] = OrderedDict()
generated_sources: T.MutableMapping[str, File] = OrderedDict()
cython_sources: T.List[str] = []
cython = target.compilers['cython']
opt_proxy = self.get_compiler_options_for_target(target)
args: T.List[str] = []
args += cython.get_always_args()
args += cython.get_buildtype_args(self.get_option_for_target(OptionKey('buildtype'), target))
args += cython.get_debug_args(self.get_option_for_target(OptionKey('debug'), target))
args += cython.get_optimization_args(self.get_option_for_target(OptionKey('optimization'), target))
args += cython.get_option_compile_args(opt_proxy)
args += self.build.get_global_args(cython, target.for_machine)
args += self.build.get_project_args(cython, target.subproject, target.for_machine)
ext = opt_proxy[OptionKey('language', machine=target.for_machine, lang='cython')].value
for src in target.get_sources():
if src.endswith('.pyx'):
output = os.path.join(self.get_target_private_dir(target), f'{src}.{ext}')
args = args.copy()
args += cython.get_output_args(output)
element = NinjaBuildElement(
self.all_outputs, [output],
self.compiler_to_rule_name(cython),
[src.absolute_path(self.environment.get_source_dir(), self.environment.get_build_dir())])
element.add_item('ARGS', args)
self.add_build(element)
# TODO: introspection?
cython_sources.append(output)
else:
static_sources[src.rel_to_builddir(self.build_to_src)] = src
for gen in target.get_generated_sources():
for ssrc in gen.get_outputs():
if isinstance(gen, GeneratedList):
ssrc = os.path.join(self.get_target_private_dir(target), ssrc)
else:
ssrc = os.path.join(gen.get_subdir(), ssrc)
if ssrc.endswith('.pyx'):
args = args.copy()
output = os.path.join(self.get_target_private_dir(target), f'{ssrc}.{ext}')
args += cython.get_output_args(output)
element = NinjaBuildElement(
self.all_outputs, [output],
self.compiler_to_rule_name(cython),
[ssrc])
element.add_item('ARGS', args)
self.add_build(element)
# TODO: introspection?
cython_sources.append(output)
else:
generated_sources[ssrc] = mesonlib.File.from_built_file(gen.get_subdir(), ssrc)
return static_sources, generated_sources, cython_sources
def generate_rust_target(self, target: build.BuildTarget) -> None:
rustc = target.compilers['rust']
# Rust compiler takes only the main file as input and
# figures out what other files are needed via import
# statements and magic.
base_proxy = self.get_base_options_for_target(target)
args = rustc.compiler_args()
# Compiler args for compiling this target
args += compilers.get_base_compile_args(base_proxy, rustc)
self.generate_generator_list_rules(target)
# dependencies need to cause a relink, they're not just for ordering
deps = [
os.path.join(t.subdir, t.get_filename())
for t in itertools.chain(target.link_targets, target.link_whole_targets)
]
orderdeps: T.List[str] = []
main_rust_file = None
for i in target.get_sources():
if not rustc.can_compile(i):
raise InvalidArguments(f'Rust target {target.get_basename()} contains a non-rust source file.')
if main_rust_file is None:
main_rust_file = i.rel_to_builddir(self.build_to_src)
for g in target.get_generated_sources():
for i in g.get_outputs():
if not rustc.can_compile(i):
raise InvalidArguments(f'Rust target {target.get_basename()} contains a non-rust source file.')
if isinstance(g, GeneratedList):
fname = os.path.join(self.get_target_private_dir(target), i)
else:
fname = os.path.join(g.get_subdir(), i)
if main_rust_file is None:
main_rust_file = fname
orderdeps.append(fname)
if main_rust_file is None:
raise RuntimeError('A Rust target has no Rust sources. This is weird. Also a bug. Please report')
target_name = os.path.join(target.subdir, target.get_filename())
if isinstance(target, build.Executable):
cratetype = 'bin'
elif hasattr(target, 'rust_crate_type'):
cratetype = target.rust_crate_type
elif isinstance(target, build.SharedLibrary):
cratetype = 'dylib'
elif isinstance(target, build.StaticLibrary):
cratetype = 'rlib'
else:
raise InvalidArguments('Unknown target type for rustc.')
args.extend(['--crate-type', cratetype])
# If we're dynamically linking, add those arguments
#
# Rust is super annoying, calling -C link-arg foo does not work, it has
# to be -C link-arg=foo
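# e.g. (illustrative flag) rustc accepts '-C link-arg=-Wl,-rpath,$ORIGIN'
# but not '-C link-arg -Wl,-rpath,$ORIGIN' as two separate tokens.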
if cratetype in {'bin', 'dylib'}:
args.extend(rustc.get_linker_always_args())
args += self.generate_basic_compiler_args(target, rustc, False)
args += ['--crate-name', target.name]
depfile = os.path.join(target.subdir, target.name + '.d')
args += ['--emit', f'dep-info={depfile}', '--emit', 'link']
args += target.get_extra_args('rust')
args += rustc.get_output_args(os.path.join(target.subdir, target.get_filename()))
linkdirs = mesonlib.OrderedSet()
external_deps = target.external_deps.copy()
for d in itertools.chain(target.link_targets, target.link_whole_targets):
linkdirs.add(d.subdir)
if d.uses_rust():
# specify `extern CRATE_NAME=OUTPUT_FILE` for each Rust
# dependency, so that collisions with libraries in rustc's
# sysroot don't cause ambiguity
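# e.g. (hypothetical crate name) this emits:
#   --extern mylib=subdir/libmylib.rlib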
args += ['--extern', '{}={}'.format(d.name, os.path.join(d.subdir, d.filename))]
elif d.typename == 'static library':
# Rustc doesn't follow Meson's convention that static libraries
# are called .a, and import libraries are .lib, so we have to
# manually handle that.
if rustc.linker.id in {'link', 'lld-link'}:
args += ['-C', f'link-arg={self.get_target_filename_for_linking(d)}']
else:
args += ['-l', f'static={d.name}']
external_deps.extend(d.external_deps)
else:
# Rust uses -l for non rust dependencies, but we still need to
# add dylib=foo
args += ['-l', f'dylib={d.name}']
for e in external_deps:
for a in e.get_link_args():
if a.endswith(('.dll', '.so', '.dylib')):
dir_, lib = os.path.split(a)
linkdirs.add(dir_)
lib, ext = os.path.splitext(lib)
if lib.startswith('lib'):
lib = lib[3:]
args.extend(['-l', f'dylib={lib}'])
elif a.startswith('-L'):
args.append(a)
elif a.startswith('-l'):
_type = 'static' if e.static else 'dylib'
args.extend(['-l', f'{_type}={a[2:]}'])
for d in linkdirs:
if d == '':
d = '.'
args += ['-L', d]
has_shared_deps = any(isinstance(dep, build.SharedLibrary) for dep in target.get_dependencies())
if isinstance(target, build.SharedLibrary) or has_shared_deps:
# add prefer-dynamic if any of the Rust libraries we link
# against are dynamic, otherwise we'll end up with
# multiple implementations of crates
args += ['-C', 'prefer-dynamic']
# build the usual rpath arguments as well...
# Set runtime-paths so we can run executables without needing to set
# LD_LIBRARY_PATH, etc in the environment. Doesn't work on Windows.
if has_path_sep(target.name):
# Target names really should not have slashes in them, but
# unfortunately we did not check for that and some downstream projects
# now have them. Once slashes are forbidden, remove this bit.
target_slashname_workaround_dir = os.path.join(os.path.dirname(target.name),
self.get_target_dir(target))
else:
target_slashname_workaround_dir = self.get_target_dir(target)
rpath_args, target.rpath_dirs_to_remove = (
rustc.build_rpath_args(self.environment,
self.environment.get_build_dir(),
target_slashname_workaround_dir,
self.determine_rpath_dirs(target),
target.build_rpath,
target.install_rpath))
# ... but then add rustc's sysroot to account for rustup
# installations
for rpath_arg in rpath_args:
args += ['-C', 'link-arg=' + rpath_arg + ':' + os.path.join(rustc.get_sysroot(), 'lib')]
compiler_name = self.get_compiler_rule_name('rust', target.for_machine)
element = NinjaBuildElement(self.all_outputs, target_name, compiler_name, main_rust_file)
if orderdeps:
element.add_orderdep(orderdeps)
if deps:
element.add_dep(deps)
element.add_item('ARGS', args)
element.add_item('targetdep', depfile)
element.add_item('cratetype', cratetype)
self.add_build(element)
if isinstance(target, build.SharedLibrary):
self.generate_shsym(target)
self.create_target_source_introspection(target, rustc, args, [main_rust_file], [])
@staticmethod
def get_rule_suffix(for_machine: MachineChoice) -> str:
return PerMachine('_FOR_BUILD', '')[for_machine]
@classmethod
def get_compiler_rule_name(cls, lang: str, for_machine: MachineChoice) -> str:
return '{}_COMPILER{}'.format(lang, cls.get_rule_suffix(for_machine))
@classmethod
def get_pch_rule_name(cls, lang: str, for_machine: MachineChoice) -> str:
return '{}_PCH{}'.format(lang, cls.get_rule_suffix(for_machine))
@classmethod
def compiler_to_rule_name(cls, compiler: Compiler) -> str:
return cls.get_compiler_rule_name(compiler.get_language(), compiler.for_machine)
@classmethod
def compiler_to_pch_rule_name(cls, compiler: Compiler) -> str:
return cls.get_pch_rule_name(compiler.get_language(), compiler.for_machine)
def swift_module_file_name(self, target):
return os.path.join(self.get_target_private_dir(target),
self.target_swift_modulename(target) + '.swiftmodule')
def target_swift_modulename(self, target):
return target.name
def determine_swift_dep_modules(self, target):
result = []
for l in target.link_targets:
if self.is_swift_target(l):
result.append(self.swift_module_file_name(l))
return result
def get_swift_link_deps(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_filename(l))
return result
def split_swift_generated_sources(self, target):
all_srcs = self.get_target_generated_sources(target)
srcs = []
others = []
for i in all_srcs:
if i.endswith('.swift'):
srcs.append(i)
else:
others.append(i)
return srcs, others
def generate_swift_target(self, target):
module_name = self.target_swift_modulename(target)
swiftc = target.compilers['swift']
abssrc = []
relsrc = []
abs_headers = []
header_imports = []
for i in target.get_sources():
if swiftc.can_compile(i):
rels = i.rel_to_builddir(self.build_to_src)
abss = os.path.normpath(os.path.join(self.environment.get_build_dir(), rels))
relsrc.append(rels)
abssrc.append(abss)
elif self.environment.is_header(i):
relh = i.rel_to_builddir(self.build_to_src)
absh = os.path.normpath(os.path.join(self.environment.get_build_dir(), relh))
abs_headers.append(absh)
header_imports += swiftc.get_header_import_args(absh)
else:
raise InvalidArguments(f'Swift target {target.get_basename()} contains a non-swift source file.')
os.makedirs(self.get_target_private_dir_abs(target), exist_ok=True)
compile_args = swiftc.get_compile_only_args()
compile_args += swiftc.get_optimization_args(self.get_option_for_target(OptionKey('optimization'), target))
compile_args += swiftc.get_debug_args(self.get_option_for_target(OptionKey('debug'), target))
compile_args += swiftc.get_module_args(module_name)
compile_args += self.build.get_project_args(swiftc, target.subproject, target.for_machine)
compile_args += self.build.get_global_args(swiftc, target.for_machine)
for i in reversed(target.get_include_dirs()):
basedir = i.get_curdir()
for d in i.get_incdirs():
if d not in ('', '.'):
expdir = os.path.join(basedir, d)
else:
expdir = basedir
srctreedir = os.path.normpath(os.path.join(self.environment.get_build_dir(), self.build_to_src, expdir))
sargs = swiftc.get_include_args(srctreedir, False)
compile_args += sargs
link_args = swiftc.get_output_args(os.path.join(self.environment.get_build_dir(), self.get_target_filename(target)))
link_args += self.build.get_project_link_args(swiftc, target.subproject, target.for_machine)
link_args += self.build.get_global_link_args(swiftc, target.for_machine)
rundir = self.get_target_private_dir(target)
out_module_name = self.swift_module_file_name(target)
in_module_files = self.determine_swift_dep_modules(target)
abs_module_dirs = self.determine_swift_dep_dirs(target)
module_includes = []
for x in abs_module_dirs:
module_includes += swiftc.get_include_args(x, False)
link_deps = self.get_swift_link_deps(target)
abs_link_deps = [os.path.join(self.environment.get_build_dir(), x) for x in link_deps]
for d in target.link_targets:
reldir = self.get_target_dir(d)
if reldir == '':
reldir = '.'
link_args += ['-L', os.path.normpath(os.path.join(self.environment.get_build_dir(), reldir))]
(rel_generated, _) = self.split_swift_generated_sources(target)
abs_generated = [os.path.join(self.environment.get_build_dir(), x) for x in rel_generated]
# We need absolute paths because swiftc needs to be invoked in a subdir
# and this is the easiest way to go about it.
objects = [] # Relative to swift invocation dir
rel_objects = [] # Relative to build.ninja
for i in abssrc + abs_generated:
base = os.path.basename(i)
oname = os.path.splitext(base)[0] + '.o'
objects.append(oname)
rel_objects.append(os.path.join(self.get_target_private_dir(target), oname))
rulename = self.get_compiler_rule_name('swift', target.for_machine)
# Swiftc does not seem to be able to emit objects and module files in one go.
elem = NinjaBuildElement(self.all_outputs, rel_objects, rulename, abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_dep(abs_headers)
elem.add_item('ARGS', compile_args + header_imports + abs_generated + module_includes)
elem.add_item('RUNDIR', rundir)
self.add_build(elem)
elem = NinjaBuildElement(self.all_outputs, out_module_name,
self.get_compiler_rule_name('swift', target.for_machine),
abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_item('ARGS', compile_args + abs_generated + module_includes + swiftc.get_mod_gen_args())
elem.add_item('RUNDIR', rundir)
self.add_build(elem)
if isinstance(target, build.StaticLibrary):
elem = self.generate_link(target, self.get_target_filename(target),
rel_objects, self.build.static_linker[target.for_machine])
self.add_build(elem)
elif isinstance(target, build.Executable):
elem = NinjaBuildElement(self.all_outputs, self.get_target_filename(target), rulename, [])
elem.add_dep(rel_objects)
elem.add_dep(link_deps)
elem.add_item('ARGS', link_args + swiftc.get_std_exe_link_args() + objects + abs_link_deps)
elem.add_item('RUNDIR', rundir)
self.add_build(elem)
else:
raise MesonException('Swift supports only executable and static library targets.')
# Introspection information
self.create_target_source_introspection(target, swiftc, compile_args + header_imports + module_includes, relsrc, rel_generated)
def _rsp_options(self, tool: T.Union['Compiler', 'StaticLinker', 'DynamicLinker']) -> T.Dict[str, T.Union[bool, RSPFileSyntax]]:
"""Helper method to get rsp options.
rsp_file_syntax() is only guaranteed to be implemented if
can_linker_accept_rsp() returns True.
"""
options = dict(rspable=tool.can_linker_accept_rsp())
if options['rspable']:
options['rspfile_quote_style'] = tool.rsp_file_syntax()
return options
def generate_static_link_rules(self):
num_pools = self.environment.coredata.options[OptionKey('backend_max_links')].value
if 'java' in self.environment.coredata.compilers.host:
self.generate_java_link()
for for_machine in MachineChoice:
static_linker = self.build.static_linker[for_machine]
if static_linker is None:
continue
rule = 'STATIC_LINKER{}'.format(self.get_rule_suffix(for_machine))
cmdlist = []
args = ['$in']
# FIXME: Must normalize file names with pathlib.Path before writing
# them out to fix this properly on Windows. See:
# https://github.com/mesonbuild/meson/issues/1517
# https://github.com/mesonbuild/meson/issues/1526
if isinstance(static_linker, ArLinker) and not mesonlib.is_windows():
# `ar` has no options to overwrite archives. It always appends,
# which is never what we want. Delete an existing library first if
# it exists. https://github.com/mesonbuild/meson/issues/1355
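# On Unix the assembled rule therefore looks roughly like (sketch; the
# actual executable and flags come from static_linker below):
#   rm -f $out && ar $LINK_ARGS <output args> $in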
cmdlist = execute_wrapper + [c.format('$out') for c in rmfile_prefix]
cmdlist += static_linker.get_exelist()
cmdlist += ['$LINK_ARGS']
cmdlist += NinjaCommandArg.list(static_linker.get_output_args('$out'), Quoting.none)
description = 'Linking static target $out'
if num_pools > 0:
pool = 'pool = link_pool'
else:
pool = None
options = self._rsp_options(static_linker)
self.add_rule(NinjaRule(rule, cmdlist, args, description, **options, extra=pool))
def generate_dynamic_link_rules(self):
num_pools = self.environment.coredata.options[OptionKey('backend_max_links')].value
for for_machine in MachineChoice:
complist = self.environment.coredata.compilers[for_machine]
for langname, compiler in complist.items():
if langname in {'java', 'vala', 'rust', 'cs', 'cython'}:
continue
rule = '{}_LINKER{}'.format(langname, self.get_rule_suffix(for_machine))
command = compiler.get_linker_exelist()
args = ['$ARGS'] + NinjaCommandArg.list(compiler.get_linker_output_args('$out'), Quoting.none) + ['$in', '$LINK_ARGS']
description = 'Linking target $out'
if num_pools > 0:
pool = 'pool = link_pool'
else:
pool = None
options = self._rsp_options(compiler)
self.add_rule(NinjaRule(rule, command, args, description, **options, extra=pool))
args = self.environment.get_build_command() + \
['--internal',
'symbolextractor',
self.environment.get_build_dir(),
'$in',
'$IMPLIB',
'$out']
symrule = 'SHSYM'
symcmd = args + ['$CROSS']
symdesc = 'Generating symbol file $out'
symstat = 'restat = 1'
self.add_rule(NinjaRule(symrule, symcmd, [], symdesc, extra=symstat))
def generate_java_compile_rule(self, compiler):
rule = self.compiler_to_rule_name(compiler)
command = compiler.get_exelist() + ['$ARGS', '$in']
description = 'Compiling Java object $in'
self.add_rule(NinjaRule(rule, command, [], description))
def generate_cs_compile_rule(self, compiler: 'CsCompiler') -> None:
rule = self.compiler_to_rule_name(compiler)
command = compiler.get_exelist()
args = ['$ARGS', '$in']
description = 'Compiling C Sharp target $out'
self.add_rule(NinjaRule(rule, command, args, description,
rspable=mesonlib.is_windows(),
rspfile_quote_style=compiler.rsp_file_syntax()))
def generate_vala_compile_rules(self, compiler):
rule = self.compiler_to_rule_name(compiler)
command = compiler.get_exelist() + ['$ARGS', '$in']
description = 'Compiling Vala source $in'
self.add_rule(NinjaRule(rule, command, [], description, extra='restat = 1'))
def generate_cython_compile_rules(self, compiler: 'Compiler') -> None:
rule = self.compiler_to_rule_name(compiler)
command = compiler.get_exelist() + ['$ARGS', '$in']
description = 'Compiling Cython source $in'
self.add_rule(NinjaRule(rule, command, [], description, extra='restat = 1'))
def generate_rust_compile_rules(self, compiler):
rule = self.compiler_to_rule_name(compiler)
command = compiler.get_exelist() + ['$ARGS', '$in']
description = 'Compiling Rust source $in'
depfile = '$targetdep'
depstyle = 'gcc'
self.add_rule(NinjaRule(rule, command, [], description, deps=depstyle,
depfile=depfile))
def generate_swift_compile_rules(self, compiler):
rule = self.compiler_to_rule_name(compiler)
full_exe = self.environment.get_build_command() + [
'--internal',
'dirchanger',
'$RUNDIR',
]
invoc = full_exe + compiler.get_exelist()
command = invoc + ['$ARGS', '$in']
description = 'Compiling Swift source $in'
self.add_rule(NinjaRule(rule, command, [], description))
def use_dyndeps_for_fortran(self) -> bool:
'''Use the new Ninja feature for scanning dependencies during build,
rather than up front. Remove this and all old scanning code once Ninja
minimum version is bumped to 1.10.'''
return mesonlib.version_compare(self.ninja_version, '>=1.10.0')
def generate_fortran_dep_hack(self, crstr: str) -> None:
if self.use_dyndeps_for_fortran():
return
rule = f'FORTRAN_DEP_HACK{crstr}'
if mesonlib.is_windows():
cmd = ['cmd', '/C']
else:
cmd = ['true']
self.add_rule_comment(NinjaComment('''Workaround for these issues:
https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485'''))
self.add_rule(NinjaRule(rule, cmd, [], 'Dep hack', extra='restat = 1'))
def generate_llvm_ir_compile_rule(self, compiler):
if self.created_llvm_ir_rule[compiler.for_machine]:
return
rule = self.get_compiler_rule_name('llvm_ir', compiler.for_machine)
command = compiler.get_exelist()
args = ['$ARGS'] + NinjaCommandArg.list(compiler.get_output_args('$out'), Quoting.none) + compiler.get_compile_only_args() + ['$in']
description = 'Compiling LLVM IR object $in'
options = self._rsp_options(compiler)
self.add_rule(NinjaRule(rule, command, args, description, **options))
self.created_llvm_ir_rule[compiler.for_machine] = True
def generate_compile_rule_for(self, langname, compiler):
if langname == 'java':
self.generate_java_compile_rule(compiler)
return
if langname == 'cs':
if self.environment.machines.matches_build_machine(compiler.for_machine):
self.generate_cs_compile_rule(compiler)
return
if langname == 'vala':
self.generate_vala_compile_rules(compiler)
return
if langname == 'rust':
self.generate_rust_compile_rules(compiler)
return
if langname == 'swift':
if self.environment.machines.matches_build_machine(compiler.for_machine):
self.generate_swift_compile_rules(compiler)
return
if langname == 'cython':
self.generate_cython_compile_rules(compiler)
return
crstr = self.get_rule_suffix(compiler.for_machine)
if langname == 'fortran':
self.generate_fortran_dep_hack(crstr)
rule = self.get_compiler_rule_name(langname, compiler.for_machine)
depargs = NinjaCommandArg.list(compiler.get_dependency_gen_args('$out', '$DEPFILE'), Quoting.none)
command = compiler.get_exelist()
args = ['$ARGS'] + depargs + NinjaCommandArg.list(compiler.get_output_args('$out'), Quoting.none) + compiler.get_compile_only_args() + ['$in']
description = f'Compiling {compiler.get_display_language()} object $out'
if isinstance(compiler, VisualStudioLikeCompiler):
deps = 'msvc'
depfile = None
else:
deps = 'gcc'
depfile = '$DEPFILE'
options = self._rsp_options(compiler)
self.add_rule(NinjaRule(rule, command, args, description, **options,
deps=deps, depfile=depfile))
def generate_pch_rule_for(self, langname, compiler):
if langname != 'c' and langname != 'cpp':
return
rule = self.compiler_to_pch_rule_name(compiler)
depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
if isinstance(compiler, VisualStudioLikeCompiler):
output = []
else:
output = NinjaCommandArg.list(compiler.get_output_args('$out'), Quoting.none)
command = compiler.get_exelist() + ['$ARGS'] + depargs + output + compiler.get_compile_only_args() + ['$in']
description = 'Precompiling header $in'
if isinstance(compiler, VisualStudioLikeCompiler):
deps = 'msvc'
depfile = None
else:
deps = 'gcc'
depfile = '$DEPFILE'
self.add_rule(NinjaRule(rule, command, [], description, deps=deps,
depfile=depfile))
def generate_scanner_rules(self):
rulename = 'depscan'
if rulename in self.ruledict:
# Scanning command is the same for native and cross compilation.
return
command = self.environment.get_build_command() + \
['--internal', 'depscan']
args = ['$picklefile', '$out', '$in']
description = 'Module scanner.'
rule = NinjaRule(rulename, command, args, description)
self.add_rule(rule)
def generate_compile_rules(self):
for for_machine in MachineChoice:
clist = self.environment.coredata.compilers[for_machine]
for langname, compiler in clist.items():
if compiler.get_id() == 'clang':
self.generate_llvm_ir_compile_rule(compiler)
self.generate_compile_rule_for(langname, compiler)
self.generate_pch_rule_for(langname, compiler)
def generate_generator_list_rules(self, target):
# CustomTargets have already written their rules and
# CustomTargetIndexes don't actually get generated, so write rules for
# GeneratedLists here
for genlist in target.get_generated_sources():
if isinstance(genlist, (build.CustomTarget, build.CustomTargetIndex)):
continue
self.generate_genlist_for_target(genlist, target)
def replace_paths(self, target, args, override_subdir=None):
if override_subdir:
source_target_dir = os.path.join(self.build_to_src, override_subdir)
else:
source_target_dir = self.get_target_source_dir(target)
relout = self.get_target_private_dir(target)
args = [x.replace("@SOURCE_DIR@", self.build_to_src).replace("@BUILD_DIR@", relout)
for x in args]
args = [x.replace("@CURRENT_SOURCE_DIR@", source_target_dir) for x in args]
args = [x.replace("@SOURCE_ROOT@", self.build_to_src).replace("@BUILD_ROOT@", '.')
for x in args]
args = [x.replace('\\', '/') for x in args]
return args
def generate_genlist_for_target(self, genlist, target):
generator = genlist.get_generator()
subdir = genlist.subdir
exe = generator.get_exe()
exe_arr = self.build_target_to_cmd_array(exe)
infilelist = genlist.get_inputs()
outfilelist = genlist.get_outputs()
extra_dependencies = self.get_custom_target_depend_files(genlist)
for i, curfile in enumerate(infilelist):
if len(generator.outputs) == 1:
sole_output = os.path.join(self.get_target_private_dir(target), outfilelist[i])
else:
sole_output = f'{curfile}'
infilename = curfile.rel_to_builddir(self.build_to_src)
base_args = generator.get_arglist(infilename)
outfiles = genlist.get_outputs_for(curfile)
outfiles = [os.path.join(self.get_target_private_dir(target), of) for of in outfiles]
if generator.depfile is None:
rulename = 'CUSTOM_COMMAND'
args = base_args
else:
rulename = 'CUSTOM_COMMAND_DEP'
depfilename = generator.get_dep_outname(infilename)
depfile = os.path.join(self.get_target_private_dir(target), depfilename)
args = [x.replace('@DEPFILE@', depfile) for x in base_args]
args = [x.replace("@INPUT@", infilename).replace('@OUTPUT@', sole_output)
for x in args]
args = self.replace_outputs(args, self.get_target_private_dir(target), outfilelist)
# We have consumed output files, so drop them from the list of remaining outputs.
if len(generator.outputs) > 1:
outfilelist = outfilelist[len(generator.outputs):]
args = self.replace_paths(target, args, override_subdir=subdir)
cmdlist = exe_arr + self.replace_extra_args(args, genlist)
cmdlist, reason = self.as_meson_exe_cmdline(cmdlist[0], cmdlist[1:],
capture=outfiles[0] if generator.capture else None)
abs_pdir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(target))
os.makedirs(abs_pdir, exist_ok=True)
elem = NinjaBuildElement(self.all_outputs, outfiles, rulename, infilename)
elem.add_dep([self.get_target_filename(x) for x in generator.depends])
if generator.depfile is not None:
elem.add_item('DEPFILE', depfile)
if len(extra_dependencies) > 0:
elem.add_dep(extra_dependencies)
if len(generator.outputs) == 1:
what = f'{sole_output!r}'
else:
# since there are multiple outputs, we log the source that caused the rebuild
what = f'from {sole_output!r}'
if reason:
reason = f' (wrapped by meson {reason})'
elem.add_item('DESC', f'Generating {what}{reason}.')
if isinstance(exe, build.BuildTarget):
elem.add_dep(self.get_target_filename(exe))
elem.add_item('COMMAND', cmdlist)
self.add_build(elem)
def scan_fortran_module_outputs(self, target):
"""
Find all module and submodule made available in a Fortran code file.
"""
if self.use_dyndeps_for_fortran():
return
compiler = None
# TODO other compilers
for lang, c in self.environment.coredata.compilers.host.items():
if lang == 'fortran':
compiler = c
break
if compiler is None:
self.fortran_deps[target.get_basename()] = {}
return
modre = re.compile(FORTRAN_MODULE_PAT, re.IGNORECASE)
submodre = re.compile(FORTRAN_SUBMOD_PAT, re.IGNORECASE)
module_files = {}
submodule_files = {}
for s in target.get_sources():
# FIXME, does not work for Fortran sources generated by
# custom_target() and generator() as those are run after
# the configuration (configure_file() is OK)
if not compiler.can_compile(s):
continue
filename = s.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir())
# Fortran keywords must be ASCII.
with open(filename, encoding='ascii', errors='ignore') as f:
for line in f:
modmatch = modre.match(line)
if modmatch is not None:
modname = modmatch.group(1).lower()
if modname in module_files:
raise InvalidArguments(
f'Namespace collision: module {modname} defined in '
f'two files {module_files[modname]} and {s}.')
module_files[modname] = s
else:
submodmatch = submodre.match(line)
if submodmatch is not None:
# '_' is arbitrarily used to distinguish submod from mod.
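# e.g. (illustrative) a line 'submodule (foo:bar) baz' is keyed as
# 'foo_baz', so it cannot collide with a plain module named 'foo'.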
parents = submodmatch.group(1).lower().split(':')
submodname = parents[0] + '_' + submodmatch.group(2).lower()
if submodname in submodule_files:
raise InvalidArguments(
f'Namespace collision: submodule {submodname} defined in '
f'two files {submodule_files[submodname]} and {s}.')
submodule_files[submodname] = s
self.fortran_deps[target.get_basename()] = {**module_files, **submodule_files}
def get_fortran_deps(self, compiler: FortranCompiler, src: Path, target) -> T.List[str]:
"""
Find all module and submodule needed by a Fortran target
"""
if self.use_dyndeps_for_fortran():
return []
dirname = Path(self.get_target_private_dir(target))
tdeps = self.fortran_deps[target.get_basename()]
srcdir = Path(self.source_dir)
mod_files = _scan_fortran_file_deps(src, srcdir, dirname, tdeps, compiler)
return mod_files
def get_no_stdlib_link_args(self, target, linker):
if hasattr(linker, 'language') and linker.language in self.build.stdlibs[target.for_machine]:
return linker.get_no_stdlib_link_args()
return []
def get_compile_debugfile_args(self, compiler, target, objfile):
# The way MSVC uses PDB files is documented exactly nowhere so
# the following is what we have been able to decipher via
# reverse engineering.
#
# Each object file gets the path of its PDB file written
# inside it. This can be either the final PDB (for, say,
# foo.exe) or an object pdb (for foo.obj). If the former, then
# each compilation step locks the pdb file for writing, which
# is a bottleneck and object files from one target can not be
# used in a different target. The latter seems to be the
# sensible one (and what Unix does) but there is a catch. If
# you try to use precompiled headers MSVC will error out
# because both source and pch pdbs go in the same file and
# they must be the same.
#
# This means:
#
# - pch files must be compiled anew for every object file (negating
# the entire point of having them in the first place)
# - when using pch, output must go to the target pdb
#
# Since both of these are broken in some way, use the one that
# works for each target. This unfortunately means that you
# can't combine pch and object extraction in a single target.
#
# PDB files also lead to filename collisions. A target foo.exe
# has a corresponding foo.pdb. A shared library foo.dll _also_
# has a pdb file called foo.pdb. So does a static library
# foo.lib, which clobbers both foo.pdb _and_ the dll file's
# export library called foo.lib (by default, currently we name
# them libfoo.a to avoid this issue). You can give the files
# unique names such as foo_exe.pdb but VC also generates a
# bunch of other files which take their names from the target
# basename (i.e. "foo") and stomp on each other.
#
# CMake solves this problem by doing two things. First of all
# static libraries do not generate pdb files at
# all. Presumably you don't need them and VC is smart enough
# to look up the original data when linking (speculation, not
# tested). The second solution is that you can only have a
# target named "foo" as an exe, shared lib _or_ static
# lib, which prevents filename collisions. The downside
# is that you can't have an executable foo that uses a shared
# library libfoo.so, which is a common idiom on Unix.
#
# If you feel that the above is completely wrong and all of
# this is actually doable, please send patches.
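# In short: when a target uses a PCH we point the compile-time debug info
# at the final target PDB; otherwise every object file gets its own PDB,
# as implemented below.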
if target.has_pch():
tfilename = self.get_target_filename_abs(target)
return compiler.get_compile_debugfile_args(tfilename, pch=True)
else:
return compiler.get_compile_debugfile_args(objfile, pch=False)
def get_link_debugfile_name(self, linker, target, outname):
return linker.get_link_debugfile_name(outname)
def get_link_debugfile_args(self, linker, target, outname):
return linker.get_link_debugfile_args(outname)
def generate_llvm_ir_compile(self, target, src):
base_proxy = self.get_base_options_for_target(target)
compiler = get_compiler_for_source(target.compilers.values(), src)
commands = compiler.compiler_args()
# Compiler args for compiling this target
commands += compilers.get_base_compile_args(base_proxy, compiler)
if isinstance(src, File):
if src.is_built:
src_filename = os.path.join(src.subdir, src.fname)
else:
src_filename = src.fname
elif os.path.isabs(src):
src_filename = os.path.basename(src)
else:
src_filename = src
obj_basename = self.canonicalize_filename(src_filename)
rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
rel_obj += '.' + self.environment.machines[target.for_machine].get_object_suffix()
commands += self.get_compile_debugfile_args(compiler, target, rel_obj)
if isinstance(src, File) and src.is_built:
rel_src = src.fname
elif isinstance(src, File):
rel_src = src.rel_to_builddir(self.build_to_src)
else:
raise InvalidArguments(f'Invalid source type: {src!r}')
# Write the Ninja build command
compiler_name = self.get_compiler_rule_name('llvm_ir', compiler.for_machine)
element = NinjaBuildElement(self.all_outputs, rel_obj, compiler_name, rel_src)
element.add_item('ARGS', commands)
self.add_build(element)
return (rel_obj, rel_src)
@lru_cache(maxsize=None)
def generate_inc_dir(self, compiler: 'Compiler', d: str, basedir: str, is_system: bool) -> \
T.Tuple['ImmutableListProtocol[str]', 'ImmutableListProtocol[str]']:
# Avoid superfluous '/.' at the end of paths when d is '.'
if d not in ('', '.'):
expdir = os.path.normpath(os.path.join(basedir, d))
else:
expdir = basedir
srctreedir = os.path.normpath(os.path.join(self.build_to_src, expdir))
sargs = compiler.get_include_args(srctreedir, is_system)
# There may be include dirs where a build directory has not been
# created for some source dir. For example if someone does this:
#
# inc = include_directories('foo/bar/baz')
#
# But never subdir()s into the actual dir.
if os.path.isdir(os.path.join(self.environment.get_build_dir(), expdir)):
bargs = compiler.get_include_args(expdir, is_system)
else:
bargs = []
return (sargs, bargs)
def _generate_single_compile(self, target: build.BuildTarget, compiler: 'Compiler',
is_generated: bool = False) -> 'CompilerArgs':
commands = self._generate_single_compile_base_args(target, compiler)
commands += self._generate_single_compile_target_args(target, compiler, is_generated)
return commands
def _generate_single_compile_base_args(self, target: build.BuildTarget, compiler: 'Compiler') -> 'CompilerArgs':
base_proxy = self.get_base_options_for_target(target)
# Create an empty commands list, and start adding arguments from
# various sources in the order in which they must override each other
commands = compiler.compiler_args()
# Start with symbol visibility.
commands += compiler.gnu_symbol_visibility_args(target.gnu_symbol_visibility)
# Add compiler args for compiling this target derived from 'base' build
# options passed on the command-line, in default_options, etc.
# These have the lowest priority.
commands += compilers.get_base_compile_args(base_proxy,
compiler)
return commands
@lru_cache(maxsize=None)
def _generate_single_compile_target_args(self, target: build.BuildTarget, compiler: 'Compiler',
is_generated: bool = False) -> 'ImmutableListProtocol[str]':
# The code generated by valac is usually crap and has tons of unused
# variables and such, so disable warnings for Vala C sources.
no_warn_args = (is_generated == 'vala')
# Add compiler args and include paths from several sources; defaults,
# build options, external dependencies, etc.
commands = self.generate_basic_compiler_args(target, compiler, no_warn_args)
# Add custom target dirs as includes automatically, but before
# target-specific include directories.
if target.implicit_include_directories:
commands += self.get_custom_target_dir_include_args(target, compiler)
# Add include dirs from the `include_directories:` kwarg on the target
# and from `include_directories:` of internal deps of the target.
#
# Target include dirs should override internal deps include dirs.
# This is handled in BuildTarget.process_kwargs()
#
# Include dirs from internal deps should override include dirs from
# external deps and must maintain the order in which they are specified.
# Hence, we must reverse the list so that the order is preserved.
for i in reversed(target.get_include_dirs()):
basedir = i.get_curdir()
# We iterate the include dirs in reverse because each -Ipath is
# prepended to the argument array; without this reversal the flags
# would end up in the wrong (reversed) order.
for d in reversed(i.get_incdirs()):
# Add source subdir first so that the build subdir overrides it
(compile_obj, includeargs) = self.generate_inc_dir(compiler, d, basedir, i.is_system)
commands += compile_obj
commands += includeargs
for d in i.get_extra_build_dirs():
commands += compiler.get_include_args(d, i.is_system)
# Add per-target compile args, f.ex, `c_args : ['-DFOO']`. We set these
# near the end since these are supposed to override everything else.
commands += self.escape_extra_args(target.get_extra_args(compiler.get_language()))
# D specific additional flags
if compiler.language == 'd':
commands += compiler.get_feature_args(target.d_features, self.build_to_src)
# Add source dir and build dir. Project-specific and target-specific
# include paths must override per-target compile args, include paths
# from external dependencies, internal dependencies, and from
# per-target `include_directories:`
#
# We prefer headers in the build dir over the source dir since, for
# instance, the user might have an srcdir == builddir Autotools build
# in their source tree. Many projects that are moving to Meson have
# both Meson and Autotools in parallel as part of the transition.
if target.implicit_include_directories:
commands += self.get_source_dir_include_args(target, compiler)
if target.implicit_include_directories:
commands += self.get_build_dir_include_args(target, compiler)
# Finally add the private dir for the target to the include path. This
# must override everything else and must be the final path added.
commands += compiler.get_include_args(self.get_target_private_dir(target), False)
return commands
def generate_single_compile(self, target, src, is_generated=False, header_deps=None, order_deps=None):
"""
Compiles C/C++, ObjC/ObjC++, Fortran, and D sources
"""
header_deps = header_deps if header_deps is not None else []
order_deps = order_deps if order_deps is not None else []
if isinstance(src, str) and src.endswith('.h'):
raise AssertionError(f'BUG: sources should not contain headers {src!r}')
compiler = get_compiler_for_source(target.compilers.values(), src)
commands = self._generate_single_compile_base_args(target, compiler)
# Include PCH header as first thing as it must be the first one or it will be
# ignored by gcc https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100462
if self.environment.coredata.options.get(OptionKey('b_pch')) and is_generated != 'pch':
commands += self.get_pch_include_args(compiler, target)
commands += self._generate_single_compile_target_args(target, compiler, is_generated)
commands = commands.compiler.compiler_args(commands)
# Create introspection information
if is_generated is False:
self.create_target_source_introspection(target, compiler, commands, [src], [])
else:
self.create_target_source_introspection(target, compiler, commands, [], [src])
build_dir = self.environment.get_build_dir()
if isinstance(src, File):
rel_src = src.rel_to_builddir(self.build_to_src)
if os.path.isabs(rel_src):
# Source files may not be from the source directory if they originate in source-only libraries,
# so we can't assert that the absolute path is anywhere in particular.
if src.is_built:
assert rel_src.startswith(build_dir)
rel_src = rel_src[len(build_dir) + 1:]
elif is_generated:
raise AssertionError(f'BUG: broken generated source file handling for {src!r}')
else:
raise InvalidArguments(f'Invalid source type: {src!r}')
obj_basename = self.object_filename_from_source(target, src)
rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
dep_file = compiler.depfile_for_object(rel_obj)
# Add MSVC debug file generation compile flags: /Fd /FS
commands += self.get_compile_debugfile_args(compiler, target, rel_obj)
# PCH handling
if self.environment.coredata.options.get(OptionKey('b_pch')):
pchlist = target.get_pch(compiler.language)
else:
pchlist = []
if not pchlist:
pch_dep = []
elif compiler.id == 'intel':
pch_dep = []
else:
arr = []
i = os.path.join(self.get_target_private_dir(target), compiler.get_pch_name(pchlist[0]))
arr.append(i)
pch_dep = arr
compiler_name = self.compiler_to_rule_name(compiler)
extra_deps = []
if compiler.get_language() == 'fortran':
# Can't read source file to scan for deps if it's generated later
# at build-time. Skip scanning for deps, and just set the module
# outdir argument instead.
# https://github.com/mesonbuild/meson/issues/1348
if not is_generated:
abs_src = Path(build_dir) / rel_src
extra_deps += self.get_fortran_deps(compiler, abs_src, target)
if not self.use_dyndeps_for_fortran():
# Dependency hack. Remove once multiple outputs in Ninja is fixed:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
for modname, srcfile in self.fortran_deps[target.get_basename()].items():
modfile = os.path.join(self.get_target_private_dir(target),
compiler.module_name_to_filename(modname))
if srcfile == src:
crstr = self.get_rule_suffix(target.for_machine)
depelem = NinjaBuildElement(self.all_outputs,
modfile,
'FORTRAN_DEP_HACK' + crstr,
rel_obj)
self.add_build(depelem)
commands += compiler.get_module_outdir_args(self.get_target_private_dir(target))
element = NinjaBuildElement(self.all_outputs, rel_obj, compiler_name, rel_src)
self.add_header_deps(target, element, header_deps)
for d in extra_deps:
element.add_dep(d)
for d in order_deps:
if isinstance(d, File):
d = d.rel_to_builddir(self.build_to_src)
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_orderdep(d)
element.add_dep(pch_dep)
for i in self.get_fortran_orderdeps(target, compiler):
element.add_orderdep(i)
element.add_item('DEPFILE', dep_file)
element.add_item('ARGS', commands)
self.add_dependency_scanner_entries_to_element(target, compiler, element, src)
self.add_build(element)
assert isinstance(rel_obj, str)
assert isinstance(rel_src, str)
return (rel_obj, rel_src.replace('\\', '/'))
def add_dependency_scanner_entries_to_element(self, target, compiler, element, src):
if not self.should_use_dyndeps_for_target(target):
return
extension = os.path.splitext(src.fname)[1][1:]
if not (extension.lower() in compilers.lang_suffixes['fortran'] or extension in compilers.lang_suffixes['cpp']):
return
dep_scan_file = self.get_dep_scan_file_for(target)
element.add_item('dyndep', dep_scan_file)
element.add_orderdep(dep_scan_file)
def get_dep_scan_file_for(self, target):
return os.path.join(self.get_target_private_dir(target), 'depscan.dd')
def add_header_deps(self, target, ninja_element, header_deps):
for d in header_deps:
if isinstance(d, File):
d = d.rel_to_builddir(self.build_to_src)
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
ninja_element.add_dep(d)
def has_dir_part(self, fname):
# FIXME FIXME: The usage of this is a terrible and unreliable hack
if isinstance(fname, File):
return fname.subdir != ''
return has_path_sep(fname)
# Fortran is a bit weird (again). When you link against a library, just compiling a source file
# requires the mod files that are output when single files are built. To do this right we would need to
# scan all inputs and write out explicit deps for each file. That is too slow and too much effort so
# instead just have an ordered dependency on the library. This ensures all required mod files are created.
# The real deps are then detected via dep file generation from the compiler. This breaks on compilers that
# produce incorrect dep files but such is life.
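# Illustrative effect (hypothetical names): a Fortran target linking against
# 'libfoo' gets an order-only dependency on 'foo_dir/libfoo.a', which
# guarantees foo's .mod files exist before our own sources are compiled.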
def get_fortran_orderdeps(self, target, compiler):
if compiler.language != 'fortran':
return []
return [
os.path.join(self.get_target_dir(lt), lt.get_filename())
for lt in itertools.chain(target.link_targets, target.link_whole_targets)
]
def generate_msvc_pch_command(self, target, compiler, pch):
header = pch[0]
pchname = compiler.get_pch_name(header)
dst = os.path.join(self.get_target_private_dir(target), pchname)
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
if len(pch) == 1:
# Auto generate PCH.
source = self.create_msvc_pch_implementation(target, compiler.get_language(), pch[0])
pch_header_dir = os.path.dirname(os.path.join(self.build_to_src, target.get_source_subdir(), header))
commands += compiler.get_include_args(pch_header_dir, False)
else:
source = os.path.join(self.build_to_src, target.get_source_subdir(), pch[1])
just_name = os.path.basename(header)
(objname, pch_args) = compiler.gen_pch_args(just_name, source, dst)
commands += pch_args
commands += self._generate_single_compile(target, compiler)
commands += self.get_compile_debugfile_args(compiler, target, objname)
dep = dst + '.' + compiler.get_depfile_suffix()
return commands, dep, dst, [objname], source
def generate_gcc_pch_command(self, target, compiler, pch):
commands = self._generate_single_compile(target, compiler)
if pch.split('.')[-1] == 'h' and compiler.language == 'cpp':
# Explicitly compile pch headers as C++. If Clang is invoked in C++ mode, it actually warns if
# this option is not set, and for gcc it also makes sense to use it.
commands += ['-x', 'c++-header']
dst = os.path.join(self.get_target_private_dir(target),
os.path.basename(pch) + '.' + compiler.get_pch_suffix())
dep = dst + '.' + compiler.get_depfile_suffix()
return commands, dep, dst, [] # Gcc does not create an object file during pch generation.
def generate_pch(self, target, header_deps=None):
header_deps = header_deps if header_deps is not None else []
pch_objects = []
for lang in ['c', 'cpp']:
pch = target.get_pch(lang)
if not pch:
continue
if not has_path_sep(pch[0]) or not has_path_sep(pch[-1]):
msg = f'Precompiled header of {target.get_basename()!r} must not be in the same ' \
'directory as source, please put it in a subdirectory.'
raise InvalidArguments(msg)
compiler = target.compilers[lang]
if isinstance(compiler, VisualStudioLikeCompiler):
(commands, dep, dst, objs, src) = self.generate_msvc_pch_command(target, compiler, pch)
extradep = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
elif compiler.id == 'intel':
# Intel generates on target generation
continue
else:
src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
(commands, dep, dst, objs) = self.generate_gcc_pch_command(target, compiler, pch[0])
extradep = None
pch_objects += objs
rulename = self.compiler_to_pch_rule_name(compiler)
elem = NinjaBuildElement(self.all_outputs, dst, rulename, src)
if extradep is not None:
elem.add_dep(extradep)
self.add_header_deps(target, elem, header_deps)
elem.add_item('ARGS', commands)
elem.add_item('DEPFILE', dep)
self.add_build(elem)
return pch_objects
def get_target_shsym_filename(self, target):
# Always name the .symbols file after the primary build output because it always exists
targetdir = self.get_target_private_dir(target)
return os.path.join(targetdir, target.get_filename() + '.symbols')
def generate_shsym(self, target):
target_file = self.get_target_filename(target)
symname = self.get_target_shsym_filename(target)
elem = NinjaBuildElement(self.all_outputs, symname, 'SHSYM', target_file)
# The library we will actually link to, which is an import library on Windows (not the DLL)
elem.add_item('IMPLIB', self.get_target_filename_for_linking(target))
if self.environment.is_cross_build():
elem.add_item('CROSS', '--cross-host=' + self.environment.machines[target.for_machine].system)
self.add_build(elem)
def get_import_filename(self, target):
return os.path.join(self.get_target_dir(target), target.import_filename)
def get_target_type_link_args(self, target, linker):
commands = []
if isinstance(target, build.Executable):
# Currently only used with the Swift compiler to add '-emit-executable'
commands += linker.get_std_exe_link_args()
# If export_dynamic, add the appropriate linker arguments
if target.export_dynamic:
commands += linker.gen_export_dynamic_link_args(self.environment)
# If implib, and that's significant on this platform (i.e. Windows using either GCC or Visual Studio)
if target.import_filename:
commands += linker.gen_import_library_args(self.get_import_filename(target))
if target.pie:
commands += linker.get_pie_link_args()
elif isinstance(target, build.SharedLibrary):
if isinstance(target, build.SharedModule):
options = self.environment.coredata.options
commands += linker.get_std_shared_module_link_args(options)
else:
commands += linker.get_std_shared_lib_link_args()
# All shared libraries are PIC
commands += linker.get_pic_args()
if not isinstance(target, build.SharedModule) or target.backwards_compat_want_soname:
# Add -Wl,-soname arguments on Linux, -install_name on OS X
commands += linker.get_soname_args(
self.environment, target.prefix, target.name, target.suffix,
target.soversion, target.darwin_versions)
# This is only visited when building for Windows using either GCC or Visual Studio
if target.vs_module_defs and hasattr(linker, 'gen_vs_module_defs_args'):
commands += linker.gen_vs_module_defs_args(target.vs_module_defs.rel_to_builddir(self.build_to_src))
# This is only visited when building for Windows using either GCC or Visual Studio
if target.import_filename:
commands += linker.gen_import_library_args(self.get_import_filename(target))
elif isinstance(target, build.StaticLibrary):
commands += linker.get_std_link_args(not target.should_install())
else:
raise RuntimeError('Unknown build target type.')
return commands
def get_target_type_link_args_post_dependencies(self, target, linker):
commands = []
if isinstance(target, build.Executable):
# If gui_app is significant on this platform, add the appropriate linker arguments.
# Unfortunately this can't be done in get_target_type_link_args, because some misguided
# libraries (such as SDL2) add -mwindows to their link flags.
m = self.environment.machines[target.for_machine]
if m.is_windows() or m.is_cygwin():
if target.gui_app is not None:
commands += linker.get_gui_app_args(target.gui_app)
else:
commands += linker.get_win_subsystem_args(target.win_subsystem)
return commands
def get_link_whole_args(self, linker, target):
use_custom = False
if isinstance(linker, mixins.visualstudio.MSVCCompiler):
# Expand our object lists manually if we are on pre-Visual Studio 2015 Update 2
# (incidentally, the "linker" here actually refers to cl.exe)
if mesonlib.version_compare(linker.version, '<19.00.23918'):
use_custom = True
if use_custom:
objects_from_static_libs: T.List[ExtractedObjects] = []
for dep in target.link_whole_targets:
l = dep.extract_all_objects(False)
objects_from_static_libs += self.determine_ext_objs(l, '')
objects_from_static_libs.extend(self.flatten_object_list(dep))
return objects_from_static_libs
else:
target_args = self.build_target_link_arguments(linker, target.link_whole_targets)
return linker.get_link_whole_for(target_args) if target_args else []
@lru_cache(maxsize=None)
def guess_library_absolute_path(self, linker, libname, search_dirs, patterns) -> Path:
for d in search_dirs:
for p in patterns:
trial = CCompiler._get_trials_from_pattern(p, d, libname)
if not trial:
continue
trial = CCompiler._get_file_from_list(self.environment, trial)
if not trial:
continue
# Return the first result
return trial
def guess_external_link_dependencies(self, linker, target, commands, internal):
# Ideally the linker would generate dependency information that could be used.
# But that has 2 problems:
# * currently ld can not create dependency information in a way that ninja can use:
# https://sourceware.org/bugzilla/show_bug.cgi?id=22843
# * Meson optimizes libraries from the same build using the symbol extractor.
# Just letting ninja use ld generated dependencies would undo this optimization.
search_dirs = OrderedSet()
libs = OrderedSet()
absolute_libs = []
build_dir = self.environment.get_build_dir()
# the following loop sometimes consumes two items from command in one pass
it = iter(linker.native_args_to_unix(commands))
for item in it:
if item in internal and not item.startswith('-'):
continue
if item.startswith('-L'):
if len(item) > 2:
path = item[2:]
else:
try:
path = next(it)
except StopIteration:
mlog.warning("Generated linker command has -L argument without following path")
break
if not os.path.isabs(path):
path = os.path.join(build_dir, path)
search_dirs.add(path)
elif item.startswith('-l'):
if len(item) > 2:
lib = item[2:]
else:
try:
lib = next(it)
except StopIteration:
mlog.warning("Generated linker command has '-l' argument without following library name")
break
libs.add(lib)
elif os.path.isabs(item) and self.environment.is_library(item) and os.path.isfile(item):
absolute_libs.append(item)
guessed_dependencies = []
# TODO The get_library_naming requirement currently excludes link targets that use d or fortran as their main linker
try:
static_patterns = linker.get_library_naming(self.environment, LibType.STATIC, strict=True)
shared_patterns = linker.get_library_naming(self.environment, LibType.SHARED, strict=True)
search_dirs = tuple(search_dirs) + tuple(linker.get_library_dirs(self.environment))
for libname in libs:
# be conservative and record most likely shared and static resolution, because we don't know exactly
# which one the linker will prefer
staticlibs = self.guess_library_absolute_path(linker, libname,
search_dirs, static_patterns)
sharedlibs = self.guess_library_absolute_path(linker, libname,
search_dirs, shared_patterns)
if staticlibs:
guessed_dependencies.append(staticlibs.resolve().as_posix())
if sharedlibs:
guessed_dependencies.append(sharedlibs.resolve().as_posix())
except (mesonlib.MesonException, AttributeError) as e:
if 'get_library_naming' not in str(e):
raise
return guessed_dependencies + absolute_libs
def generate_prelink(self, target, obj_list):
assert isinstance(target, build.StaticLibrary)
prelink_name = os.path.join(self.get_target_private_dir(target), target.name + '-prelink.o')
elem = NinjaBuildElement(self.all_outputs, [prelink_name], 'CUSTOM_COMMAND', obj_list)
prelinker = target.get_prelinker()
cmd = prelinker.exelist[:]
cmd += prelinker.get_prelink_args(prelink_name, obj_list)
cmd = self.replace_paths(target, cmd)
elem.add_item('COMMAND', cmd)
elem.add_item('description', f'Prelinking {prelink_name}.')
self.add_build(elem)
return [prelink_name]
def generate_link(self, target: build.BuildTarget, outname, obj_list, linker: T.Union['Compiler', 'StaticLinker'], extra_args=None, stdlib_args=None):
extra_args = extra_args if extra_args is not None else []
stdlib_args = stdlib_args if stdlib_args is not None else []
implicit_outs = []
if isinstance(target, build.StaticLibrary):
linker_base = 'STATIC'
else:
linker_base = linker.get_language() # Fixme.
if isinstance(target, build.SharedLibrary):
self.generate_shsym(target)
crstr = self.get_rule_suffix(target.for_machine)
linker_rule = linker_base + '_LINKER' + crstr
# Create an empty commands list, and start adding link arguments from
# various sources in the order in which they must override each other
# starting from hard-coded defaults followed by build options and so on.
#
# Once all the linker options have been passed, we will start passing
# libraries and library paths from internal and external sources.
commands = linker.compiler_args()
# First, the trivial ones that are impossible to override.
#
# Add linker args for linking this target derived from 'base' build
# options passed on the command-line, in default_options, etc.
# These have the lowest priority.
if isinstance(target, build.StaticLibrary):
commands += linker.get_base_link_args(self.get_base_options_for_target(target))
else:
commands += compilers.get_base_link_args(self.get_base_options_for_target(target),
linker,
isinstance(target, build.SharedModule))
# Add -nostdlib if needed; can't be overridden
commands += self.get_no_stdlib_link_args(target, linker)
# Add things like /NOLOGO; usually can't be overridden
commands += linker.get_linker_always_args()
# Add buildtype linker args: optimization level, etc.
commands += linker.get_buildtype_linker_args(self.get_option_for_target(OptionKey('buildtype'), target))
# Add /DEBUG and the pdb filename when using MSVC
if self.get_option_for_target(OptionKey('debug'), target):
commands += self.get_link_debugfile_args(linker, target, outname)
debugfile = self.get_link_debugfile_name(linker, target, outname)
if debugfile is not None:
implicit_outs += [debugfile]
# Add link args specific to this BuildTarget type, such as soname args,
# PIC, import library generation, etc.
commands += self.get_target_type_link_args(target, linker)
# Archives that are copied wholesale in the result. Must be before any
# other link targets so missing symbols from whole archives are found in those.
if not isinstance(target, build.StaticLibrary):
commands += self.get_link_whole_args(linker, target)
if not isinstance(target, build.StaticLibrary):
# Add link args added using add_project_link_arguments()
commands += self.build.get_project_link_args(linker, target.subproject, target.for_machine)
# Add link args added using add_global_link_arguments()
# These override per-project link arguments
commands += self.build.get_global_link_args(linker, target.for_machine)
# Link args added from the env: LDFLAGS. We want these to override
# all the defaults but not the per-target link args.
commands += self.environment.coredata.get_external_link_args(target.for_machine, linker.get_language())
# Now we will add libraries and library paths from various sources
# Set runtime-paths so we can run executables without needing to set
# LD_LIBRARY_PATH, etc in the environment. Doesn't work on Windows.
if has_path_sep(target.name):
# Target names really should not have slashes in them, but
# unfortunately we did not check for that and some downstream projects
# now have them. Once slashes are forbidden, remove this bit.
target_slashname_workaround_dir = os.path.join(
os.path.dirname(target.name),
self.get_target_dir(target))
else:
target_slashname_workaround_dir = self.get_target_dir(target)
(rpath_args, target.rpath_dirs_to_remove) = (
linker.build_rpath_args(self.environment,
self.environment.get_build_dir(),
target_slashname_workaround_dir,
self.determine_rpath_dirs(target),
target.build_rpath,
target.install_rpath))
commands += rpath_args
# Add link args to link to all internal libraries (link_with:) and
# internal dependencies needed by this target.
if linker_base == 'STATIC':
# Link arguments of static libraries are not put in the command
# line of the library. They are instead appended to the command
# line where the static library is used.
dependencies = []
else:
dependencies = target.get_dependencies()
internal = self.build_target_link_arguments(linker, dependencies)
commands += internal
# Only non-static built targets need link args and link dependencies
if not isinstance(target, build.StaticLibrary):
# For 'automagic' deps: Boost and GTest. Also dependency('threads').
# pkg-config puts the thread flags itself via `Cflags:`
commands += linker.get_target_link_args(target)
# External deps must be last because target link libraries may depend on them.
for dep in target.get_external_deps():
# Extend without reordering or de-dup to preserve `-L -l` sets
# https://github.com/mesonbuild/meson/issues/1718
commands.extend_preserving_lflags(linker.get_dependency_link_args(dep))
for d in target.get_dependencies():
if isinstance(d, build.StaticLibrary):
for dep in d.get_external_deps():
commands.extend_preserving_lflags(linker.get_dependency_link_args(dep))
# Add link args specific to this BuildTarget type that must not be overridden by dependencies
commands += self.get_target_type_link_args_post_dependencies(target, linker)
# Add link args for c_* or cpp_* build options. Currently this only
# adds c_winlibs and cpp_winlibs when building for Windows. This needs
# to be after all internal and external libraries so that unresolved
# symbols from those can be found here. This is needed when the
# *_winlibs that we want to link to are static mingw64 libraries.
if isinstance(linker, Compiler):
# The static linker doesn't know what language it is building, so we
# don't know what option. Fortunately, it doesn't care to see the
# language-specific options either.
#
# We shouldn't check whether we are making a static library, because
# in the LTO case we do use a real compiler here.
commands += linker.get_option_link_args(self.environment.coredata.options)
dep_targets = []
dep_targets.extend(self.guess_external_link_dependencies(linker, target, commands, internal))
# Add libraries generated by custom targets
custom_target_libraries = self.get_custom_target_provided_libraries(target)
commands += extra_args
commands += custom_target_libraries
commands += stdlib_args # Standard library arguments go last, because they never depend on anything.
dep_targets.extend([self.get_dependency_filename(t) for t in dependencies])
dep_targets.extend([self.get_dependency_filename(t)
for t in target.link_depends])
elem = NinjaBuildElement(self.all_outputs, outname, linker_rule, obj_list, implicit_outs=implicit_outs)
elem.add_dep(dep_targets + custom_target_libraries)
elem.add_item('LINK_ARGS', commands)
return elem
def get_dependency_filename(self, t):
if isinstance(t, build.SharedLibrary):
return self.get_target_shsym_filename(t)
elif isinstance(t, mesonlib.File):
if t.is_built:
return t.relative_name()
else:
return t.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir())
return self.get_target_filename(t)
def generate_shlib_aliases(self, target, outdir):
aliases = target.get_aliases()
for alias, to in aliases.items():
aliasfile = os.path.join(self.environment.get_build_dir(), outdir, alias)
try:
os.remove(aliasfile)
except Exception:
pass
try:
os.symlink(to, aliasfile)
except NotImplementedError:
mlog.debug("Library versioning disabled because symlinks are not supported.")
except OSError:
mlog.debug("Library versioning disabled because we do not have symlink creation privileges.")
def generate_custom_target_clean(self, trees: T.List[str]) -> str:
e = NinjaBuildElement(self.all_outputs, 'meson-clean-ctlist', 'CUSTOM_COMMAND', 'PHONY')
d = CleanTrees(self.environment.get_build_dir(), trees)
d_file = os.path.join(self.environment.get_scratch_dir(), 'cleantrees.dat')
e.add_item('COMMAND', self.environment.get_build_command() + ['--internal', 'cleantrees', d_file])
e.add_item('description', 'Cleaning custom target directories')
self.add_build(e)
# Alias that runs the target defined above
self.create_target_alias('meson-clean-ctlist')
# Write out the data file passed to the script
with open(d_file, 'wb') as ofile:
pickle.dump(d, ofile)
return 'clean-ctlist'
def generate_gcov_clean(self):
gcno_elem = NinjaBuildElement(self.all_outputs, 'meson-clean-gcno', 'CUSTOM_COMMAND', 'PHONY')
gcno_elem.add_item('COMMAND', mesonlib.get_meson_command() + ['--internal', 'delwithsuffix', '.', 'gcno'])
gcno_elem.add_item('description', 'Deleting gcno files')
self.add_build(gcno_elem)
# Alias that runs the target defined above
self.create_target_alias('meson-clean-gcno')
gcda_elem = NinjaBuildElement(self.all_outputs, 'meson-clean-gcda', 'CUSTOM_COMMAND', 'PHONY')
gcda_elem.add_item('COMMAND', mesonlib.get_meson_command() + ['--internal', 'delwithsuffix', '.', 'gcda'])
gcda_elem.add_item('description', 'Deleting gcda files')
self.add_build(gcda_elem)
# Alias that runs the target defined above
self.create_target_alias('meson-clean-gcda')
def get_user_option_args(self):
cmds = []
for (k, v) in self.environment.coredata.options.items():
if k.is_project():
cmds.append('-D' + str(k) + '=' + (v.value if isinstance(v.value, str) else str(v.value).lower()))
# The order of these arguments must be the same between runs of Meson
# to ensure reproducible output. The order we pass them shouldn't
# affect behavior in any other way.
return sorted(cmds)
def generate_dist(self):
elem = NinjaBuildElement(self.all_outputs, 'meson-dist', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('DESC', 'Creating source packages')
elem.add_item('COMMAND', self.environment.get_build_command() + ['dist'])
elem.add_item('pool', 'console')
self.add_build(elem)
# Alias that runs the target defined above
self.create_target_alias('meson-dist')
def generate_scanbuild(self):
if not environment.detect_scanbuild():
return
if ('', 'scan-build') in self.build.run_target_names:
return
cmd = self.environment.get_build_command() + \
['--internal', 'scanbuild', self.environment.source_dir, self.environment.build_dir] + \
self.environment.get_build_command() + self.get_user_option_args()
elem = NinjaBuildElement(self.all_outputs, 'meson-scan-build', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', cmd)
elem.add_item('pool', 'console')
self.add_build(elem)
# Alias that runs the target defined above
self.create_target_alias('meson-scan-build')
def generate_clangtool(self, name, extra_arg=None):
target_name = 'clang-' + name
extra_args = []
if extra_arg:
target_name += f'-{extra_arg}'
extra_args.append(f'--{extra_arg}')
if not os.path.exists(os.path.join(self.environment.source_dir, '.clang-' + name)) and \
not os.path.exists(os.path.join(self.environment.source_dir, '_clang-' + name)):
return
if target_name in self.all_outputs:
return
if ('', target_name) in self.build.run_target_names:
return
cmd = self.environment.get_build_command() + \
['--internal', 'clang' + name, self.environment.source_dir, self.environment.build_dir] + \
extra_args
elem = NinjaBuildElement(self.all_outputs, 'meson-' + target_name, 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', cmd)
elem.add_item('pool', 'console')
self.add_build(elem)
self.create_target_alias('meson-' + target_name)
def generate_clangformat(self):
if not environment.detect_clangformat():
return
self.generate_clangtool('format')
self.generate_clangtool('format', 'check')
def generate_clangtidy(self):
import shutil
if not shutil.which('clang-tidy'):
return
self.generate_clangtool('tidy')
def generate_tags(self, tool, target_name):
import shutil
if not shutil.which(tool):
return
if ('', target_name) in self.build.run_target_names:
return
if target_name in self.all_outputs:
return
cmd = self.environment.get_build_command() + \
['--internal', 'tags', tool, self.environment.source_dir]
elem = NinjaBuildElement(self.all_outputs, 'meson-' + target_name, 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', cmd)
elem.add_item('pool', 'console')
self.add_build(elem)
# Alias that runs the target defined above
self.create_target_alias('meson-' + target_name)
# For things like scan-build and other helper tools we might have.
def generate_utils(self):
self.generate_scanbuild()
self.generate_clangformat()
self.generate_clangtidy()
self.generate_tags('etags', 'TAGS')
self.generate_tags('ctags', 'ctags')
self.generate_tags('cscope', 'cscope')
cmd = self.environment.get_build_command() + ['--internal', 'uninstall']
elem = NinjaBuildElement(self.all_outputs, 'meson-uninstall', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', cmd)
elem.add_item('pool', 'console')
self.add_build(elem)
# Alias that runs the target defined above
self.create_target_alias('meson-uninstall')
def generate_ending(self):
targetlist = []
for t in self.get_build_by_default_targets().values():
# Add the first output of each target to the 'all' target so that
# they are all built
targetlist.append(os.path.join(self.get_target_dir(t), t.get_outputs()[0]))
elem = NinjaBuildElement(self.all_outputs, 'all', 'phony', targetlist)
self.add_build(elem)
elem = NinjaBuildElement(self.all_outputs, 'meson-clean', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', self.ninja_command + ['-t', 'clean'])
elem.add_item('description', 'Cleaning')
# Alias that runs the above-defined meson-clean target
self.create_target_alias('meson-clean')
# If we have custom targets in this project, add all their outputs to
# the list that is passed to the `cleantrees.py` script. The script
# will manually delete all custom_target outputs that are directories
# instead of files. This is needed because on platforms other than
# Windows, Ninja only deletes directories while cleaning if they are
# empty. https://github.com/mesonbuild/meson/issues/1220
ctlist = []
for t in self.build.get_targets().values():
if isinstance(t, build.CustomTarget):
# Create a list of all custom target outputs
for o in t.get_outputs():
ctlist.append(os.path.join(self.get_target_dir(t), o))
if ctlist:
elem.add_dep(self.generate_custom_target_clean(ctlist))
if OptionKey('b_coverage') in self.environment.coredata.options and \
self.environment.coredata.options[OptionKey('b_coverage')].value:
self.generate_gcov_clean()
elem.add_dep('clean-gcda')
elem.add_dep('clean-gcno')
self.add_build(elem)
deps = self.get_regen_filelist()
elem = NinjaBuildElement(self.all_outputs, 'build.ninja', 'REGENERATE_BUILD', deps)
elem.add_item('pool', 'console')
self.add_build(elem)
elem = NinjaBuildElement(self.all_outputs, 'reconfigure', 'REGENERATE_BUILD', 'PHONY')
elem.add_item('pool', 'console')
self.add_build(elem)
elem = NinjaBuildElement(self.all_outputs, deps, 'phony', '')
self.add_build(elem)
def get_introspection_data(self, target_id: str, target: build.Target) -> T.List[T.Dict[str, T.Union[bool, str, T.List[T.Union[str, T.Dict[str, T.Union[str, T.List[str], bool]]]]]]]:
if target_id not in self.introspection_data or len(self.introspection_data[target_id]) == 0:
return super().get_introspection_data(target_id, target)
result = []
for i in self.introspection_data[target_id].values():
result += [i]
return result
def _scan_fortran_file_deps(src: Path, srcdir: Path, dirname: Path, tdeps, compiler) -> T.List[str]:
"""
scan a Fortran file for dependencies. Needs to be distinct from target
    to allow for recursion induced by `include` statements.
It makes a number of assumptions, including
* `use`, `module`, `submodule` name is not on a continuation line
Regex
-----
* `incre` works for `#include "foo.f90"` and `include "foo.f90"`
* `usere` works for legacy and Fortran 2003 `use` statements
* `submodre` is for Fortran >= 2008 `submodule`
"""
incre = re.compile(FORTRAN_INCLUDE_PAT, re.IGNORECASE)
usere = re.compile(FORTRAN_USE_PAT, re.IGNORECASE)
submodre = re.compile(FORTRAN_SUBMOD_PAT, re.IGNORECASE)
mod_files = []
src = Path(src)
with src.open(encoding='ascii', errors='ignore') as f:
for line in f:
# included files
incmatch = incre.match(line)
if incmatch is not None:
incfile = src.parent / incmatch.group(1)
# NOTE: src.parent is most general, in particular for CMake subproject with Fortran file
# having an `include 'foo.f'` statement.
if incfile.suffix.lower()[1:] in compiler.file_suffixes:
mod_files.extend(_scan_fortran_file_deps(incfile, srcdir, dirname, tdeps, compiler))
# modules
usematch = usere.match(line)
if usematch is not None:
usename = usematch.group(1).lower()
if usename == 'intrinsic': # this keeps the regex simpler
continue
if usename not in tdeps:
# The module is not provided by any source file. This
# is due to:
# a) missing file/typo/etc
# b) using a module provided by the compiler, such as
# OpenMP
# There's no easy way to tell which is which (that I
# know of) so just ignore this and go on. Ideally we
# would print a warning message to the user but this is
# a common occurrence, which would lead to lots of
# distracting noise.
continue
srcfile = srcdir / tdeps[usename].fname # type: Path
if not srcfile.is_file():
if srcfile.name != src.name: # generated source file
pass
else: # subproject
continue
elif srcfile.samefile(src): # self-reference
continue
mod_name = compiler.module_name_to_filename(usename)
mod_files.append(str(dirname / mod_name))
else: # submodules
submodmatch = submodre.match(line)
if submodmatch is not None:
parents = submodmatch.group(1).lower().split(':')
assert len(parents) in (1, 2), (
'submodule ancestry must be specified as'
f' ancestor:parent but Meson found {parents}')
ancestor_child = '_'.join(parents)
if ancestor_child not in tdeps:
raise MesonException("submodule {} relies on ancestor module {} that was not found.".format(submodmatch.group(2).lower(), ancestor_child.split('_')[0]))
submodsrcfile = srcdir / tdeps[ancestor_child].fname # type: Path
if not submodsrcfile.is_file():
if submodsrcfile.name != src.name: # generated source file
pass
else: # subproject
continue
elif submodsrcfile.samefile(src): # self-reference
continue
mod_name = compiler.module_name_to_filename(ancestor_child)
mod_files.append(str(dirname / mod_name))
return mod_files
| []
| []
| [
"MESON_RSP_THRESHOLD"
]
| [] | ["MESON_RSP_THRESHOLD"] | python | 1 | 0 | |
examples/create-page/main.go | package main
import (
"context"
"log"
"os"
"github.com/mkfsn/notion-go"
)
func main() {
c := notion.New(os.Getenv("NOTION_AUTH_TOKEN"))
page, err := c.Pages().Create(context.Background(),
notion.PagesCreateParameters{
Parent: notion.DatabaseParentInput{
DatabaseID: "aee104a17e554846bea3536712bfca2c",
},
Properties: map[string]notion.PropertyValue{
"Name": notion.TitlePropertyValue{
Title: []notion.RichText{
notion.RichTextText{Text: notion.TextObject{Content: "Tuscan Kale"}},
},
},
"Description": notion.RichTextPropertyValue{
RichText: []notion.RichText{
notion.RichTextText{Text: notion.TextObject{Content: " dark green leafy vegetable"}},
},
},
"Food group": notion.SelectPropertyValue{
Select: notion.SelectPropertyValueOption{
Name: "Vegetable",
},
},
"Price": notion.NumberPropertyValue{
Number: 2.5,
},
},
Children: []notion.Block{
notion.Heading2Block{
BlockBase: notion.BlockBase{
Object: notion.ObjectTypeBlock,
Type: notion.BlockTypeHeading2,
},
Heading2: notion.HeadingBlock{
Text: []notion.RichText{
notion.RichTextText{
BaseRichText: notion.BaseRichText{
Type: notion.RichTextTypeText,
},
Text: notion.TextObject{
Content: "Lacinato kale",
},
},
},
},
},
notion.ParagraphBlock{
BlockBase: notion.BlockBase{
Object: notion.ObjectTypeBlock,
Type: notion.BlockTypeParagraph,
},
Paragraph: notion.RichTextBlock{
Text: []notion.RichText{
notion.RichTextText{
BaseRichText: notion.BaseRichText{
Type: notion.RichTextTypeText,
},
Text: notion.TextObject{
Content: "Lacinato kale is a variety of kale with a long tradition in Italian cuisine, especially that of Tuscany. It is also known as Tuscan kale, Italian kale, dinosaur kale, kale, flat back kale, palm tree kale, or black Tuscan palm.",
									Link: &notion.Link{
Type: "url",
URL: "https://en.wikipedia.org/wiki/Lacinato_kale",
},
},
},
},
},
},
},
},
)
if err != nil {
log.Fatal(err)
}
log.Printf("page: %#v\n", page)
}
| [
"\"NOTION_AUTH_TOKEN\""
]
| []
| [
"NOTION_AUTH_TOKEN"
]
| [] | ["NOTION_AUTH_TOKEN"] | go | 1 | 0 | |
test/functional/test_framework/util.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2019 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kB / 1000
if fee < target_fee:
raise AssertionError(
"Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError(
"Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg)
for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError(
"Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError(
"Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError(
"Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
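# Illustrative sketch only: the typical call pattern for assert_raises_rpc_error().
# `node` stands for any TestNode RPC proxy; the error code and message shown are
# the usual values for an out-of-range getblockhash call and are assumptions here.
def _example_assert_raises_rpc_error(node):
    assert_raises_rpc_error(-8, "Block height out of range",
                            node.getblockhash, 1000000)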
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError(
"Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError(
"Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError(
"Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" %
(str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
assert_greater_than(attempts, attempt)
assert_greater_than(timeout, time.time())
raise RuntimeError('Unreachable')
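# Illustrative sketch only: wait_until() with a lambda predicate, blocking for
# up to 30 seconds until a hypothetical `node` reports at least `n` peers.
def _example_wait_until_peers(node, n):
    wait_until(lambda: len(node.getpeerinfo()) >= n, timeout=30)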
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node" + str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
f.write("shrinkdebugfile=0\n")
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node" + str(n_node), "regtest", logname)
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
from_connection.disconnectnode(nodeid=peer_id)
for _ in range(50):
if [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == []:
break
time.sleep(0.1)
else:
raise AssertionError("timed out waiting for disconnect")
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def connect_nodes_mesh(nodes, bi=False):
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
if bi:
connect_nodes_bi(nodes, i, j)
else:
connect_nodes(nodes[i], j)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000))
for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" %
(txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append(
{"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" %
(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(
change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
    Create & broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount + fee * 2)
outputs = make_change(from_node, total_in, amount + fee, fee)
outputs[self_address] = float(amount + fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount + fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [{"txid": self_txid, "vout": vout}]
outputs = {to_node.getnewaddress(): float(amount)}
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count, age=101):
to_generate = int(0.5 * count) + age
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before
# the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE|FORKID")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
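# Illustrative sketch only: how the helpers above are typically combined in
# fee/mempool tests. The utxo count and transaction count are arbitrary values.
def _example_fill_mempool(node):
    fee = 100 * node.getnetworkinfo()["relayfee"]
    utxos = create_confirmed_utxos(fee, node, 30)
    txouts = gen_return_txouts()
    return create_lots_of_big_transactions(node, txouts, utxos, 20, fee)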
def mine_large_block(node, utxos=None):
    # generate a 66k transaction;
    # 14 of them come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def get_srcdir(calling_script=None):
"""
Try to find out the base folder containing the 'src' folder.
If SRCDIR is set it does a sanity check and returns that.
Otherwise it goes on a search and rescue mission.
Returns None if it cannot find a suitable folder.
TODO: This is only used for cdefs, consider moving that there.
"""
def contains_src(path_to_check):
if not path_to_check:
return False
else:
cand_path = os.path.join(path_to_check, 'src')
return os.path.exists(cand_path) and os.path.isdir(cand_path)
srcdir = os.environ.get('SRCDIR', '')
if contains_src(srcdir):
return srcdir
# If we have a caller, try to guess from its location where the
# top level might be.
if calling_script:
caller_basedir = os.path.dirname(
os.path.dirname(os.path.dirname(calling_script)))
if caller_basedir != '' and contains_src(os.path.abspath(caller_basedir)):
return os.path.abspath(caller_basedir)
    # Try to work it out based on the main module
# We might expect the caller to be rpc-tests.py or a test script
# itself.
import sys
mainmod = sys.modules['__main__']
mainmod_path = getattr(mainmod, '__file__', '')
if mainmod_path and mainmod_path.endswith('.py'):
maybe_top = os.path.dirname(
os.path.dirname(os.path.dirname(mainmod_path)))
if contains_src(os.path.abspath(maybe_top)):
return os.path.abspath(maybe_top)
# No luck, give up.
return None
| []
| []
| [
"SRCDIR"
]
| [] | ["SRCDIR"] | python | 1 | 0 | |
Lib/test/test_posix.py | "Test posix functions"
z test zaimportuj support
# Skip these tests jeżeli there jest no posix module.
posix = support.import_module('posix')
zaimportuj errno
zaimportuj sys
zaimportuj time
zaimportuj os
zaimportuj platform
zaimportuj pwd
zaimportuj shutil
zaimportuj stat
zaimportuj tempfile
zaimportuj unittest
zaimportuj warnings
_DUMMY_SYMLINK = os.path.join(tempfile.gettempdir(),
support.TESTFN + '-dummy-symlink')
klasa PosixTester(unittest.TestCase):
def setUp(self):
# create empty file
fp = open(support.TESTFN, 'w+')
fp.close()
self.teardown_files = [ support.TESTFN ]
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings('ignore', '.* potential security risk .*',
RuntimeWarning)
def tearDown(self):
dla teardown_file w self.teardown_files:
support.unlink(teardown_file)
self._warnings_manager.__exit__(Nic, Nic, Nic)
def testNoArgFunctions(self):
# test posix functions which take no arguments oraz have
# no side-effects which we need to cleanup (e.g., fork, wait, abort)
NO_ARG_FUNCTIONS = [ "ctermid", "getcwd", "getcwdb", "uname",
"times", "getloadavg",
"getegid", "geteuid", "getgid", "getgroups",
"getpid", "getpgrp", "getppid", "getuid", "sync",
]
dla name w NO_ARG_FUNCTIONS:
posix_func = getattr(posix, name, Nic)
jeżeli posix_func jest nie Nic:
posix_func()
self.assertRaises(TypeError, posix_func, 1)
@unittest.skipUnless(hasattr(posix, 'getresuid'),
'test needs posix.getresuid()')
def test_getresuid(self):
user_ids = posix.getresuid()
self.assertEqual(len(user_ids), 3)
dla val w user_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'getresgid'),
'test needs posix.getresgid()')
def test_getresgid(self):
group_ids = posix.getresgid()
self.assertEqual(len(group_ids), 3)
dla val w group_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid(self):
current_user_ids = posix.getresuid()
self.assertIsNic(posix.setresuid(*current_user_ids))
# -1 means don't change that value.
self.assertIsNic(posix.setresuid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid_exception(self):
# Don't do this test jeżeli someone jest silly enough to run us jako root.
current_user_ids = posix.getresuid()
jeżeli 0 nie w current_user_ids:
new_user_ids = (current_user_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresuid, *new_user_ids)
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid(self):
current_group_ids = posix.getresgid()
self.assertIsNic(posix.setresgid(*current_group_ids))
# -1 means don't change that value.
self.assertIsNic(posix.setresgid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid_exception(self):
# Don't do this test jeżeli someone jest silly enough to run us jako root.
current_group_ids = posix.getresgid()
jeżeli 0 nie w current_group_ids:
new_group_ids = (current_group_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresgid, *new_group_ids)
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs os.initgroups()")
def test_initgroups(self):
# It takes a string oraz an integer; check that it podnieśs a TypeError
# dla other argument lists.
self.assertRaises(TypeError, posix.initgroups)
self.assertRaises(TypeError, posix.initgroups, Nic)
self.assertRaises(TypeError, posix.initgroups, 3, "foo")
self.assertRaises(TypeError, posix.initgroups, "foo", 3, object())
# If a non-privileged user invokes it, it should fail przy OSError
# EPERM.
jeżeli os.getuid() != 0:
spróbuj:
name = pwd.getpwuid(posix.getuid()).pw_name
wyjąwszy KeyError:
# the current UID may nie have a pwd entry
podnieś unittest.SkipTest("need a pwd entry")
spróbuj:
posix.initgroups(name, 13)
wyjąwszy OSError jako e:
self.assertEqual(e.errno, errno.EPERM)
inaczej:
self.fail("Expected OSError to be podnieśd by initgroups")
    @unittest.skipUnless(hasattr(posix, 'statvfs'),
                         'test needs posix.statvfs()')
    def test_statvfs(self):
        self.assertTrue(posix.statvfs(os.curdir))
    @unittest.skipUnless(hasattr(posix, 'fstatvfs'),
                         'test needs posix.fstatvfs()')
    def test_fstatvfs(self):
        fp = open(support.TESTFN)
        try:
            self.assertTrue(posix.fstatvfs(fp.fileno()))
            self.assertTrue(posix.statvfs(fp.fileno()))
        finally:
            fp.close()
    @unittest.skipUnless(hasattr(posix, 'ftruncate'),
                         'test needs posix.ftruncate()')
    def test_ftruncate(self):
        fp = open(support.TESTFN, 'w+')
        try:
            # we need to have some data to truncate
            fp.write('test')
            fp.flush()
            posix.ftruncate(fp.fileno(), 0)
        finally:
            fp.close()
    @unittest.skipUnless(hasattr(posix, 'truncate'), "test needs posix.truncate()")
    def test_truncate(self):
        with open(support.TESTFN, 'w') as fp:
            fp.write('test')
            fp.flush()
        posix.truncate(support.TESTFN, 0)
    @unittest.skipUnless(getattr(os, 'execve', None) in os.supports_fd, "test needs execve() to support the fd parameter")
    @unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
    @unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
    def test_fexecve(self):
        fp = os.open(sys.executable, os.O_RDONLY)
        try:
            pid = os.fork()
            if pid == 0:
                os.chdir(os.path.split(sys.executable)[0])
                posix.execve(fp, [sys.executable, '-c', 'pass'], os.environ)
            else:
                self.assertEqual(os.waitpid(pid, 0), (pid, 0))
        finally:
            os.close(fp)
    @unittest.skipUnless(hasattr(posix, 'waitid'), "test needs posix.waitid()")
    @unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
    def test_waitid(self):
        pid = os.fork()
        if pid == 0:
            os.chdir(os.path.split(sys.executable)[0])
            posix.execve(sys.executable, [sys.executable, '-c', 'pass'], os.environ)
        else:
            res = posix.waitid(posix.P_PID, pid, posix.WEXITED)
            self.assertEqual(pid, res.si_pid)
    @unittest.skipUnless(hasattr(posix, 'lockf'), "test needs posix.lockf()")
    def test_lockf(self):
        fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
        try:
            os.write(fd, b'test')
            os.lseek(fd, 0, os.SEEK_SET)
            posix.lockf(fd, posix.F_LOCK, 4)
            # section is locked
            posix.lockf(fd, posix.F_ULOCK, 4)
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'pread'), "test needs posix.pread()")
    def test_pread(self):
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            os.write(fd, b'test')
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(b'es', posix.pread(fd, 2, 1))
            # the first pread() shouldn't disturb the file offset
            self.assertEqual(b'te', posix.read(fd, 2))
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'pwrite'), "test needs posix.pwrite()")
    def test_pwrite(self):
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            os.write(fd, b'test')
            os.lseek(fd, 0, os.SEEK_SET)
            posix.pwrite(fd, b'xx', 1)
            self.assertEqual(b'txxt', posix.read(fd, 4))
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'posix_fallocate'),
                         "test needs posix.posix_fallocate()")
    def test_posix_fallocate(self):
        fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
        try:
            posix.posix_fallocate(fd, 0, 10)
        except OSError as inst:
            # issue10812, ZFS doesn't appear to support posix_fallocate,
            # so skip Solaris-based since they are likely to have ZFS.
            if inst.errno != errno.EINVAL or not sys.platform.startswith("sunos"):
                raise
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'posix_fadvise'),
                         "test needs posix.posix_fadvise()")
    def test_posix_fadvise(self):
        fd = os.open(support.TESTFN, os.O_RDONLY)
        try:
            posix.posix_fadvise(fd, 0, 0, posix.POSIX_FADV_WILLNEED)
        finally:
            os.close(fd)
    @unittest.skipUnless(os.utime in os.supports_fd, "test needs fd support in os.utime")
    def test_utime_with_fd(self):
        now = time.time()
        fd = os.open(support.TESTFN, os.O_RDONLY)
        try:
            posix.utime(fd)
            posix.utime(fd, None)
            self.assertRaises(TypeError, posix.utime, fd, (None, None))
            self.assertRaises(TypeError, posix.utime, fd, (now, None))
            self.assertRaises(TypeError, posix.utime, fd, (None, now))
            posix.utime(fd, (int(now), int(now)))
            posix.utime(fd, (now, now))
            self.assertRaises(ValueError, posix.utime, fd, (now, now), ns=(now, now))
            self.assertRaises(ValueError, posix.utime, fd, (now, 0), ns=(None, None))
            self.assertRaises(ValueError, posix.utime, fd, (None, None), ns=(now, 0))
            posix.utime(fd, (int(now), int((now - int(now)) * 1e9)))
            posix.utime(fd, ns=(int(now), int((now - int(now)) * 1e9)))
        finally:
            os.close(fd)
    @unittest.skipUnless(os.utime in os.supports_follow_symlinks, "test needs follow_symlinks support in os.utime")
    def test_utime_nofollow_symlinks(self):
        now = time.time()
        posix.utime(support.TESTFN, None, follow_symlinks=False)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), follow_symlinks=False)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), follow_symlinks=False)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), follow_symlinks=False)
        posix.utime(support.TESTFN, (int(now), int(now)), follow_symlinks=False)
        posix.utime(support.TESTFN, (now, now), follow_symlinks=False)
        posix.utime(support.TESTFN, follow_symlinks=False)
    @unittest.skipUnless(hasattr(posix, 'writev'), "test needs posix.writev()")
    def test_writev(self):
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            n = os.writev(fd, (b'test1', b'tt2', b't3'))
            self.assertEqual(n, 10)
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(b'test1tt2t3', posix.read(fd, 10))
            # Issue #20113: empty list of buffers should not crash
            try:
                size = posix.writev(fd, [])
            except OSError:
                # writev(fd, []) raises OSError(22, "Invalid argument")
                # on OpenIndiana
                pass
            else:
                self.assertEqual(size, 0)
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'readv'), "test needs posix.readv()")
    def test_readv(self):
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            os.write(fd, b'test1tt2t3')
            os.lseek(fd, 0, os.SEEK_SET)
            buf = [bytearray(i) for i in [5, 3, 2]]
            self.assertEqual(posix.readv(fd, buf), 10)
            self.assertEqual([b'test1', b'tt2', b't3'], [bytes(i) for i in buf])
            # Issue #20113: empty list of buffers should not crash
            try:
                size = posix.readv(fd, [])
            except OSError:
                # readv(fd, []) raises OSError(22, "Invalid argument")
                # on OpenIndiana
                pass
            else:
                self.assertEqual(size, 0)
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'dup'),
                         'test needs posix.dup()')
    def test_dup(self):
        fp = open(support.TESTFN)
        try:
            fd = posix.dup(fp.fileno())
            self.assertIsInstance(fd, int)
            os.close(fd)
        finally:
            fp.close()
    @unittest.skipUnless(hasattr(posix, 'confstr'),
                         'test needs posix.confstr()')
    def test_confstr(self):
        self.assertRaises(ValueError, posix.confstr, "CS_garbage")
        self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
    @unittest.skipUnless(hasattr(posix, 'dup2'),
                         'test needs posix.dup2()')
    def test_dup2(self):
        fp1 = open(support.TESTFN)
        fp2 = open(support.TESTFN)
        try:
            posix.dup2(fp1.fileno(), fp2.fileno())
        finally:
            fp1.close()
            fp2.close()
    @unittest.skipUnless(hasattr(os, 'O_CLOEXEC'), "needs os.O_CLOEXEC")
    @support.requires_linux_version(2, 6, 23)
    def test_oscloexec(self):
        fd = os.open(support.TESTFN, os.O_RDONLY|os.O_CLOEXEC)
        self.addCleanup(os.close, fd)
        self.assertFalse(os.get_inheritable(fd))
    @unittest.skipUnless(hasattr(posix, 'O_EXLOCK'),
                         'test needs posix.O_EXLOCK')
    def test_osexlock(self):
        fd = os.open(support.TESTFN,
                     os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
        self.assertRaises(OSError, os.open, support.TESTFN,
                          os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
        os.close(fd)
        if hasattr(posix, "O_SHLOCK"):
            fd = os.open(support.TESTFN,
                         os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
            self.assertRaises(OSError, os.open, support.TESTFN,
                              os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'O_SHLOCK'),
                         'test needs posix.O_SHLOCK')
    def test_osshlock(self):
        fd1 = os.open(support.TESTFN,
                      os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
        fd2 = os.open(support.TESTFN,
                      os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
        os.close(fd2)
        os.close(fd1)
        if hasattr(posix, "O_EXLOCK"):
            fd = os.open(support.TESTFN,
                         os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
            self.assertRaises(OSError, os.open, support.TESTFN,
                              os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'fstat'),
                         'test needs posix.fstat()')
    def test_fstat(self):
        fp = open(support.TESTFN)
        try:
            self.assertTrue(posix.fstat(fp.fileno()))
            self.assertTrue(posix.stat(fp.fileno()))
            self.assertRaisesRegex(TypeError,
                    'should be string, bytes or integer, not',
                    posix.stat, float(fp.fileno()))
        finally:
            fp.close()
    @unittest.skipUnless(hasattr(posix, 'stat'),
                         'test needs posix.stat()')
    def test_stat(self):
        self.assertTrue(posix.stat(support.TESTFN))
        self.assertTrue(posix.stat(os.fsencode(support.TESTFN)))
        self.assertTrue(posix.stat(bytearray(os.fsencode(support.TESTFN))))
        self.assertRaisesRegex(TypeError,
                'can\'t specify None for path argument',
                posix.stat, None)
        self.assertRaisesRegex(TypeError,
                'should be string, bytes or integer, not',
                posix.stat, list(support.TESTFN))
        self.assertRaisesRegex(TypeError,
                'should be string, bytes or integer, not',
                posix.stat, list(os.fsencode(support.TESTFN)))
    @unittest.skipUnless(hasattr(posix, 'mkfifo'), "don't have mkfifo()")
    def test_mkfifo(self):
        support.unlink(support.TESTFN)
        posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR)
        self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
    @unittest.skipUnless(hasattr(posix, 'mknod') and hasattr(stat, 'S_IFIFO'),
                         "don't have mknod()/S_IFIFO")
    def test_mknod(self):
        # Test using mknod() to create a FIFO (the only use specified
        # by POSIX).
        support.unlink(support.TESTFN)
        mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
        try:
            posix.mknod(support.TESTFN, mode, 0)
        except OSError as e:
            # Some old systems don't allow unprivileged users to use
            # mknod(), or only support creating device nodes.
            self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
        else:
            self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
    @unittest.skipUnless(hasattr(posix, 'stat'), 'test needs posix.stat()')
    @unittest.skipUnless(hasattr(posix, 'makedev'), 'test needs posix.makedev()')
    def test_makedev(self):
        st = posix.stat(support.TESTFN)
        dev = st.st_dev
        self.assertIsInstance(dev, int)
        self.assertGreaterEqual(dev, 0)
        major = posix.major(dev)
        self.assertIsInstance(major, int)
        self.assertGreaterEqual(major, 0)
        self.assertEqual(posix.major(dev), major)
        self.assertRaises(TypeError, posix.major, float(dev))
        self.assertRaises(TypeError, posix.major)
        self.assertRaises((ValueError, OverflowError), posix.major, -1)
        minor = posix.minor(dev)
        self.assertIsInstance(minor, int)
        self.assertGreaterEqual(minor, 0)
        self.assertEqual(posix.minor(dev), minor)
        self.assertRaises(TypeError, posix.minor, float(dev))
        self.assertRaises(TypeError, posix.minor)
        self.assertRaises((ValueError, OverflowError), posix.minor, -1)
        self.assertEqual(posix.makedev(major, minor), dev)
        self.assertRaises(TypeError, posix.makedev, float(major), minor)
        self.assertRaises(TypeError, posix.makedev, major, float(minor))
        self.assertRaises(TypeError, posix.makedev, major)
        self.assertRaises(TypeError, posix.makedev)
    def _test_all_chown_common(self, chown_func, first_param, stat_func):
        """Common code for chown, fchown and lchown tests."""
        def check_stat(uid, gid):
            if stat_func is not None:
                stat = stat_func(first_param)
                self.assertEqual(stat.st_uid, uid)
                self.assertEqual(stat.st_gid, gid)
        uid = os.getuid()
        gid = os.getgid()
        # test a successful chown call
        chown_func(first_param, uid, gid)
        check_stat(uid, gid)
        chown_func(first_param, -1, gid)
        check_stat(uid, gid)
        chown_func(first_param, uid, -1)
        check_stat(uid, gid)
        if uid == 0:
            # Try an amusingly large uid/gid to make sure we handle
            # large unsigned values.  (chown lets you use any
            # uid/gid you like, even if they aren't defined.)
            #
            # This problem keeps coming up:
            #   http://bugs.python.org/issue1747858
            #   http://bugs.python.org/issue4591
            #   http://bugs.python.org/issue15301
            # Hopefully the fix in 4591 fixes it for good!
            #
            # This part of the test only runs when run as root.
            # Only scary people run their tests as root.
            big_value = 2**31
            chown_func(first_param, big_value, big_value)
            check_stat(big_value, big_value)
            chown_func(first_param, -1, -1)
            check_stat(big_value, big_value)
            chown_func(first_param, uid, gid)
            check_stat(uid, gid)
        elif platform.system() in ('HP-UX', 'SunOS'):
            # HP-UX and Solaris can allow a non-root user to chown() to root
            # (issue #5113)
            raise unittest.SkipTest("Skipping because of non-standard chown() "
                                    "behavior")
        else:
            # non-root cannot chown to root, raises OSError
            self.assertRaises(OSError, chown_func, first_param, 0, 0)
            check_stat(uid, gid)
            self.assertRaises(OSError, chown_func, first_param, 0, -1)
            check_stat(uid, gid)
            if 0 not in os.getgroups():
                self.assertRaises(OSError, chown_func, first_param, -1, 0)
                check_stat(uid, gid)
        # test illegal types
        for t in str, float:
            self.assertRaises(TypeError, chown_func, first_param, t(uid), gid)
            check_stat(uid, gid)
            self.assertRaises(TypeError, chown_func, first_param, uid, t(gid))
            check_stat(uid, gid)
    @unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
    def test_chown(self):
        # raise an OSError if the file does not exist
        os.unlink(support.TESTFN)
        self.assertRaises(OSError, posix.chown, support.TESTFN, -1, -1)
        # re-create the file
        support.create_empty_file(support.TESTFN)
        self._test_all_chown_common(posix.chown, support.TESTFN,
                                    getattr(posix, 'stat', None))
    @unittest.skipUnless(hasattr(posix, 'fchown'), "test needs os.fchown()")
    def test_fchown(self):
        os.unlink(support.TESTFN)
        # re-create the file
        test_file = open(support.TESTFN, 'w')
        try:
            fd = test_file.fileno()
            self._test_all_chown_common(posix.fchown, fd,
                                        getattr(posix, 'fstat', None))
        finally:
            test_file.close()
    @unittest.skipUnless(hasattr(posix, 'lchown'), "test needs os.lchown()")
    def test_lchown(self):
        os.unlink(support.TESTFN)
        # create a symlink
        os.symlink(_DUMMY_SYMLINK, support.TESTFN)
        self._test_all_chown_common(posix.lchown, support.TESTFN,
                                    getattr(posix, 'lstat', None))
    @unittest.skipUnless(hasattr(posix, 'chdir'), 'test needs posix.chdir()')
    def test_chdir(self):
        posix.chdir(os.curdir)
        self.assertRaises(OSError, posix.chdir, support.TESTFN)
    def test_listdir(self):
        self.assertTrue(support.TESTFN in posix.listdir(os.curdir))
    def test_listdir_default(self):
        # When listdir is called without argument,
        # it's the same as listdir(os.curdir).
        self.assertTrue(support.TESTFN in posix.listdir())
    def test_listdir_bytes(self):
        # When listdir is called with a bytes object,
        # the returned strings are of type bytes.
        self.assertTrue(os.fsencode(support.TESTFN) in posix.listdir(b'.'))
    @unittest.skipUnless(posix.listdir in os.supports_fd,
                         "test needs fd support for posix.listdir()")
    def test_listdir_fd(self):
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        self.addCleanup(posix.close, f)
        self.assertEqual(
            sorted(posix.listdir('.')),
            sorted(posix.listdir(f))
            )
        # Check that the fd offset was reset (issue #13739)
        self.assertEqual(
            sorted(posix.listdir('.')),
            sorted(posix.listdir(f))
            )
    @unittest.skipUnless(hasattr(posix, 'access'), 'test needs posix.access()')
    def test_access(self):
        self.assertTrue(posix.access(support.TESTFN, os.R_OK))
    @unittest.skipUnless(hasattr(posix, 'umask'), 'test needs posix.umask()')
    def test_umask(self):
        old_mask = posix.umask(0)
        self.assertIsInstance(old_mask, int)
        posix.umask(old_mask)
    @unittest.skipUnless(hasattr(posix, 'strerror'),
                         'test needs posix.strerror()')
    def test_strerror(self):
        self.assertTrue(posix.strerror(0))
    @unittest.skipUnless(hasattr(posix, 'pipe'), 'test needs posix.pipe()')
    def test_pipe(self):
        reader, writer = posix.pipe()
        os.close(reader)
        os.close(writer)
    @unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
    @support.requires_linux_version(2, 6, 27)
    def test_pipe2(self):
        self.assertRaises(TypeError, os.pipe2, 'DEADBEEF')
        self.assertRaises(TypeError, os.pipe2, 0, 0)
        # try calling with flags = 0, like os.pipe()
        r, w = os.pipe2(0)
        os.close(r)
        os.close(w)
        # test flags
        r, w = os.pipe2(os.O_CLOEXEC|os.O_NONBLOCK)
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        self.assertFalse(os.get_inheritable(r))
        self.assertFalse(os.get_inheritable(w))
        self.assertFalse(os.get_blocking(r))
        self.assertFalse(os.get_blocking(w))
        # try reading from an empty pipe: this should fail, not block
        self.assertRaises(OSError, os.read, r, 1)
        # try a write big enough to fill-up the pipe: this should either
        # fail or perform a partial write, not block
        try:
            os.write(w, b'x' * support.PIPE_MAX_SIZE)
        except OSError:
            pass
    @support.cpython_only
    @unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
    @support.requires_linux_version(2, 6, 27)
    def test_pipe2_c_limits(self):
        # Issue 15989
        import _testcapi
        self.assertRaises(OverflowError, os.pipe2, _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, os.pipe2, _testcapi.UINT_MAX + 1)
    @unittest.skipUnless(hasattr(posix, 'utime'), 'test needs posix.utime()')
    def test_utime(self):
        now = time.time()
        posix.utime(support.TESTFN, None)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None))
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None))
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now))
        posix.utime(support.TESTFN, (int(now), int(now)))
        posix.utime(support.TESTFN, (now, now))
    def _test_chflags_regular_file(self, chflags_func, target_file, **kwargs):
        st = os.stat(target_file)
        self.assertTrue(hasattr(st, 'st_flags'))
        # ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
        flags = st.st_flags | stat.UF_IMMUTABLE
        try:
            chflags_func(target_file, flags, **kwargs)
        except OSError as err:
            if err.errno != errno.EOPNOTSUPP:
                raise
            msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
            self.skipTest(msg)
        try:
            new_st = os.stat(target_file)
            self.assertEqual(st.st_flags | stat.UF_IMMUTABLE, new_st.st_flags)
            try:
                fd = open(target_file, 'w+')
            except OSError as e:
                self.assertEqual(e.errno, errno.EPERM)
        finally:
            posix.chflags(target_file, st.st_flags)
    @unittest.skipUnless(hasattr(posix, 'chflags'), 'test needs os.chflags()')
    def test_chflags(self):
        self._test_chflags_regular_file(posix.chflags, support.TESTFN)
    @unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
    def test_lchflags_regular_file(self):
        self._test_chflags_regular_file(posix.lchflags, support.TESTFN)
        self._test_chflags_regular_file(posix.chflags, support.TESTFN, follow_symlinks=False)
    @unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
    def test_lchflags_symlink(self):
        testfn_st = os.stat(support.TESTFN)
        self.assertTrue(hasattr(testfn_st, 'st_flags'))
        os.symlink(support.TESTFN, _DUMMY_SYMLINK)
        self.teardown_files.append(_DUMMY_SYMLINK)
        dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
        def chflags_nofollow(path, flags):
            return posix.chflags(path, flags, follow_symlinks=False)
        for fn in (posix.lchflags, chflags_nofollow):
            # ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
            flags = dummy_symlink_st.st_flags | stat.UF_IMMUTABLE
            try:
                fn(_DUMMY_SYMLINK, flags)
            except OSError as err:
                if err.errno != errno.EOPNOTSUPP:
                    raise
                msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
                self.skipTest(msg)
            try:
                new_testfn_st = os.stat(support.TESTFN)
                new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
                self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
                self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE,
                                 new_dummy_symlink_st.st_flags)
            finally:
                fn(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
    def test_environ(self):
        if os.name == "nt":
            item_type = str
        else:
            item_type = bytes
        for k, v in posix.environ.items():
            self.assertEqual(type(k), item_type)
            self.assertEqual(type(v), item_type)
    @unittest.skipUnless(hasattr(posix, 'getcwd'), 'test needs posix.getcwd()')
    def test_getcwd_long_pathnames(self):
        dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
        curdir = os.getcwd()
        base_path = os.path.abspath(support.TESTFN) + '.getcwd'
        try:
            os.mkdir(base_path)
            os.chdir(base_path)
        except:
            # Just returning nothing instead of the SkipTest exception, because
            # the test results in Error in that case. Is that ok?
            #  raise unittest.SkipTest("cannot create directory for testing")
            return

            def _create_and_do_getcwd(dirname, current_path_length = 0):
                try:
                    os.mkdir(dirname)
                except:
                    raise unittest.SkipTest("mkdir cannot create directory sufficiently deep for getcwd test")
                os.chdir(dirname)
                try:
                    os.getcwd()
                    if current_path_length < 1027:
                        _create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
                finally:
                    os.chdir('..')
                    os.rmdir(dirname)
            _create_and_do_getcwd(dirname)
        finally:
            os.chdir(curdir)
            support.rmtree(base_path)
    @unittest.skipUnless(hasattr(posix, 'getgrouplist'), "test needs posix.getgrouplist()")
    @unittest.skipUnless(hasattr(pwd, 'getpwuid'), "test needs pwd.getpwuid()")
    @unittest.skipUnless(hasattr(os, 'getuid'), "test needs os.getuid()")
    def test_getgrouplist(self):
        user = pwd.getpwuid(os.getuid())[0]
        group = pwd.getpwuid(os.getuid())[3]
        self.assertIn(group, posix.getgrouplist(user, group))
    @unittest.skipUnless(hasattr(os, 'getegid'), "test needs os.getegid()")
    def test_getgroups(self):
        with os.popen('id -G 2>/dev/null') as idg:
            groups = idg.read().strip()
            ret = idg.close()
        if ret is not None or not groups:
            raise unittest.SkipTest("need working 'id -G'")
        # Issues 16698: OS X ABIs prior to 10.6 have limits on getgroups()
        if sys.platform == 'darwin':
            import sysconfig
            dt = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') or '10.0'
            if tuple(int(n) for n in dt.split('.')[0:2]) < (10, 6):
                raise unittest.SkipTest("getgroups(2) is broken prior to 10.6")
        # 'id -G' and 'os.getgroups()' should return the same
        # groups, ignoring order and duplicates.
        # #10822 - it is implementation defined whether posix.getgroups()
        # includes the effective gid so we include it anyway, since id -G does
        self.assertEqual(
            set([int(x) for x in groups.split()]),
            set(posix.getgroups() + [posix.getegid()]))
    # tests for the posix *at functions follow
    @unittest.skipUnless(os.access in os.supports_dir_fd, "test needs dir_fd support for os.access()")
    def test_access_dir_fd(self):
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            self.assertTrue(posix.access(support.TESTFN, os.R_OK, dir_fd=f))
        finally:
            posix.close(f)
    @unittest.skipUnless(os.chmod in os.supports_dir_fd, "test needs dir_fd support in os.chmod()")
    def test_chmod_dir_fd(self):
        os.chmod(support.TESTFN, stat.S_IRUSR)
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.chmod(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
            s = posix.stat(support.TESTFN)
            self.assertEqual(s[0] & stat.S_IRWXU, stat.S_IRUSR | stat.S_IWUSR)
        finally:
            posix.close(f)
    @unittest.skipUnless(os.chown in os.supports_dir_fd, "test needs dir_fd support in os.chown()")
    def test_chown_dir_fd(self):
        support.unlink(support.TESTFN)
        support.create_empty_file(support.TESTFN)
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.chown(support.TESTFN, os.getuid(), os.getgid(), dir_fd=f)
        finally:
            posix.close(f)
    @unittest.skipUnless(os.stat in os.supports_dir_fd, "test needs dir_fd support in os.stat()")
    def test_stat_dir_fd(self):
        support.unlink(support.TESTFN)
        with open(support.TESTFN, 'w') as outfile:
            outfile.write("testline\n")
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            s1 = posix.stat(support.TESTFN)
            s2 = posix.stat(support.TESTFN, dir_fd=f)
            self.assertEqual(s1, s2)
            s2 = posix.stat(support.TESTFN, dir_fd=None)
            self.assertEqual(s1, s2)
            self.assertRaisesRegex(TypeError, 'should be integer, not',
                    posix.stat, support.TESTFN, dir_fd=posix.getcwd())
            self.assertRaisesRegex(TypeError, 'should be integer, not',
                    posix.stat, support.TESTFN, dir_fd=float(f))
            self.assertRaises(OverflowError,
                    posix.stat, support.TESTFN, dir_fd=10**20)
        finally:
            posix.close(f)
    @unittest.skipUnless(os.utime in os.supports_dir_fd, "test needs dir_fd support in os.utime()")
    def test_utime_dir_fd(self):
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            now = time.time()
            posix.utime(support.TESTFN, None, dir_fd=f)
            posix.utime(support.TESTFN, dir_fd=f)
            self.assertRaises(TypeError, posix.utime, support.TESTFN, now, dir_fd=f)
            self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), dir_fd=f)
            self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), dir_fd=f)
            self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), dir_fd=f)
            self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, "x"), dir_fd=f)
            posix.utime(support.TESTFN, (int(now), int(now)), dir_fd=f)
            posix.utime(support.TESTFN, (now, now), dir_fd=f)
            posix.utime(support.TESTFN,
                    (int(now), int((now - int(now)) * 1e9)), dir_fd=f)
            posix.utime(support.TESTFN, dir_fd=f,
                            times=(int(now), int((now - int(now)) * 1e9)))
            # try dir_fd and follow_symlinks together
            if os.utime in os.supports_follow_symlinks:
                try:
                    posix.utime(support.TESTFN, follow_symlinks=False, dir_fd=f)
                except ValueError:
                    # whoops!  using both together not supported on this platform.
                    pass
        finally:
            posix.close(f)
    @unittest.skipUnless(os.link in os.supports_dir_fd, "test needs dir_fd support in os.link()")
    def test_link_dir_fd(self):
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.link(support.TESTFN, support.TESTFN + 'link', src_dir_fd=f, dst_dir_fd=f)
            # should have same inodes
            self.assertEqual(posix.stat(support.TESTFN)[1],
                posix.stat(support.TESTFN + 'link')[1])
        finally:
            posix.close(f)
            support.unlink(support.TESTFN + 'link')
    @unittest.skipUnless(os.mkdir in os.supports_dir_fd, "test needs dir_fd support in os.mkdir()")
    def test_mkdir_dir_fd(self):
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.mkdir(support.TESTFN + 'dir', dir_fd=f)
            posix.stat(support.TESTFN + 'dir') # should not raise exception
        finally:
            posix.close(f)
            support.rmtree(support.TESTFN + 'dir')
    @unittest.skipUnless((os.mknod in os.supports_dir_fd) and hasattr(stat, 'S_IFIFO'),
                         "test requires both stat.S_IFIFO and dir_fd support for os.mknod()")
    def test_mknod_dir_fd(self):
        # Test using mknodat() to create a FIFO (the only use specified
        # by POSIX).
        support.unlink(support.TESTFN)
        mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.mknod(support.TESTFN, mode, 0, dir_fd=f)
        except OSError as e:
            # Some old systems don't allow unprivileged users to use
            # mknod(), or only support creating device nodes.
            self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
        else:
            self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
        finally:
            posix.close(f)
    @unittest.skipUnless(os.open in os.supports_dir_fd, "test needs dir_fd support in os.open()")
    def test_open_dir_fd(self):
        support.unlink(support.TESTFN)
        with open(support.TESTFN, 'w') as outfile:
            outfile.write("testline\n")
        a = posix.open(posix.getcwd(), posix.O_RDONLY)
        b = posix.open(support.TESTFN, posix.O_RDONLY, dir_fd=a)
        try:
            res = posix.read(b, 9).decode(encoding="utf-8")
            self.assertEqual("testline\n", res)
        finally:
            posix.close(a)
            posix.close(b)
    @unittest.skipUnless(os.readlink in os.supports_dir_fd, "test needs dir_fd support in os.readlink()")
    def test_readlink_dir_fd(self):
        os.symlink(support.TESTFN, support.TESTFN + 'link')
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            self.assertEqual(posix.readlink(support.TESTFN + 'link'),
                posix.readlink(support.TESTFN + 'link', dir_fd=f))
        finally:
            support.unlink(support.TESTFN + 'link')
            posix.close(f)
    @unittest.skipUnless(os.rename in os.supports_dir_fd, "test needs dir_fd support in os.rename()")
    def test_rename_dir_fd(self):
        support.unlink(support.TESTFN)
        support.create_empty_file(support.TESTFN + 'ren')
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.rename(support.TESTFN + 'ren', support.TESTFN, src_dir_fd=f, dst_dir_fd=f)
        except:
            posix.rename(support.TESTFN + 'ren', support.TESTFN)
            raise
        else:
            posix.stat(support.TESTFN) # should not raise exception
        finally:
            posix.close(f)
    @unittest.skipUnless(os.symlink in os.supports_dir_fd, "test needs dir_fd support in os.symlink()")
    def test_symlink_dir_fd(self):
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.symlink(support.TESTFN, support.TESTFN + 'link', dir_fd=f)
            self.assertEqual(posix.readlink(support.TESTFN + 'link'), support.TESTFN)
        finally:
            posix.close(f)
            support.unlink(support.TESTFN + 'link')
    @unittest.skipUnless(os.unlink in os.supports_dir_fd, "test needs dir_fd support in os.unlink()")
    def test_unlink_dir_fd(self):
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        support.create_empty_file(support.TESTFN + 'del')
        posix.stat(support.TESTFN + 'del') # should not raise exception
        try:
            posix.unlink(support.TESTFN + 'del', dir_fd=f)
        except:
            support.unlink(support.TESTFN + 'del')
            raise
        else:
            self.assertRaises(OSError, posix.stat, support.TESTFN + 'link')
        finally:
            posix.close(f)
    @unittest.skipUnless(os.mkfifo in os.supports_dir_fd, "test needs dir_fd support in os.mkfifo()")
    def test_mkfifo_dir_fd(self):
        support.unlink(support.TESTFN)
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
            self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
        finally:
            posix.close(f)
    requires_sched_h = unittest.skipUnless(hasattr(posix, 'sched_yield'),
                                           "don't have scheduling support")
    requires_sched_affinity = unittest.skipUnless(hasattr(posix, 'sched_setaffinity'),
                                                  "don't have sched affinity support")
    @requires_sched_h
    def test_sched_yield(self):
        # This has no error conditions (at least on Linux).
        posix.sched_yield()
    @requires_sched_h
    @unittest.skipUnless(hasattr(posix, 'sched_get_priority_max'),
                         "requires sched_get_priority_max()")
    def test_sched_priority(self):
        # Round-robin usually has interesting priorities.
        pol = posix.SCHED_RR
        lo = posix.sched_get_priority_min(pol)
        hi = posix.sched_get_priority_max(pol)
        self.assertIsInstance(lo, int)
        self.assertIsInstance(hi, int)
        self.assertGreaterEqual(hi, lo)
        # OSX evidently just returns 15 without checking the argument.
        if sys.platform != "darwin":
            self.assertRaises(OSError, posix.sched_get_priority_min, -23)
            self.assertRaises(OSError, posix.sched_get_priority_max, -23)
    @unittest.skipUnless(hasattr(posix, 'sched_setscheduler'), "can't change scheduler")
    def test_get_and_set_scheduler_and_param(self):
        possible_schedulers = [sched for name, sched in posix.__dict__.items()
                               if name.startswith("SCHED_")]
        mine = posix.sched_getscheduler(0)
        self.assertIn(mine, possible_schedulers)
        try:
            parent = posix.sched_getscheduler(os.getppid())
        except OSError as e:
            if e.errno != errno.EPERM:
                raise
        else:
            self.assertIn(parent, possible_schedulers)
        self.assertRaises(OSError, posix.sched_getscheduler, -1)
        self.assertRaises(OSError, posix.sched_getparam, -1)
        param = posix.sched_getparam(0)
        self.assertIsInstance(param.sched_priority, int)
        # POSIX states that calling sched_setparam() or sched_setscheduler() on
        # a process with a scheduling policy other than SCHED_FIFO or SCHED_RR
        # is implementation-defined: NetBSD and FreeBSD can return EINVAL.
        if not sys.platform.startswith(('freebsd', 'netbsd')):
            try:
                posix.sched_setscheduler(0, mine, param)
                posix.sched_setparam(0, param)
            except OSError as e:
                if e.errno != errno.EPERM:
                    raise
        self.assertRaises(OSError, posix.sched_setparam, -1, param)
        self.assertRaises(OSError, posix.sched_setscheduler, -1, mine, param)
        self.assertRaises(TypeError, posix.sched_setscheduler, 0, mine, None)
        self.assertRaises(TypeError, posix.sched_setparam, 0, 43)
        param = posix.sched_param(None)
        self.assertRaises(TypeError, posix.sched_setparam, 0, param)
        large = 214748364700
        param = posix.sched_param(large)
        self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
        param = posix.sched_param(sched_priority=-large)
        self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
    @unittest.skipUnless(hasattr(posix, "sched_rr_get_interval"), "no function")
    def test_sched_rr_get_interval(self):
        try:
            interval = posix.sched_rr_get_interval(0)
        except OSError as e:
            # This likely means that sched_rr_get_interval is only valid for
            # processes with the SCHED_RR scheduler in effect.
            if e.errno != errno.EINVAL:
                raise
            self.skipTest("only works on SCHED_RR processes")
        self.assertIsInstance(interval, float)
        # Reasonable constraints, I think.
        self.assertGreaterEqual(interval, 0.)
        self.assertLess(interval, 1.)
    @requires_sched_affinity
    def test_sched_getaffinity(self):
        mask = posix.sched_getaffinity(0)
        self.assertIsInstance(mask, set)
        self.assertGreaterEqual(len(mask), 1)
        self.assertRaises(OSError, posix.sched_getaffinity, -1)
        for cpu in mask:
            self.assertIsInstance(cpu, int)
            self.assertGreaterEqual(cpu, 0)
            self.assertLess(cpu, 1 << 32)
    @requires_sched_affinity
    def test_sched_setaffinity(self):
        mask = posix.sched_getaffinity(0)
        if len(mask) > 1:
            # Empty masks are forbidden
            mask.pop()
        posix.sched_setaffinity(0, mask)
        self.assertEqual(posix.sched_getaffinity(0), mask)
        self.assertRaises(OSError, posix.sched_setaffinity, 0, [])
        self.assertRaises(ValueError, posix.sched_setaffinity, 0, [-10])
        self.assertRaises(OverflowError, posix.sched_setaffinity, 0, [1<<128])
        self.assertRaises(OSError, posix.sched_setaffinity, -1, mask)
    def test_rtld_constants(self):
        # check presence of major RTLD_* constants
        posix.RTLD_LAZY
        posix.RTLD_NOW
        posix.RTLD_GLOBAL
        posix.RTLD_LOCAL
    @unittest.skipUnless(hasattr(os, 'SEEK_HOLE'),
                         "test needs an OS that reports file holes")
    def test_fs_holes(self):
        # Even if the filesystem doesn't report holes,
        # if the OS supports it the SEEK_* constants
        # will be defined and will have a consistent
        # behaviour:
        # os.SEEK_DATA = current position
        # os.SEEK_HOLE = end of file position
        with open(support.TESTFN, 'r+b') as fp:
            fp.write(b"hello")
            fp.flush()
            size = fp.tell()
            fno = fp.fileno()
            try:
                for i in range(size):
                    self.assertEqual(i, os.lseek(fno, i, os.SEEK_DATA))
                    self.assertLessEqual(size, os.lseek(fno, i, os.SEEK_HOLE))
                self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_DATA)
                self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_HOLE)
            except OSError:
                # Some OSs claim to support SEEK_HOLE/SEEK_DATA
                # but it is not true.
                # For instance:
                # http://lists.freebsd.org/pipermail/freebsd-amd64/2012-January/014332.html
                raise unittest.SkipTest("OSError raised!")
    def test_path_error2(self):
        """
        Test functions that call path_error2(), providing two filenames in their exceptions.
        """
        for name in ("rename", "replace", "link"):
            function = getattr(os, name, None)
            if function is None:
                continue
            for dst in ("noodly2", support.TESTFN):
                try:
                    function('doesnotexistfilename', dst)
                except OSError as e:
                    self.assertIn("'doesnotexistfilename' -> '{}'".format(dst), str(e))
                    break
            else:
                self.fail("No valid path_error2() test for os." + name)
    def test_path_with_null_character(self):
        fn = support.TESTFN
        fn_with_NUL = fn + '\0'
        self.addCleanup(support.unlink, fn)
        support.unlink(fn)
        fd = None
        try:
            with self.assertRaises(ValueError):
                fd = os.open(fn_with_NUL, os.O_WRONLY | os.O_CREAT) # raises
        finally:
            if fd is not None:
                os.close(fd)
        self.assertFalse(os.path.exists(fn))
        self.assertRaises(ValueError, os.mkdir, fn_with_NUL)
        self.assertFalse(os.path.exists(fn))
        open(fn, 'wb').close()
        self.assertRaises(ValueError, os.stat, fn_with_NUL)
    def test_path_with_null_byte(self):
        fn = os.fsencode(support.TESTFN)
        fn_with_NUL = fn + b'\0'
        self.addCleanup(support.unlink, fn)
        support.unlink(fn)
        fd = None
        try:
            with self.assertRaises(ValueError):
                fd = os.open(fn_with_NUL, os.O_WRONLY | os.O_CREAT) # raises
        finally:
            if fd is not None:
                os.close(fd)
        self.assertFalse(os.path.exists(fn))
        self.assertRaises(ValueError, os.mkdir, fn_with_NUL)
        self.assertFalse(os.path.exists(fn))
        open(fn, 'wb').close()
        self.assertRaises(ValueError, os.stat, fn_with_NUL)
class PosixGroupsTester(unittest.TestCase):
    def setUp(self):
        if posix.getuid() != 0:
            raise unittest.SkipTest("not enough privileges")
        if not hasattr(posix, 'getgroups'):
            raise unittest.SkipTest("need posix.getgroups")
        if sys.platform == 'darwin':
            raise unittest.SkipTest("getgroups(2) is broken on OSX")
        self.saved_groups = posix.getgroups()
    def tearDown(self):
        if hasattr(posix, 'setgroups'):
            posix.setgroups(self.saved_groups)
        elif hasattr(posix, 'initgroups'):
            name = pwd.getpwuid(posix.getuid()).pw_name
            posix.initgroups(name, self.saved_groups[0])
    @unittest.skipUnless(hasattr(posix, 'initgroups'),
                         "test needs posix.initgroups()")
    def test_initgroups(self):
        # find missing group
        g = max(self.saved_groups or [0]) + 1
        name = pwd.getpwuid(posix.getuid()).pw_name
        posix.initgroups(name, g)
        self.assertIn(g, posix.getgroups())
    @unittest.skipUnless(hasattr(posix, 'setgroups'),
                         "test needs posix.setgroups()")
    def test_setgroups(self):
        for groups in [[0], list(range(16))]:
            posix.setgroups(groups)
            self.assertListEqual(groups, posix.getgroups())
def test_main():
    try:
        support.run_unittest(PosixTester, PosixGroupsTester)
    finally:
        support.reap_children()
if __name__ == '__main__':
    test_main()
tools/check_format.py | #!/usr/bin/env python
import argparse
import fileinput
import os
import os.path
import re
import subprocess
import sys
EXCLUDED_PREFIXES = ("./generated/", "./thirdparty/", "./build", "./.git/",
"./bazel-", "./bazel/external", "./.cache")
SUFFIXES = (".cc", ".h", "BUILD", ".md", ".rst")
DOCS_SUFFIX = (".md", ".rst")
# Files in these paths can make reference to protobuf stuff directly
GOOGLE_PROTOBUF_WHITELIST = ('ci/prebuilt', 'source/common/protobuf')
CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-5.0")
BUILDIFIER_PATH = os.getenv("BUILDIFIER_BIN", "$GOPATH/bin/buildifier")
ENVOY_BUILD_FIXER_PATH = os.path.join(
os.path.dirname(os.path.abspath(sys.argv[0])), "envoy_build_fixer.py")
HEADER_ORDER_PATH = os.path.join(
os.path.dirname(os.path.abspath(sys.argv[0])), "header_order.py")
found_error = False
def printError(error):
global found_error
found_error = True
print "ERROR: %s" % (error)
def checkNamespace(file_path):
with open(file_path) as f:
text = f.read()
if not re.search('^\s*namespace\s+Envoy\s*{', text, re.MULTILINE) and \
not 'NOLINT(namespace-envoy)' in text:
printError("Unable to find Envoy namespace or NOLINT(namespace-envoy) for file: %s" % file_path)
return False
return True
# To avoid breaking the Lyft import, we just check for path inclusion here.
def whitelistedForProtobufDeps(file_path):
return any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_WHITELIST)
def findSubstringAndPrintError(pattern, file_path, error_message):
with open(file_path) as f:
text = f.read()
if pattern in text:
printError(error_message)
for i, line in enumerate(text.splitlines()):
if pattern in line:
printError(" %s:%s" % (file_path, i + 1))
return False
return True
def checkProtobufExternalDepsBuild(file_path):
if whitelistedForProtobufDeps(file_path):
return True
message = ("%s has unexpected direct external dependency on protobuf, use "
"//source/common/protobuf instead." % file_path)
return findSubstringAndPrintError('"protobuf"', file_path, message)
def checkProtobufExternalDeps(file_path):
if whitelistedForProtobufDeps(file_path):
return True
with open(file_path) as f:
text = f.read()
if '"google/protobuf' in text or "google::protobuf" in text:
printError(
"%s has unexpected direct dependency on google.protobuf, use "
"the definitions in common/protobuf/protobuf.h instead." % file_path)
return False
return True
def isBuildFile(file_path):
basename = os.path.basename(file_path)
if basename in {"BUILD", "BUILD.bazel"} or basename.endswith(".BUILD"):
return True
return False
def checkFileContents(file_path):
message = "%s has over-enthusiastic spaces:" % file_path
  findSubstringAndPrintError('.  ', file_path, message)
def fixFileContents(file_path):
for line in fileinput.input(file_path, inplace=True):
# Strip double space after '.' This may prove overenthusiastic and need to
# be restricted to comments and metadata files but works for now.
print "%s" % (line.replace('. ', '. ').rstrip())
def checkFilePath(file_path):
if isBuildFile(file_path):
command = "%s %s | diff %s -" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)
executeCommand(command, "envoy_build_fixer check failed", file_path)
command = "cat %s | %s -mode=fix | diff %s -" % (file_path, BUILDIFIER_PATH, file_path)
executeCommand(command, "buildifier check failed", file_path)
checkProtobufExternalDepsBuild(file_path)
return
checkFileContents(file_path)
if file_path.endswith(DOCS_SUFFIX):
return
checkNamespace(file_path)
checkProtobufExternalDeps(file_path)
command = ("%s %s | diff %s -" % (HEADER_ORDER_PATH, file_path,
file_path))
executeCommand(command, "header_order.py check failed", file_path)
command = ("%s %s | diff %s -" % (CLANG_FORMAT_PATH, file_path,
file_path))
executeCommand(command, "clang-format check failed", file_path)
# Example target outputs are:
# - "26,27c26"
# - "12,13d13"
# - "7a8,9"
def executeCommand(command, error_message, file_path,
regex=re.compile(r"^(\d+)[a|c|d]?\d*(?:,\d+[a|c|d]?\d*)?$")):
try:
subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if (e.returncode != 0 and e.returncode != 1):
print "ERROR: something went wrong while executing: %s" % e.cmd
sys.exit(1)
# In case we can't find any line numbers, call printError at first.
printError("%s for file: %s" % (error_message, file_path))
for line in e.output.splitlines():
for num in regex.findall(line):
printError(" %s:%s" % (file_path, num))
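# Illustration added for clarity (not part of the original script): for diff hunk
# headers like the ones listed above, the default `regex` captures only the leading
# line number, which is what gets reported next to the file path. A rough sketch,
# assuming the default regex argument:
#
#   >>> hunk_re = re.compile(r"^(\d+)[a|c|d]?\d*(?:,\d+[a|c|d]?\d*)?$")
#   >>> hunk_re.findall("26,27c26")
#   ['26']
#   >>> hunk_re.findall("7a8,9")
#   ['7']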
def fixFilePath(file_path):
if isBuildFile(file_path):
if os.system("%s %s %s" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0:
printError("envoy_build_fixer rewrite failed for file: %s" % file_path)
if os.system("%s -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0:
printError("buildifier rewrite failed for file: %s" % file_path)
return
fixFileContents(file_path)
if file_path.endswith(DOCS_SUFFIX):
return
if not checkNamespace(file_path) or not checkProtobufExternalDepsBuild(
file_path) or not checkProtobufExternalDeps(file_path):
printError("This cannot be automatically corrected. Please fix by hand.")
command = "%s --rewrite %s" % (HEADER_ORDER_PATH, file_path)
if os.system(command) != 0:
printError("header_order.py rewrite error: %s" % (file_path))
command = "%s -i %s" % (CLANG_FORMAT_PATH, file_path)
if os.system(command) != 0:
printError("clang-format rewrite error: %s" % (file_path))
def checkFormat(file_path):
if file_path.startswith(EXCLUDED_PREFIXES):
return
if not file_path.endswith(SUFFIXES):
return
if operation_type == "check":
checkFilePath(file_path)
if operation_type == "fix":
fixFilePath(file_path)
def checkFormatVisitor(arg, dir_name, names):
for file_name in names:
checkFormat(dir_name + "/" + file_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Check or fix file format.')
parser.add_argument('operation_type', type=str, choices=['check', 'fix'],
help="specify if the run should 'check' or 'fix' format.")
parser.add_argument('target_path', type=str, nargs="?", default=".", help="specify the root directory"
" for the script to recurse over. Default '.'.")
parser.add_argument('--add-excluded-prefixes', type=str, nargs="+", help="exclude additional prefixes.")
args = parser.parse_args()
operation_type = args.operation_type
target_path = args.target_path
if args.add_excluded_prefixes:
EXCLUDED_PREFIXES += tuple(args.add_excluded_prefixes)
if os.path.isfile(target_path):
checkFormat("./" + target_path)
else:
os.path.walk(target_path, checkFormatVisitor, None)
if found_error:
print "ERROR: check format failed. run 'tools/check_format.py fix'"
sys.exit(1)
run_squad_final.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os
import random
import modeling
import optimization
import tokenization
import six
import tensorflow as tf
import io
flags = tf.flags
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
"Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer(
"n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer(
"max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_float(
"null_score_diff_threshold", 0.0,
"If null_score - best_non_null is greater than the threshold predict null.")
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
## squadexample has multiple answers
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
## above become list
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
##find answer index in document
def answer_index_in_document(answer_list, document):
indexs=[]
doc=document.lower()
for answer_string_in_doc in answer_list:
index = doc.find(answer_string_in_doc.lower())
while(index!=-1):
indexs.append((answer_string_in_doc.lower(),index))
index = doc.find(answer_string_in_doc.lower(),index+1)
return indexs
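# Usage sketch (illustrative only, not from the original file): the helper does a
# case-insensitive substring search and returns every (answer, char_offset) match, e.g.
#   answer_index_in_document(["Cat"], "The cat sat on the cat mat")
#   -> [("cat", 4), ("cat", 19)]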
##
def read_squad_examples(input_file, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
with io.open(input_file, "r",encoding='utf8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
#i_num_cnt = 0
for entry in input_data:
'''
i_num_cnt += 1
if (i_num_cnt > 10):
break;
'''
for i_num,paragraph in enumerate(entry["paragraphs"]):
paragraph_text = paragraph["context"]#[0:2000]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
#print(char_to_word_offset)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = []
end_position = []
orig_answer_text = []
is_impossible = False
if is_training:
if FLAGS.version_2_with_negative:
is_impossible = qa["is_impossible"]
## if (len(qa["answers"]) != 1) and (not is_impossible):
## raise ValueError(
## "For training, each question should have exactly 1 answer.")
if not is_impossible:
## read answer list
answers= qa["answers"]
indexs=answer_index_in_document(answers,paragraph_text)
if not len(indexs):
tf.logging.warning("Could not find answer for %s \n",qa['id'])
for index in indexs:
orig_answer_text.append(index[0])
answer_offset = index[1]
answer_length = len(index[0])
start_position.append(char_to_word_offset[answer_offset])
if(answer_offset + answer_length - 1>len(char_to_word_offset)-1):
print(qa['id'])
print('\n')
print(index[1])
print(index[0])
print(answer_length)
print(len(char_to_word_offset))
print('\n')
print(paragraph_text)
end_position.append(char_to_word_offset[answer_offset + answer_length -
1])
##
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[char_to_word_offset[answer_offset]:(char_to_word_offset[answer_offset + answer_length -
1] + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(index[0]))
## not checking things above
if actual_text.lower().find(cleaned_answer_text) == -1:
tf.logging.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
## else:
## start_position = -1
## end_position = -1
## orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
##
print("total example:")
print(len(examples))
print("\n")
##
return examples
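# Note added for clarity (not in the original): unlike the stock BERT SQuAD reader,
# the examples built above carry *lists* in orig_answer_text / start_position /
# end_position, one entry per occurrence of any accepted answer string in the passage.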
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
example_num=0
for (example_index, example) in enumerate(examples):
##
example_num+=1
    if example_num % 100 == 0:
      print("processing example %d" % example_num)
print('\n')
##
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = []
tok_end_position = []
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
## convert char_index to token_index
for i in range(len(example.start_position)):
i_tok_start_position=orig_to_tok_index[example.start_position[i]]
if example.end_position[i] < len(example.doc_tokens) - 1:
i_tok_end_position= orig_to_tok_index[example.end_position[i] + 1] - 1
else:
i_tok_end_position= len(all_doc_tokens) - 1
(i_tok_start_position, i_tok_end_position) = _improve_answer_span(
all_doc_tokens, i_tok_start_position, i_tok_end_position, tokenizer,
example.orig_answer_text[i])
tok_start_position.append(i_tok_start_position)
tok_end_position.append(i_tok_end_position)
##
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
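    # Illustrative example (hypothetical numbers, not from the original code): with
    # len(all_doc_tokens) == 300, max_tokens_for_doc == 200 and doc_stride == 128,
    # the loop above yields
    #   doc_spans == [DocSpan(start=0, length=200), DocSpan(start=128, length=172)]
    # i.e. overlapping windows that together cover the whole passage.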
    # Concatenate the question and the passage as [CLS] + query + [SEP] + context + [SEP], with the question first.
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
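      # For illustration (hypothetical values, not from the original code): with
      # max_seq_length == 8 and five real wordpieces, the padded features look like
      #   input_ids   -> [id_CLS, q1, id_SEP, d1, id_SEP, 0, 0, 0]
      #   input_mask  -> [1, 1, 1, 1, 1, 0, 0, 0]
      #   segment_ids -> [0, 0, 0, 1, 1, 0, 0, 0]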
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = True
##find out whether a span contain any of answers
start_position=[]
end_position=[]
for i in range(len(tok_start_position)):
if (tok_start_position[i] >= doc_start and
tok_end_position[i] <= doc_end):
out_of_span=False
doc_offset = len(query_tokens) + 2
start_position.append(tok_start_position[i] - doc_start + doc_offset)
end_position.append(tok_end_position[i] - doc_start + doc_offset)
break
##
if out_of_span:
continue
start_position = [0]
end_position = [0]
## else:
## doc_offset = len(query_tokens) + 2
## start_position = tok_start_position - doc_start + doc_offset
## end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
'''
if example_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (unique_id))
tf.logging.info("example_index: %s" % (example_index))
tf.logging.info("doc_span_index: %s" % (doc_span_index))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
tf.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
tf.logging.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
tf.logging.info("start_position: %d" % (start_position))
tf.logging.info("end_position: %d" % (end_position))
tf.logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
'''
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output_fn(feature)
unique_id += 1
print("input_feature done.")
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
  # Question: What country is the top exporter of electronics?
  # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
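  # Worked example (illustrative, assuming the tokenizer leaves "1895" intact):
  # with doc_tokens = ["(", "1895", "-", "1943", ")", "."], input_start = 0,
  # input_end = 5 and orig_answer_text = "1895", the search below returns the
  # tighter span (1, 1) because " ".join(doc_tokens[1:2]) == "1895".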
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
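  # Putting numbers on the example above (spans of length 5, 'bought' at
  # position 7): span B (start=3) scores min(4, 0) + 0.01 * 5 = 0.05, while
  # span C (start=6) scores min(1, 3) + 0.01 * 5 = 1.05, so span C is chosen.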
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
final_hidden = model.get_sequence_output()
final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/squad/output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden,
[batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0)
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
return (start_logits, end_logits)
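# Shape summary for create_model (B = batch, S = seq_length, H = hidden_size):
# final_hidden [B, S, H] -> matmul with the [2, H] output weights -> logits
# [B*S, 2] -> reshape to [B, S, 2] -> transpose to [2, B, S] -> unstack into
# start_logits [B, S] and end_logits [B, S].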
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(start_logits, end_logits) = create_model(
bert_config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
seq_length = modeling.get_shape_list(input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.softmax(logits, axis=-1)
loss = -tf.reduce_mean(tf.log(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1)))
return loss
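      # In other words, compute_loss is the usual cross-entropy over the S
      # candidate positions: loss = -mean(log(softmax(logits)[true_position])).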
start_positions = features["start_positions"]
end_positions = features["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2.0
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"unique_ids": unique_ids,
"start_logits": start_logits,
"end_logits": end_logits,
}
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
else:
raise ValueError(
"Only TRAIN and PREDICT modes are supported: %s" % (mode))
return output_spec
return model_fn
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"unique_ids": tf.FixedLenFeature([], tf.int64),
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
if is_training:
name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file):
"""Write final predictions to the json file and log-odds of null if needed."""
tf.logging.info("Writing predictions to: %s" % (output_prediction_file))
tf.logging.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
    min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if FLAGS.version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if FLAGS.version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
    # if we didn't include the empty option in the n-best, include it
if FLAGS.version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not FLAGS.version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > FLAGS.null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
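    # Illustrative numbers for the SQuAD 2.0 branch above: if score_null = 2.0
    # and the best non-null span has start_logit + end_logit = 5.0, then
    # score_diff = -3.0; with a null_score_diff_threshold of 0.0 (the usual
    # default) the span text is kept rather than the empty answer.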
with tf.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if FLAGS.version_2_with_negative:
with tf.gfile.GFile(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
  # Therefore, we have to apply a semi-complicated alignment heuristic between
  # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
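  # e.g. _strip_spaces("Steve Smith's") returns ("SteveSmith's",
  # {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 6, 6: 7, ...}): stripped index 5 maps
  # back to original index 6 because the space at index 5 was removed.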
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if FLAGS.verbose_logging:
tf.logging.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if FLAGS.verbose_logging:
tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
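# Quick sanity check (illustrative): _compute_softmax([1.0, 2.0]) is roughly
# [0.269, 0.731]; the max score is subtracted before exponentiation purely for
# numerical stability and does not change the resulting probabilities.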
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.python_io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature(feature.start_position)
features["end_positions"] = create_int_feature(feature.end_position)
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def validate_flags_or_throw(bert_config):
"""Validate the input FLAGS or throw an exception."""
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if FLAGS.do_train:
if not FLAGS.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError(
"The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
validate_flags_or_throw(bert_config)
tf.gfile.MakeDirs(FLAGS.output_dir)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = read_squad_examples(
input_file=FLAGS.train_file, is_training=True)
## Calculate the number of features first to calculate the num_train_steps
# We write to a temporary file to avoid storing very large constant tensors
# in memory.
train_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "train.tf_record"),
is_training=True)
convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature)
## The total number of features
train_features = train_writer.num_features
##
train_writer.close()
tf.logging.info("***** Running training *****")
tf.logging.info(" Num orig examples = %d", len(train_examples))
tf.logging.info(" Num split examples = %d", train_writer.num_features)
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
##
#train_features = 591335
#
num_train_steps = int(
#len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
train_features / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
# Pre-shuffle the input to avoid having to make a very large shuffle
# buffer in in the `input_fn`.
rng = random.Random(12345)
rng.shuffle(train_examples)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
#del train_examples
train_input_fn = input_fn_builder(
input_file=os.path.join(FLAGS.output_dir, "train.tf_record"),
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_predict:
eval_examples = read_squad_examples(
input_file=FLAGS.predict_file, is_training=False)
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature)
eval_writer.close()
tf.logging.info("***** Running predictions *****")
tf.logging.info(" Num orig examples = %d", len(eval_examples))
tf.logging.info(" Num split examples = %d", len(eval_features))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
## write features to file
tf.logging.info("Writing features")
json_file=os.path.join(FLAGS.output_dir, "test_features.json")
    with open(json_file, 'w') as outfile:
all_features=[]
for temp_feature in eval_features:
feature_dict={
"tokens":temp_feature.tokens,
"token_to_orig_map":temp_feature.token_to_orig_map
}
all_features.append(feature_dict)
json.dump(all_features, outfile, indent=4, sort_keys=True, ensure_ascii=False)
    ## read features back from file (round-trip check; keep the full in-memory
    ## eval_features, which write_predictions below still needs)
    with io.open(json_file, "r", encoding='utf8') as reader:
      input_data = json.load(reader)
    for entry, temp_feature in zip(input_data, eval_features):
      temp_feature.tokens = entry["tokens"]
      # JSON object keys are strings, so convert them back to int token indices
      temp_feature.token_to_orig_map = {
          int(k): v for k, v in entry["token_to_orig_map"].items()}
##
all_results = []
predict_input_fn = input_fn_builder(
## input_file=eval_writer.filename,
input_file=os.path.join(FLAGS.output_dir, "eval.tf_record"),
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
# If running eval on the TPU, you will need to specify the number of
# steps.
all_results = []
for result in estimator.predict(
predict_input_fn, yield_single_examples=True):
if len(all_results) % 1000 == 0:
tf.logging.info("Processing example: %d" % (len(all_results)))
unique_id = int(result["unique_ids"])
start_logits = [float(x) for x in result["start_logits"].flat]
end_logits = [float(x) for x in result["end_logits"].flat]
all_results.append(
RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
output_prediction_file = os.path.join(FLAGS.output_dir, "predictions.json")
output_nbest_file = os.path.join(FLAGS.output_dir, "nbest_predictions.json")
output_null_log_odds_file = os.path.join(FLAGS.output_dir, "null_odds.json")
write_predictions(eval_examples, eval_features, all_results,
FLAGS.n_best_size, FLAGS.max_answer_length,
FLAGS.do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file)
if __name__ == "__main__":
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| [] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
gothic/gothic.go | /*
Package gothic wraps common behaviour when using Goth. This makes it quick, and easy, to get up
and running with Goth. Of course, if you want complete control over how things flow, in regards
to the authentication process, feel free and use Goth directly.
See https://github.com/markbates/goth/blob/master/examples/main.go to see this in action.
*/
package gothic
import (
"bytes"
"compress/gzip"
"context"
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"github.com/gorilla/mux"
"github.com/gorilla/sessions"
"github.com/markbates/goth"
)
// SessionName is the key used to access the session store.
const SessionName = "_gothic_session"
// Store can/should be set by applications using gothic. The default is a cookie store.
var Store sessions.Store
var defaultStore sessions.Store
var keySet = false
type key int
// ProviderParamKey can be used as a key in context when passing in a provider
const ProviderParamKey key = iota
func init() {
key := []byte(os.Getenv("SESSION_SECRET"))
keySet = len(key) != 0
cookieStore := sessions.NewCookieStore([]byte(key))
cookieStore.Options.HttpOnly = true
Store = cookieStore
defaultStore = Store
}
/*
BeginAuthHandler is a convenience handler for starting the authentication process.
It expects to be able to get the name of the provider from the query parameters
as either "provider" or ":provider".
BeginAuthHandler will redirect the user to the appropriate authentication end-point
for the requested provider.
See https://github.com/markbates/goth/examples/main.go to see this in action.
*/
func BeginAuthHandler(res http.ResponseWriter, req *http.Request) {
	providerName, err := GetProviderName(req)
	if err != nil {
		res.WriteHeader(http.StatusBadRequest)
		fmt.Fprintln(res, err)
		return
	}
	url, err := GetAuthURL(res, req, providerName)
	if err != nil {
		res.WriteHeader(http.StatusBadRequest)
		fmt.Fprintln(res, err)
		return
	}
	http.Redirect(res, req, url, http.StatusTemporaryRedirect)
}
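// Typical wiring from an application (illustrative only; "callbackHandler" is a
// hypothetical handler that calls GetProviderName and then CompleteUserAuth):
//
//	r := mux.NewRouter()
//	r.HandleFunc("/auth/{provider}", gothic.BeginAuthHandler)
//	r.HandleFunc("/auth/{provider}/callback", callbackHandler)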
// SetState sets the state string associated with the given request.
// If no state string is associated with the request, one will be generated.
// This state is sent to the provider and can be retrieved during the
// callback.
var SetState = func(req *http.Request) string {
state := req.URL.Query().Get("state")
if len(state) > 0 {
return state
}
// If a state query param is not passed in, generate a random
// base64-encoded nonce so that the state on the auth URL
// is unguessable, preventing CSRF attacks, as described in
//
// https://auth0.com/docs/protocols/oauth2/oauth-state#keep-reading
nonceBytes := make([]byte, 64)
_, err := io.ReadFull(rand.Reader, nonceBytes)
if err != nil {
panic("gothic: source of randomness unavailable: " + err.Error())
}
return base64.URLEncoding.EncodeToString(nonceBytes)
}
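// Because SetState is a package-level variable it can be swapped out by the
// application, for example (illustrative; myCSRFToken is a hypothetical helper):
//
//	gothic.SetState = func(req *http.Request) string { return myCSRFToken(req) }
//
// The default implementation above falls back to a random 64-byte, URL-safe
// base64 nonce when no "state" query parameter is present.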
// GetState gets the state returned by the provider during the callback.
// This is used to prevent CSRF attacks, see
// http://tools.ietf.org/html/rfc6749#section-10.12
var GetState = func(req *http.Request) string {
return req.URL.Query().Get("state")
}
/*
GetAuthURL starts the authentication process with the requested provided.
It will return a URL that should be used to send users to.
It expects the provider name to be passed in explicitly; use GetProviderName to
resolve it from the request if needed.
I would recommend using the BeginAuthHandler instead of doing all of these steps
yourself, but that's entirely up to you.
*/
func GetAuthURL(res http.ResponseWriter, req *http.Request, providerName string) (string, error) {
if !keySet && defaultStore == Store {
fmt.Println("goth/gothic: no SESSION_SECRET environment variable is set. The default cookie store is not available and any calls will fail. Ignore this warning if you are using a different store.")
}
provider, err := goth.GetProvider(providerName)
if err != nil {
return "", err
}
sess, err := provider.BeginAuth(SetState(req))
if err != nil {
return "", err
}
url, err := sess.GetAuthURL()
if err != nil {
return "", err
}
err = StoreInSession(providerName, sess.Marshal(), req, res)
if err != nil {
return "", err
}
return url, err
}
/*
CompleteUserAuth does what it says on the tin. It completes the authentication
process and fetches all of the basic information about the user from the provider.
It expects the provider name to be passed in explicitly; use GetProviderName to
resolve it from the request if needed.
See https://github.com/markbates/goth/examples/main.go to see this in action.
*/
var CompleteUserAuth = func(res http.ResponseWriter, req *http.Request, providerName string) (goth.User, error) {
defer Logout(res, req)
if !keySet && defaultStore == Store {
fmt.Println("goth/gothic: no SESSION_SECRET environment variable is set. The default cookie store is not available and any calls will fail. Ignore this warning if you are using a different store.")
}
provider, err := goth.GetProvider(providerName)
if err != nil {
return goth.User{}, err
}
value, err := GetFromSession(providerName, req)
if err != nil {
return goth.User{}, err
}
sess, err := provider.UnmarshalSession(value)
if err != nil {
return goth.User{}, err
}
err = validateState(req, sess)
if err != nil {
return goth.User{}, err
}
user, err := provider.FetchUser(sess)
if err == nil {
// user can be found with existing session data
return user, err
}
params := req.URL.Query()
if params.Encode() == "" && req.Method == "POST" {
req.ParseForm()
params = req.Form
}
// get new token and retry fetch
_, err = sess.Authorize(provider, params)
if err != nil {
return goth.User{}, err
}
err = StoreInSession(providerName, sess.Marshal(), req, res)
if err != nil {
return goth.User{}, err
}
gu, err := provider.FetchUser(sess)
return gu, err
}
// validateState ensures that the state token param from the original
// AuthURL matches the one included in the current (callback) request.
func validateState(req *http.Request, sess goth.Session) error {
rawAuthURL, err := sess.GetAuthURL()
if err != nil {
return err
}
authURL, err := url.Parse(rawAuthURL)
if err != nil {
return err
}
reqState := GetState(req)
originalState := authURL.Query().Get("state")
if originalState != "" && (originalState != reqState) {
return errors.New("state token mismatch")
}
return nil
}
// Logout invalidates a user session.
func Logout(res http.ResponseWriter, req *http.Request) error {
session, err := Store.Get(req, SessionName)
if err != nil {
return err
}
session.Options.MaxAge = -1
session.Values = make(map[interface{}]interface{})
err = session.Save(req, res)
if err != nil {
return errors.New("Could not delete user session ")
}
return nil
}
// GetProviderName is a function used to get the name of a provider
// for a given request. By default, this provider is fetched from
// the URL query string. If you provide it in a different way,
// assign your own function to this variable that returns the provider
// name for your request.
var GetProviderName = getProviderName
func getProviderName(req *http.Request) (string, error) {
// try to get it from the url param "provider"
if p := req.URL.Query().Get("provider"); p != "" {
return p, nil
}
// try to get it from the url param ":provider"
if p := req.URL.Query().Get(":provider"); p != "" {
return p, nil
}
// try to get it from the context's value of "provider" key
if p, ok := mux.Vars(req)["provider"]; ok {
return p, nil
}
// try to get it from the go-context's value of "provider" key
if p, ok := req.Context().Value("provider").(string); ok {
return p, nil
}
// try to get it from the go-context's value of providerContextKey key
if p, ok := req.Context().Value(ProviderParamKey).(string); ok {
return p, nil
}
// As a fallback, loop over the used providers, if we already have a valid session for any provider (ie. user has already begun authentication with a provider), then return that provider name
providers := goth.GetProviders()
session, _ := Store.Get(req, SessionName)
for _, provider := range providers {
p := provider.Name()
value := session.Values[p]
if _, ok := value.(string); ok {
return p, nil
}
}
// if not found then return an empty string with the corresponding error
return "", errors.New("you must select a provider")
}
// GetContextWithProvider returns a new request context containing the provider
func GetContextWithProvider(req *http.Request, provider string) *http.Request {
return req.WithContext(context.WithValue(req.Context(), ProviderParamKey, provider))
}
// StoreInSession stores a specified key/value pair in the session.
func StoreInSession(key string, value string, req *http.Request, res http.ResponseWriter) error {
session, _ := Store.New(req, SessionName)
if err := updateSessionValue(session, key, value); err != nil {
return err
}
return session.Save(req, res)
}
// GetFromSession retrieves a previously-stored value from the session.
// If no value has previously been stored at the specified key, it will return an error.
func GetFromSession(key string, req *http.Request) (string, error) {
session, _ := Store.Get(req, SessionName)
value, err := getSessionValue(session, key)
if err != nil {
return "", errors.New("could not find a matching session for this request")
}
return value, nil
}
func getSessionValue(session *sessions.Session, key string) (string, error) {
value := session.Values[key]
if value == nil {
return "", fmt.Errorf("could not find a matching session for this request")
}
rdata := strings.NewReader(value.(string))
r, err := gzip.NewReader(rdata)
if err != nil {
return "", err
}
s, err := ioutil.ReadAll(r)
if err != nil {
return "", err
}
return string(s), nil
}
func updateSessionValue(session *sessions.Session, key, value string) error {
var b bytes.Buffer
gz := gzip.NewWriter(&b)
if _, err := gz.Write([]byte(value)); err != nil {
return err
}
if err := gz.Flush(); err != nil {
return err
}
if err := gz.Close(); err != nil {
return err
}
session.Values[key] = b.String()
return nil
}
| ["\"SESSION_SECRET\""] | [] | ["SESSION_SECRET"] | [] | ["SESSION_SECRET"] | go | 1 | 0 | |
sppas/plugins/sampa2ipa/ipamapping.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# Laboratoire Parole et Langage
#
# Copyright (C) 2017-2018 Brigitte Bigi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# this program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# this program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# ipamapping.py
# ---------------------------------------------------------------------------
import sys
import os
from argparse import ArgumentParser
PROGRAM = os.path.abspath(__file__)
SPPAS = os.getenv('SPPAS')
if SPPAS is None:
SPPAS = os.path.dirname(os.path.dirname(os.path.dirname(PROGRAM)))
if os.path.exists(SPPAS) is False:
print("ERROR: SPPAS not found.")
sys.exit(1)
sys.path.append(SPPAS)
from sppas.src.presenters import sppasMappingTier
from sppas.src.anndata import sppasTranscription, sppasRW
# ----------------------------------------------------------------------------
# Verify and extract args:
# ----------------------------------------------------------------------------
parser = ArgumentParser(usage="{:s} -i file"
"".format(os.path.basename(PROGRAM)),
description="... a program to map tags of labels.")
parser.add_argument("-i",
metavar="file",
required=True,
help='Input annotated file name.')
parser.add_argument("-n",
metavar="tiername",
required=True,
type=str,
help='One or several tier name separated by commas.')
if len(sys.argv) <= 1:
sys.argv.append('-h')
args = parser.parse_args()
# ----------------------------------------------------------------------------
# Load input data
# read content
parser = sppasRW(args.i)
trs_input = parser.read()
# fix table
if args.i.lower().endswith('textgrid') is True:
print('Converted with Praat-IPA mapping table.')
table = os.path.join(os.path.dirname(PROGRAM), "sampa2praat.repl")
else:
print('Converted with standard-IPA mapping table.')
table = os.path.join(os.path.dirname(PROGRAM), 'sampa2ipa.repl')
# load table
mapping = sppasMappingTier(table)
mapping.set_reverse(False) # from sampa to ipa direction
mapping.set_keep_miss(True) # keep unknown entries as given
mapping.set_miss_symbol("") # not used!
mapping.set_delimiters([]) # will use longest matching
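# Illustrative effect of the table (typical SAMPA -> IPA pairs such a file
# contains): "E" -> "ɛ", "@" -> "ə", "S" -> "ʃ". With keep_miss=True, any
# symbol that has no entry in the table is written out unchanged.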
# ----------------------------------------------------------------------------
# Convert input file
trs = sppasTranscription(name=trs_input.get_name()+"-IPA")
for n in args.n.split(','):
print(" -> Tier {:s}:".format(n))
tier = trs_input.find(n, case_sensitive=False)
if tier is not None:
new_tier = mapping.map_tier(tier)
new_tier.set_name(n+"-IPA")
trs.append(new_tier)
else:
print(" [IGNORED] Wrong tier name.")
# ----------------------------------------------------------------------------
# Write converted tiers
if len(trs) == 0:
print("No tier converted. No file created.")
sys.exit(1)
infile, inext = os.path.splitext(args.i)
filename = infile + "-ipa" + inext
parser.set_filename(infile + "-ipa" + inext)
parser.write(trs)
print("File {:s} created.".format(filename))
| [] | [] | ["SPPAS"] | [] | ["SPPAS"] | python | 1 | 0 | |
3.6.6/entrypoint.py | import subprocess
import glob
import os
HOME_SITE="/home/site/wwwroot"
DEFAULT_SITE="/opt/defaultsite"
STARTUP_COMMAND_FILE="/opt/startup/startupCommand"
APPSVC_VIRTUAL_ENV="antenv"
# Temp patch. Remove when Kudu script is available.
os.environ["PYTHONPATH"] = HOME_SITE + "/antenv/lib/python3.6/site-packages"
def subprocess_cmd(command):
print ('executing:')
print (command)
process = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print (proc_stdout.decode("utf-8"))
## Check for custom startup command
def custom_check():
with open(STARTUP_COMMAND_FILE, 'r') as myfile:
startupScript = myfile.read()
if not startupScript:
return None
else:
return startupScript
## Django check: If 'wsgi.py' is provided, identify as Django.
def check_django():
with os.scandir(HOME_SITE) as siteRoot:
for entry in siteRoot:
if not entry.name.startswith(APPSVC_VIRTUAL_ENV) and entry.is_dir():
print(entry.name)
with os.scandir(HOME_SITE + '/'+ entry.name) as subFolder:
for subEntry in subFolder:
if subEntry.name == 'wsgi.py' and subEntry.is_file():
return entry.name + '.wsgi'
return None
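# Illustrative layout: a deployment containing /home/site/wwwroot/mysite/wsgi.py
# makes check_django() return 'mysite.wsgi', which gunicorn loads as a module
# path (expecting an `application` callable inside it).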
## Flask check: If 'application.py' is provided or a .py module is present, identify as Flask.
def check_flask():
    py_modules = glob.glob(HOME_SITE + '/*.py')
    if len(py_modules) == 0:
        return None
    for module in py_modules:
        if os.path.basename(module) == 'application.py':
            print('found flask app')
            return 'application:app'
    return None
def start_server():
    # Try a custom startup command, then Django, then Flask; fall back to the
    # default site. Return as soon as one server has been started.
    cmd = custom_check()
    if cmd is not None:
        subprocess_cmd('. antenv/bin/activate')
        subprocess_cmd(
            'GUNICORN_CMD_ARGS="--bind=0.0.0.0" gunicorn ' + cmd
        )
        return
    cmd = check_django()
    if cmd is not None:
        subprocess_cmd('. antenv/bin/activate')
        subprocess_cmd(
            'GUNICORN_CMD_ARGS="--bind=0.0.0.0" gunicorn ' + cmd
        )
        return
    cmd = check_flask()
    if cmd is not None:
        subprocess_cmd('. antenv/bin/activate')
        subprocess_cmd(
            'GUNICORN_CMD_ARGS="--bind=0.0.0.0" gunicorn ' + cmd
        )
        return
    print('starting default app')
    subprocess_cmd(
        'GUNICORN_CMD_ARGS="--bind=0.0.0.0 --chdir /opt/defaultsite" gunicorn application:app'
    )
subprocess_cmd('python --version')
subprocess_cmd('pip --version')
start_server()
| [] | [] | ["PYTHONPATH"] | [] | ["PYTHONPATH"] | python | 1 | 0 | |
test/integration/reinvocation/reinvocation_test.go | package inject
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/linkerd/linkerd2/pkg/k8s"
"github.com/linkerd/linkerd2/testutil"
v1 "k8s.io/api/core/v1"
)
var TestHelper *testutil.TestHelper
func TestMain(m *testing.M) {
TestHelper = testutil.NewTestHelper()
os.Exit(m.Run())
}
// TestReinvocation installs https://github.com/kubemod/kubemod, then creates a modrule that
// adds the "linkerd.io/proxy-log-level: debug" annotation through a mutating webhook that
// gets called after the linkerd injector. The latter should be reinvoked so it reacts to that
// annotation.
func TestReinvocation(t *testing.T) {
// We're using a slightly reduced version of kubemod
// - Has the test.linkerd.io/is-test-data-plane label
// - Doesn't contain the validating admission controller
// - The mutating admission controller was renamed to z-kubemod-mutating-webhook-configuration
// so it runs after the linkerd injector (they're run alphabetically)
// - The command from the job generating the mwc cert and secret has been slightly changed in order
// to account for that renaming (see yaml)
if os.Getenv("RUN_ARM_TEST") != "" {
t.Skip("Skipped. Kubemod does not support ARM yet")
}
kubemodYAML, err := testutil.ReadFile("testdata/kubemod.yaml")
if err != nil {
testutil.AnnotatedFatalf(t, "failed to read kubemod.yaml", "failed to read kubemod.yaml: %s", err)
}
o, err := TestHelper.KubectlApply(kubemodYAML, "")
if err != nil {
testutil.AnnotatedFatalf(t, "failed to install kubemod",
"failed to install kubemod: %s\n%s", err, o)
}
ctx := context.Background()
nsAnnotations := map[string]string{
k8s.ProxyInjectAnnotation: k8s.ProxyInjectEnabled,
}
TestHelper.WithDataPlaneNamespace(ctx, "reinvocation", nsAnnotations, t, func(t *testing.T, ns string) {
modruleYAML, err := testutil.ReadFile("testdata/modrule.yaml")
if err != nil {
testutil.AnnotatedFatalf(t, "failed to read modrule.yaml", "failed to read modrule.yaml: %s", err)
}
err = TestHelper.RetryFor(40*time.Second, func() error {
o, err := TestHelper.KubectlApply(modruleYAML, ns)
if err != nil {
return fmt.Errorf("%s\n%s", err, o)
}
return nil
})
if err != nil {
testutil.AnnotatedFatalf(t, "failed to apply modrule.yaml",
"failed to apply modrule.yaml: %s", err)
}
podsYAML, err := testutil.ReadFile("testdata/inject_test.yaml")
if err != nil {
testutil.AnnotatedFatalf(t, "failed to read inject test file",
"failed to read inject test file: %s", err)
}
o, err = TestHelper.KubectlApply(podsYAML, ns)
if err != nil {
testutil.AnnotatedFatalf(t, "failed to install inject test file",
"failed to install inject test file: %s\n%s", err, o)
}
deployName := "inject-test-terminus"
var pod *v1.Pod
err = TestHelper.RetryFor(30*time.Second, func() error {
pods, err := TestHelper.GetPodsForDeployment(ctx, ns, deployName)
if err != nil {
return fmt.Errorf("failed to get pods for namespace %s", ns)
}
for _, p := range pods {
p := p //pin
creator, ok := p.Annotations[k8s.CreatedByAnnotation]
if ok && strings.Contains(creator, "proxy-injector") {
pod = &p
break
}
}
if pod == nil {
return fmt.Errorf("failed to find auto injected pod for deployment %s", deployName)
}
return nil
})
if err != nil {
testutil.AnnotatedFatalf(t, "failed to find autoinjected pod: ", err.Error())
}
injectionValidator := testutil.InjectValidator{
NoInitContainer: TestHelper.CNI() || TestHelper.Calico(),
// ****** TODO ****** this proves the changes made by the z-kubemod mutating webhook
// weren't surfaced by the injector. Once the injector implements reinvocation
// the log level should be "debug"
LogLevel: "warn,linkerd=info",
}
if err := injectionValidator.ValidatePod(&pod.Spec); err != nil {
testutil.AnnotatedFatalf(t, "received unexpected output", "received unexpected output\n%s", err.Error())
}
})
o, err = TestHelper.Kubectl(kubemodYAML, "delete", "-f", "-")
if err != nil {
testutil.AnnotatedFatalf(t, "failed to uninstall kubemod",
"failed to uninstall kubemod: %s\n%s", err, o)
}
}
| ["\"RUN_ARM_TEST\""] | [] | ["RUN_ARM_TEST"] | [] | ["RUN_ARM_TEST"] | go | 1 | 0 | |
cmd/kafka.go | /*
Copyright © 2021 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
ckafka "github.com/confluentinc/confluent-kafka-go/kafka"
"github.com/wwwillian/codepix-go/application/kafka"
"github.com/wwwillian/codepix-go/infrastructure/db"
"os"
"github.com/spf13/cobra"
)
// kafkaCmd represents the kafka command
var kafkaCmd = &cobra.Command{
Use: "kafka",
Short: "Start consuming transaction using Apache Kafka",
Run: func(cmd *cobra.Command, args []string) {
producer := kafka.NewKafkaProducer()
deliveryChan := make(chan ckafka.Event)
database := db.ConnectDB(os.Getenv("env"))
//kafka.Publish("Hello Consumer", "teste", producer, deliveryChan)
go kafka.DeliveryReport(deliveryChan)
kafkaProcessor := kafka.NewKafkaProcessor(database, producer, deliveryChan)
kafkaProcessor.Consume()
},
}
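// Illustrative invocation (assuming the compiled binary is named "codepix" and
// the "env" environment variable selects the database configuration read by
// db.ConnectDB):
//
//	env=dev ./codepix kafka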
func init() {
rootCmd.AddCommand(kafkaCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// kafkaCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// kafkaCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
| ["\"env\""] | [] | ["env"] | [] | ["env"] | go | 1 | 0 | |
train.py | import numpy as np
from datetime import datetime
import cv2
import os
from PIL import Image
import torch
import torchvision
from torchvision import datasets, transforms, models
from dataset import Asbest_segmentation
from tqdm import tqdm
import matplotlib.pyplot as plt
import rawpy
from utils import parse_anno_file, create_mask_file, big_image_predict, AverageMeter
from apex import amp
lr = 1e-5
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
path_to_data = 'asbest'
anno_stones = parse_anno_file(os.path.join(path_to_data, 'images', 'annotation.xml'))
anno_tr_stones = parse_anno_file(os.path.join(path_to_data, 'tr_stones', 'annotation.xml'))
transporter_file = os.path.join('asbest', 'transporter', '2020.03.16', 'TRANS_11:28:05_16-03-2020_36.png')
img_tr_stones_shape = (int(anno_tr_stones[0]['height']), int(anno_tr_stones[0]['width']))
stones_valid_indexes = np.array([3, 7, 12, 15, 20, 30, 40], dtype=int)
stones_train_indexes = np.array(list(set(np.arange(len(anno_stones))) - set(stones_valid_indexes)), dtype=int)
from torch import nn
from torch import sigmoid
import segmentation_models_pytorch as smp
device = torch.device("cuda:" + str(torch.cuda.device_count() - 1) if torch.cuda.is_available() else "cpu")
model = smp.Unet(encoder_name='efficientnet-b7', in_channels=1, classes=2, activation='sigmoid').to(device)
bce = smp.utils.losses.BCEWithLogitsLoss()
dice = smp.utils.losses.DiceLoss()
# criterion = nn.CrossEntropyLoss()
# criterion.__name__= 'loss'
def pixel_acc(pred, label):
_, preds = torch.max(pred, dim=1)
valid = (label >= 0).long()
acc_sum = torch.sum(valid * (preds == label).long())
pixel_sum = torch.sum(valid)
acc = acc_sum.float() / (pixel_sum.float() + 1e-10)
return acc
metrics = [
smp.utils.metrics.IoU(eps=1.),
smp.utils.metrics.Fscore(eps=1.),
]
optimizer = torch.optim.Adam([
{'params': model.decoder.parameters(), 'lr': lr},
{'params': model.encoder.parameters(), 'lr': lr},
])
model, optimizer = amp.initialize(model,
optimizer,
opt_level='O2',
# keep_batchnorm_fp32=True,
)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2,4], gamma=0.1)
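# With milestones=[2, 4] and gamma=0.1 this schedule would cut the learning rate
# from 1e-5 to 1e-6 after epoch 2 and to 1e-7 after epoch 4, but only if
# scheduler.step() were called each epoch; that call is currently commented out
# in the training loop below, so the rate stays at 1e-5 as written.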
def save_fig(crop_size, inp_size):
files = ['asbest/tr_stones/9_12:40:22_05-03-2020_1.png',
'asbest/tr_stones/1_11:32:12_16-03-2020_1.png',
'asbest/tr_stones/22_13:21:36_16-03-2020_1.png',
'asbest/tr_stones/20_12:23:59_16-03-2020_1.png',
]
full_image = None
for i, file in enumerate(files):
img = cv2.imread(file, cv2.IMREAD_UNCHANGED)
image, st_mask, asb_mask = big_image_predict(model, img, crop_size=crop_size, inp_size=inp_size, device=device)
if full_image is None:
full_image = np.concatenate((image, st_mask, asb_mask), axis=0)
else:
full_image = np.concatenate((full_image, np.concatenate((image, st_mask, asb_mask), axis=0)), axis=1)
cv2.imwrite('graphics/' + datetime.now().strftime("%H:%M:%S") + '_segm_images.png', cv2.resize((full_image * 255).astype(np.uint8), (int(full_image.shape[1] / 8), int(full_image.shape[0] / 8))))
# return full_image
from tqdm import trange, tqdm
from torch.utils.data import DataLoader
#model.load_state_dict(torch.load('stone_asbest_segmentation.pth'))
img_sizes = [(1*224, 1*224),
(2*224, 2*224),
(4*224, 4*224),
]
# crop_sizes = [
# None,
# None,
# None,
# ]
crop_sizes = [(8*224, 8*224),
(8*224, 8*224),
(8*224, 8*224),
# (int(img_stones_shape[0] // 2), int(img_stones_shape[1] // 3)),
# (int(img_stones_shape[0] // 2), int(img_stones_shape[1] // 3)),
# (int(img_stones_shape[0] // 2), int(img_stones_shape[1] // 3)),
]
num_frames = [(400, 70),
(400, 70),
(400, 70)] #, (400, 50), (400, 50)]
batches = [8, 4, 1]
num_epochs = [100, 100, 1000]
for epochs, batch, crop_size, img_size, num_frame in zip(num_epochs, batches, crop_sizes, img_sizes, num_frames):
stones_train_data = Asbest_segmentation(np.array(anno_stones)[stones_train_indexes],
transporter_file=transporter_file,
crop_size=crop_size,
img_size=img_size,
load_in_ram = True,
num_frames=num_frame[0],
normalize=True
)
stones_valid_data = Asbest_segmentation(np.array(anno_stones)[stones_valid_indexes],
transporter_file=transporter_file,
crop_size=crop_size,
img_size=img_size,
load_in_ram = True,
num_frames=num_frame[1],
normalize=True
)
stones_train_loader = DataLoader(stones_train_data, batch_size=batch, shuffle=True, num_workers=4)
stones_valid_loader = DataLoader(stones_valid_data, batch_size=1, shuffle=False, num_workers=2)
# tr_stones_train_data = Asbest_segmentation(anno_tr_stones[:-30],
# crop_size=(img_tr_stones_shape[0] // 2, img_tr_stones_shape[1] // 2),
# img_size=img_size,
# num_frames=100,
# normalize=True)
# tr_stones_valid_data = Asbest_segmentation(anno_tr_stones[-30:],
# crop_size=(img_tr_stones_shape[0] // 2, img_tr_stones_shape[1] // 2),
# img_size=img_size,
# num_frames=30,
# normalize=True)
# tr_stones_train_loader = DataLoader(tr_stones_train_data, batch_size=2, shuffle=True, num_workers=4)
# tr_stones_valid_loader = DataLoader(tr_stones_valid_data, batch_size=2, shuffle=False, num_workers=2)
with tqdm(total=len(stones_train_loader) + len(stones_valid_loader),# + len(tr_stones_train_loader) + len(tr_stones_valid_loader),
bar_format='{desc} epoch {postfix[0]} ' +
'| {n_fmt}/{total_fmt} {elapsed}<{remaining} ' +
'| loss : {postfix[1]:>2.4f} ' +
'| iou_st: {postfix[2]:>2.4f} ' +
'| iou_asb: {postfix[3]:>2.4f} ' +
'| val_loss : {postfix[4]:>2.4f} ' +
'| val_iou_st: {postfix[5]:>2.4f} ' +
'| val_iou_asb: {postfix[6]:>2.4f} '
,
postfix=[0, 0, 0, 0, 0, 0, 0], desc = 'Training', leave=True) as t:
for epoch in range(epochs):
t.postfix[0] = epoch + 1
average_total_loss = AverageMeter()
average_iou_stones = AverageMeter()
average_iou_asbest = AverageMeter()
model.train()
for data in stones_train_loader:
# torch.cuda.empty_cache()
inputs, st_masks, asb_masks = data
masks = torch.cat((st_masks, asb_masks), axis=1)
                inputs = inputs.to(device).float()
                masks = masks.to(device).float()
optimizer.zero_grad()
outputs = model(inputs)
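                # Weighted objective: BCE over both mask channels plus a Dice
                # term on the asbestos channel only.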
loss = 0.9 * bce(outputs, masks) + 0.1 * dice(outputs[:,1:,:,:], masks[:,1:,:,:])
# iou_stones = metrics[0](outputs[:,0:1,:,:], masks[:,0:1,:,:])
# fscore_stones = metrics[1](outputs[:,0:1,:,:], masks[:,0:1,:,:])
iou_asbest = metrics[0](outputs[:,1:,:,:], masks[:,1:,:,:])
# fscore_asbest = metrics[1](outputs[:,1:,:,:], masks[:,1:,:,:])
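                # Scale the loss for mixed-precision training before backprop.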
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
average_total_loss.update(loss.data.item())
# average_iou_stones.update(iou_stones.data.item())
average_iou_asbest.update(iou_asbest.data.item())
t.postfix[1] = average_total_loss.average()
# t.postfix[2] = average_iou_stones.average()
# t.postfix[3] = average_fscore_stones.average()
t.postfix[3] = average_iou_asbest.average()
# t.postfix[5] = average_fscore_asbest.average()
t.update()
## Validation
val_average_total_loss = AverageMeter()
val_average_iou_stones = AverageMeter()
# val_average_fscore_stones = AverageMeter()
val_average_iou_asbest = AverageMeter()
# val_average_fscore_asbest = AverageMeter()
with torch.no_grad():
model.eval()
for data in stones_valid_loader:
#
inputs, st_masks, asb_masks = data
masks = torch.cat((st_masks, asb_masks), axis=1)
                    inputs = inputs.to(device).float()
                    masks = masks.to(device).float()
outputs = model(inputs)
loss = 0.9 * bce(outputs, masks) + 0.1 * dice(outputs[:,1:,:,:], masks[:,1:,:,:])
# iou_stones = metrics[0](outputs[:,0:1,:,:], masks[:,0:1,:,:])
# fscore_stones = metrics[1](outputs[:,0:1,:,:], masks[:,0:1,:,:])
iou_asbest = metrics[0](outputs[:,1:,:,:], masks[:,1:,:,:])
# fscore_asbest = metrics[1](outputs[:,1:,:,:], masks[:,1:,:,:])
val_average_total_loss.update(loss.data.item())
# val_average_iou_stones.update(iou_stones.data.item())
# val_average_fscore_stones.update(fscore_stones.data.item())
val_average_iou_asbest.update(iou_asbest.data.item())
# val_average_fscore_asbest.update(fscore_asbest.data.item())
t.postfix[4] = val_average_total_loss.average()
# t.postfix[5] = val_average_iou_stones.average()
# t.postfix[8] = val_average_fscore_stones.average()
t.postfix[6] = val_average_iou_asbest.average()
# t.postfix[10] = val_average_fscore_asbest.average()
t.update()
# scheduler.step()
if (epoch + 1) % 50 == 0:
save_fig(crop_size=(img_tr_stones_shape[0] // 2, img_tr_stones_shape[1] // 2), inp_size=img_size)
t.reset()
torch.save(model.state_dict(), 'asbest_segmentation_b7.pth') | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
tests.py | import json
import os
import re
import datetime
from difflib import context_diff
from pathlib import Path
import six
from reporters_db import (
REPORTERS,
VARIATIONS_ONLY,
EDITIONS,
NAMES_TO_EDITIONS,
REGEX_VARIABLES,
)
from unittest import TestCase
from reporters_db.utils import substitute_editions, recursive_substitute
VALID_CITE_TYPES = (
"federal",
"neutral",
"scotus_early",
"specialty",
"specialty_west",
"specialty_lexis",
"state",
"state_regional",
)
def emit_strings(obj):
"""Recursively get all the strings out of a JSON object.
Convert ints to strs
"""
if isinstance(obj, dict):
# Feed the keys and items back into the function.
for k, v in obj.items():
for x in emit_strings(k):
yield x
for x in emit_strings(v):
yield x
elif isinstance(obj, list):
for item in obj:
for x in emit_strings(item):
yield x
elif isinstance(obj, int):
        yield str(obj)
elif isinstance(obj, six.text_type):
yield obj
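# Illustrative example: list(emit_strings({"name": ["A.", "A.2d"]})) yields
# "name", "A.", "A.2d".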
def iter_reporters():
for reporter_abbv, reporter_list in REPORTERS.items():
for reporter_data in reporter_list:
yield reporter_abbv, reporter_list, reporter_data
def iter_editions():
for reporter_abbv, reporter_list, reporter_data in iter_reporters():
for edition_abbv, edition in reporter_data["editions"].items():
yield edition_abbv, edition
class ConstantsTest(TestCase):
def test_any_keys_missing_editions(self):
"""Have we added any new reporters that lack a matching edition?"""
for reporter_abbv, reporter_list, reporter_data in iter_reporters():
self.assertIn(
reporter_abbv,
reporter_data["editions"],
msg="Could not find edition for key: %s" % reporter_abbv,
)
def test_for_variations_mapping_to_bad_keys(self):
"""Do we have a variation that maps to a key that doesn't exist in the
first place?
"""
for variations in VARIATIONS_ONLY.values():
for variation in variations:
self.assertIn(
EDITIONS[variation],
REPORTERS.keys(),
msg="Could not map variation to a valid reporter: %s"
% variation,
)
def test_basic_names_to_editions(self):
"""Do we get something like we expected in the NAME_TO_EDITION var?"""
self.assertEqual(
["A.", "A.2d", "A.3d"], NAMES_TO_EDITIONS["Atlantic Reporter"]
)
def test_editions_ordering(self):
"""Test Ill. App., where we don't have good start dates."""
self.assertEqual(
["Ill. App.", "Ill. App. 2d", "Ill. App. 3d"],
NAMES_TO_EDITIONS["Illinois Appellate Court Reports"],
)
def test_that_all_dates_are_converted_to_dates_not_strings(self):
"""Do we properly make the ISO-8601 date strings into Python dates?"""
# for reporter_abbv, reporter_list, reporter_data in iter_reporters():
for e_name, e_dates in iter_editions():
# e_name == "A. 2d"
# e_dates == {
# "end": "1938-12-31T00:00:00",
# "start": "1885-01-01T00:00:00"
# }
for key in ["start", "end"]:
is_date_or_none = (
isinstance(e_dates[key], datetime.datetime)
or e_dates[key] is None
)
self.assertTrue(
is_date_or_none,
msg=(
"%s dates in the reporter '%s' appear to be "
"coming through as '%s'"
% (key, e_name, type(e_dates[key]))
),
)
if key == "start":
start_is_not_none = e_dates[key] is not None
self.assertTrue(
start_is_not_none,
msg=(
"Start date in reporter '%s' appears to "
"be None, not 1750" % e_name
),
)
def test_all_reporters_have_valid_cite_type(self):
"""Do all reporters have valid cite_type values?"""
for reporter_abbv, reporter_list, reporter_data in iter_reporters():
self.assertIn(
reporter_data["cite_type"],
VALID_CITE_TYPES,
"%s did not have a valid cite_type value" % reporter_abbv,
)
def test_all_required_keys_no_extra_keys(self):
"""Are all required keys present? Are there any keys present that
shouldn't be?
"""
required_fields = [
"cite_type",
"editions",
"mlz_jurisdiction",
"name",
"variations",
]
optional_fields = [
"cite_format",
"publisher",
"notes",
"href",
"regexes",
"examples",
]
all_fields = required_fields + optional_fields
for reporter_abbv, reporter_list, reporter_data in iter_reporters():
# All required fields present?
for required_field in required_fields:
try:
reporter_data[required_field]
except KeyError:
self.fail(
"Reporter '%s' lacks required field '%s'"
% (reporter_abbv, required_field)
)
# No extra fields?
for k in reporter_data.keys():
self.assertIn(
k,
all_fields,
"Reporter '%s' has an unknown field '%s'"
% (reporter_abbv, k),
)
# No empty string values?
for k, v in reporter_data.items():
if isinstance(v, str):
self.assertTrue(
v != "",
msg="Field '%s' is empty in reporter '%s'"
% (k, reporter_abbv),
)
def test_no_variation_is_same_as_key(self):
"""Are any variations identical to the keys they're supposed to be
variations of?
"""
for variation, keys in VARIATIONS_ONLY.items():
for key in keys:
self.assertNotEqual(
variation,
key,
"The variation '%s' is identical to the key it's supposed "
"to be a variation of." % variation,
)
def test_fields_tidy(self):
"""Do fields have any messiness?
For example:
- some punctuation is not allowed in some keys
- spaces at beginning/end not allowed
"""
def cleaner(s):
return re.sub(r"[^ 0-9a-zA-Z.,\-'&()\[\]]", "", s.strip())
msg = "Got bad punctuation in: %s"
for reporter_abbv, reporter_list, reporter_data in iter_reporters():
self.assertEqual(
reporter_abbv, cleaner(reporter_abbv), msg=msg % reporter_abbv
)
for k in reporter_data["editions"].keys():
self.assertEqual(cleaner(k), k, msg=msg % k)
for k, v in reporter_data["variations"].items():
self.assertEqual(cleaner(k), k, msg=msg % k)
self.assertEqual(cleaner(v), v, msg=msg % v)
for s in emit_strings(REPORTERS):
self.assertEqual(
s.strip(), s, msg="Fields needs whitespace stripped: '%s'" % s
)
def test_nothing_ends_before_it_starts(self):
"""Do any editions have end dates before their start dates?"""
for k, edition in iter_editions():
if edition["start"] and edition["end"]:
self.assertLessEqual(
edition["start"],
edition["end"],
msg="It appears that edition %s ends before it "
"starts." % k,
)
def test_json_format(self):
"""Does format of reporters.json match json.dumps(json.loads(), sort_keys=True)? """
for file_name in ("reporters.json", "regexes.json"):
with self.subTest(file_name=file_name):
json_path = (
Path(__file__).parent / "reporters_db" / "data" / file_name
)
json_str = json_path.read_text()
reformatted = json.dumps(
json.loads(json_str),
indent=4,
ensure_ascii=False,
sort_keys=True,
)
reformatted += "\n"
if json_str != reformatted:
if os.environ.get("FIX_JSON"):
json_path.write_text(reformatted)
else:
diff = context_diff(
json_str.splitlines(),
reformatted.splitlines(),
fromfile="reporters.json",
tofile="expected.json",
)
self.fail(
("%s needs reformatting. " % file_name)
+ "Run with env var FIX_JSON=1 to update the file automatically. "
+ "Diff of actual vs. expected:\n"
+ "\n".join(diff)
)
def test_regexes(self):
"""Do custom regexes and examples match up?"""
for reporter_abbv, reporter_list, reporter_data in iter_reporters():
examples = reporter_data.get("examples", [])
matched_examples = set()
custom_regexes = {}
# check that each custom regex matches at least one example
for edition_abbv, edition in reporter_data["editions"].items():
if not edition.get("regexes"):
continue
with self.subTest(
"Check edition regexes", edition=edition_abbv
):
for edition_regex in edition["regexes"]:
full_regex = recursive_substitute(
edition_regex, REGEX_VARIABLES
)
regexes = substitute_editions(
full_regex,
edition_abbv,
reporter_data["variations"],
)
custom_regexes[edition_regex] = regexes
has_match = False
for example in examples:
for regex in regexes:
if re.match(regex + "$", example):
has_match = True
matched_examples.add(example)
break
if not has_match:
try:
import exrex
candidate = "Possible examples: %s" % [
exrex.getone(regexes[0], limit=3)
for _ in range(10)
]
except ImportError:
candidate = "Run 'pip install exrex' to generate a candidate example"
self.fail(
"Reporter '%s' has no match in 'examples' for custom regex '%s'.\nExpanded regexes: %s.\n%s"
% (
reporter_abbv,
edition_regex,
regexes,
candidate,
)
)
# check that each example is matched by at least one regex
if custom_regexes:
with self.subTest(
"Check all examples matched by custom regex",
reporter=reporter_abbv,
):
self.assertEqual(
set(examples),
matched_examples,
"Not all examples matched. If custom regexes are provided, all examples should match. Regexes tried: %s"
% custom_regexes,
)
if __name__ == "__main__":
import unittest
unittest.main()
| []
| []
| [
"FIX_JSON"
]
| [] | ["FIX_JSON"] | python | 1 | 0 | |
test/unit/test_config.py | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import pytest
from molecule import config
from molecule import platforms
from molecule import scenario
from molecule import state
from molecule import util
from molecule.dependency import ansible_galaxy
from molecule.dependency import gilt
from molecule.dependency import shell
from molecule.driver import azure
from molecule.driver import delegated
from molecule.driver import docker
from molecule.driver import ec2
from molecule.driver import gce
from molecule.driver import lxc
from molecule.driver import lxd
from molecule.driver import openstack
from molecule.driver import vagrant
from molecule.lint import yamllint
from molecule.provisioner import ansible
from molecule.verifier import goss
from molecule.verifier import inspec
from molecule.verifier import testinfra
def test_molecule_file_private_member(molecule_file_fixture, config_instance):
assert molecule_file_fixture == config_instance.molecule_file
def test_args_member(config_instance):
assert {} == config_instance.args
def test_command_args_member(config_instance):
x = {'subcommand': 'test'}
assert x == config_instance.command_args
def test_debug_property(config_instance):
assert not config_instance.debug
def test_env_file_property(config_instance):
config_instance.args = {'env_file': '.env'}
result = config_instance.env_file
assert util.abs_path(config_instance.args.get('env_file')) == result
def test_subcommand_property(config_instance):
assert 'test' == config_instance.subcommand
def test_action_property(config_instance):
assert config_instance.action is None
def test_action_setter(config_instance):
config_instance.action = 'foo'
assert 'foo' == config_instance.action
def test_init_calls_validate(patched_config_validate, config_instance):
patched_config_validate.assert_called_once_with()
def test_project_directory_property(config_instance):
assert os.getcwd() == config_instance.project_directory
def test_molecule_directory_property(config_instance):
x = os.path.join(os.getcwd(), 'molecule')
assert x == config_instance.molecule_directory
def test_dependency_property(config_instance):
assert isinstance(config_instance.dependency, ansible_galaxy.AnsibleGalaxy)
@pytest.fixture
def _config_dependency_gilt_section_data():
return {
'dependency': {
'name': 'gilt'
},
}
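# Section-data fixtures like the one above are consumed through pytest's
# indirect parametrization: the fixture name is passed to the config_instance
# fixture, which builds a molecule Config from that section.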
@pytest.mark.parametrize(
'config_instance', ['_config_dependency_gilt_section_data'], indirect=True)
def test_dependency_property_is_gilt(config_instance):
assert isinstance(config_instance.dependency, gilt.Gilt)
@pytest.fixture
def _config_dependency_shell_section_data():
return {
'dependency': {
'name': 'shell',
'command': 'bin/command',
},
}
@pytest.mark.parametrize(
'config_instance', ['_config_dependency_shell_section_data'],
indirect=True)
def test_dependency_property_is_shell(config_instance):
assert isinstance(config_instance.dependency, shell.Shell)
def test_driver_property(config_instance):
assert isinstance(config_instance.driver, docker.Docker)
@pytest.fixture
def _config_driver_azure_section_data():
return {
'driver': {
'name': 'azure'
},
}
@pytest.mark.parametrize(
'config_instance', ['_config_driver_azure_section_data'], indirect=True)
def test_driver_property_is_azure(config_instance):
assert isinstance(config_instance.driver, azure.Azure)
@pytest.fixture
def _config_driver_delegated_section_data():
return {
'driver': {
'name': 'delegated',
'options': {
'managed': False,
},
},
}
@pytest.mark.parametrize(
'config_instance', ['_config_driver_delegated_section_data'],
indirect=True)
def test_driver_property_is_delegated(config_instance):
assert isinstance(config_instance.driver, delegated.Delegated)
@pytest.fixture
def _config_driver_ec2_section_data():
return {
'driver': {
'name': 'ec2'
},
}
@pytest.mark.parametrize(
'config_instance', ['_config_driver_ec2_section_data'], indirect=True)
def test_driver_property_is_ec2(config_instance):
assert isinstance(config_instance.driver, ec2.EC2)
@pytest.fixture
def _config_driver_gce_section_data():
return {
'driver': {
'name': 'gce'
},
}
@pytest.mark.parametrize(
'config_instance', ['_config_driver_gce_section_data'], indirect=True)
def test_driver_property_is_gce(config_instance):
assert isinstance(config_instance.driver, gce.GCE)
@pytest.fixture
def _config_driver_lxc_section_data():
return {
'driver': {
'name': 'lxc'
},
}
@pytest.mark.parametrize(
'config_instance', ['_config_driver_lxc_section_data'], indirect=True)
def test_driver_property_is_lxc(config_instance):
assert isinstance(config_instance.driver, lxc.LXC)
@pytest.fixture
def _config_driver_lxd_section_data():
return {
'driver': {
'name': 'lxd'
},
}
@pytest.mark.parametrize(
'config_instance', ['_config_driver_lxd_section_data'], indirect=True)
def test_driver_property_is_lxd(config_instance):
assert isinstance(config_instance.driver, lxd.LXD)
@pytest.fixture
def _config_driver_openstack_section_data():
return {
'driver': {
'name': 'openstack'
},
}
@pytest.mark.parametrize(
'config_instance', ['_config_driver_openstack_section_data'],
indirect=True)
def test_driver_property_is_openstack(config_instance):
assert isinstance(config_instance.driver, openstack.Openstack)
@pytest.fixture
def _config_driver_vagrant_section_data():
return {
'driver': {
'name': 'vagrant',
'provider': {
'name': 'virtualbox',
},
},
}
@pytest.mark.parametrize(
'config_instance', ['_config_driver_vagrant_section_data'], indirect=True)
def test_driver_property_is_vagrant(config_instance):
assert isinstance(config_instance.driver, vagrant.Vagrant)
def test_drivers_property(config_instance):
x = [
'azure',
'delegated',
'docker',
'ec2',
'gce',
'lxc',
'lxd',
'openstack',
'vagrant',
]
assert x == config_instance.drivers
def test_env(config_instance):
config_instance.args = {'env_file': '.env'}
x = {
'MOLECULE_DEBUG':
'False',
'MOLECULE_FILE':
config_instance.molecule_file,
'MOLECULE_ENV_FILE':
util.abs_path(config_instance.args.get('env_file')),
'MOLECULE_INVENTORY_FILE':
config_instance.provisioner.inventory_file,
'MOLECULE_EPHEMERAL_DIRECTORY':
config_instance.scenario.ephemeral_directory,
'MOLECULE_SCENARIO_DIRECTORY':
config_instance.scenario.directory,
'MOLECULE_PROJECT_DIRECTORY':
config_instance.project_directory,
'MOLECULE_INSTANCE_CONFIG':
config_instance.driver.instance_config,
'MOLECULE_DEPENDENCY_NAME':
'galaxy',
'MOLECULE_DRIVER_NAME':
'docker',
'MOLECULE_LINT_NAME':
'yamllint',
'MOLECULE_PROVISIONER_NAME':
'ansible',
'MOLECULE_PROVISIONER_LINT_NAME':
'ansible-lint',
'MOLECULE_SCENARIO_NAME':
'default',
'MOLECULE_VERIFIER_NAME':
'testinfra',
'MOLECULE_VERIFIER_LINT_NAME':
'flake8',
'MOLECULE_VERIFIER_TEST_DIRECTORY':
config_instance.verifier.directory,
}
assert x == config_instance.env
def test_lint_property(config_instance):
assert isinstance(config_instance.lint, yamllint.Yamllint)
def test_platforms_property(config_instance):
assert isinstance(config_instance.platforms, platforms.Platforms)
def test_provisioner_property(config_instance):
assert isinstance(config_instance.provisioner, ansible.Ansible)
def test_scenario_property(config_instance):
assert isinstance(config_instance.scenario, scenario.Scenario)
def test_state_property(config_instance):
assert isinstance(config_instance.state, state.State)
def test_verifier_property(config_instance):
assert isinstance(config_instance.verifier, testinfra.Testinfra)
@pytest.fixture
def _config_verifier_inspec_section_data():
return {
'verifier': {
'name': 'inspec',
'lint': {
'name': 'rubocop',
},
},
}
@pytest.mark.parametrize(
'config_instance', ['_config_verifier_inspec_section_data'], indirect=True)
def test_verifier_property_is_inspec(config_instance):
assert isinstance(config_instance.verifier, inspec.Inspec)
@pytest.fixture
def _config_verifier_goss_section_data():
return {
'verifier': {
'name': 'goss',
'lint': {
'name': 'yamllint',
},
},
}
@pytest.mark.parametrize(
'config_instance', ['_config_verifier_goss_section_data'], indirect=True)
def test_verifier_property_is_goss(config_instance):
assert isinstance(config_instance.verifier, goss.Goss)
def test_verifiers_property(config_instance):
x = ['goss', 'inspec', 'testinfra']
assert x == config_instance.verifiers
def test_get_driver_name_from_state_file(config_instance):
config_instance.state.change_state('driver', 'state-driver')
assert 'state-driver' == config_instance._get_driver_name()
def test_get_driver_name_from_cli(config_instance):
config_instance.command_args = {'driver_name': 'cli-driver'}
assert 'cli-driver' == config_instance._get_driver_name()
def test_get_driver_name(config_instance):
assert 'docker' == config_instance._get_driver_name()
def test_get_driver_name_raises_when_different_driver_used(
patched_logger_critical, config_instance):
config_instance.state.change_state('driver', 'foo')
config_instance.command_args = {'driver_name': 'bar'}
with pytest.raises(SystemExit) as e:
config_instance._get_driver_name()
assert 1 == e.value.code
msg = ("Instance(s) were created with the 'foo' driver, "
"but the subcommand is using 'bar' driver.")
patched_logger_critical.assert_called_once_with(msg)
def test_get_config(config_instance):
assert isinstance(config_instance._get_config(), dict)
def test_get_config_with_base_config(config_instance):
config_instance.args = {'base_config': './foo.yml'}
contents = {'foo': 'bar'}
util.write_file(config_instance.args['base_config'],
util.safe_dump(contents))
result = config_instance._get_config()
assert result['foo'] == 'bar'
def test_reget_config(config_instance):
assert isinstance(config_instance._reget_config(), dict)
def test_interpolate(patched_logger_critical, config_instance):
string = 'foo: $HOME'
x = 'foo: {}'.format(os.environ['HOME'])
assert x == config_instance._interpolate(string, os.environ, None)
def test_interpolate_raises_on_failed_interpolation(patched_logger_critical,
config_instance):
string = '$6$8I5Cfmpr$kGZB'
with pytest.raises(SystemExit) as e:
config_instance._interpolate(string, os.environ, None)
assert 1 == e.value.code
msg = ("parsing config file '{}'.\n\n"
'Invalid placeholder in string: line 1, col 1\n'
'$6$8I5Cfmpr$kGZB').format(config_instance.molecule_file)
patched_logger_critical.assert_called_once_with(msg)
def test_preflight(mocker, config_instance, patched_logger_info):
m = mocker.patch('molecule.model.schema_v2.pre_validate')
m.return_value = None
config_instance._preflight('foo')
m.assert_called_once_with('foo', os.environ, config.MOLECULE_KEEP_STRING)
def test_preflight_exits_when_validation_fails(
mocker, patched_logger_critical, config_instance):
m = mocker.patch('molecule.model.schema_v2.pre_validate')
m.return_value = 'validation errors'
with pytest.raises(SystemExit) as e:
config_instance._preflight('invalid stream')
assert 1 == e.value.code
msg = 'Failed to validate.\n\nvalidation errors'
patched_logger_critical.assert_called_once_with(msg)
def test_validate(mocker, config_instance, patched_logger_info,
patched_logger_success):
m = mocker.patch('molecule.model.schema_v2.validate')
m.return_value = None
config_instance._validate()
msg = 'Validating schema {}.'.format(config_instance.molecule_file)
patched_logger_info.assert_called_once_with(msg)
m.assert_called_once_with(config_instance.config)
msg = 'Validation completed successfully.'
patched_logger_success.assert_called_once_with(msg)
def test_validate_exits_when_validation_fails(mocker, patched_logger_critical,
config_instance):
m = mocker.patch('molecule.model.schema_v2.validate')
m.return_value = 'validation errors'
with pytest.raises(SystemExit) as e:
config_instance._validate()
assert 1 == e.value.code
msg = 'Failed to validate.\n\nvalidation errors'
patched_logger_critical.assert_called_once_with(msg)
def test_molecule_directory():
assert '/foo/bar/molecule' == config.molecule_directory('/foo/bar')
def test_molecule_file():
assert '/foo/bar/molecule.yml' == config.molecule_file('/foo/bar')
def test_molecule_drivers():
x = [
'azure',
'delegated',
'docker',
'ec2',
'gce',
'lxc',
'lxd',
'openstack',
'vagrant',
]
assert x == config.molecule_drivers()
def test_molecule_verifiers():
x = ['goss', 'inspec', 'testinfra']
assert x == config.molecule_verifiers()
def test_set_env_from_file(config_instance):
config_instance.args = {'env_file': '.env'}
contents = {
'foo': 'bar',
'BAZ': 'zzyzx',
}
env_file = config_instance.args.get('env_file')
util.write_file(env_file, util.safe_dump(contents))
env = config.set_env_from_file({}, env_file)
assert contents == env
def test_set_env_from_file_returns_original_env_when_env_file_not_found(
config_instance):
env = config.set_env_from_file({}, 'file-not-found')
assert {} == env
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
infra/templates/osdu-r2-resources/tests/integration/integration_test.go | // Copyright © Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"os"
"testing"
"github.com/gruntwork-io/terratest/modules/terraform"
cosmosIntegTests "github.com/microsoft/cobalt/infra/modules/providers/azure/cosmosdb/tests/integration"
sbIntegTests "github.com/microsoft/cobalt/infra/modules/providers/azure/service-bus/tests/integration"
storageIntegTests "github.com/microsoft/cobalt/infra/modules/providers/azure/storage-account/tests/integration"
esIntegTestConfig "github.com/microsoft/cobalt/infra/modules/providers/elastic/elastic-cloud-enterprise/tests"
esIntegTests "github.com/microsoft/cobalt/infra/modules/providers/elastic/elastic-cloud-enterprise/tests/integration"
"github.com/microsoft/cobalt/test-harness/infratests"
)
var subscription = os.Getenv("ARM_SUBSCRIPTION_ID")
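// tfOptions points Terraform at the template under test and at the shared
// remote-state backend; the storage account and container names come from
// environment variables so CI can inject them per run.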
var tfOptions = &terraform.Options{
TerraformDir: "../../",
BackendConfig: map[string]interface{}{
"storage_account_name": os.Getenv("TF_VAR_remote_state_account"),
"container_name": os.Getenv("TF_VAR_remote_state_container"),
},
}
// Runs a suite of test assertions to validate that a provisioned set of app services
// are fully functional.
func TestAppSvcPlanSingleRegion(t *testing.T) {
esIntegTestConfig.ESVersion = "6.8.3"
testFixture := infratests.IntegrationTestFixture{
GoTest: t,
TfOptions: tfOptions,
ExpectedTfOutputCount: 28,
TfOutputAssertions: []infratests.TerraformOutputValidation{
verifyAppServiceConfig,
/* Now that we configured the services to run as Java containers via linux_fx_version,
we'll have to temporarily comment out the call to verifyAppServiceEndpointStatusCode...
The service(s) will be unresponsive until our Azure Pipeline deploys a jar
to the target app service. We'll remove the comment once our service CI/CD pipelines are in place.
verifyAppServiceEndpointStatusCode,
*/
verifyServicePrincipalRoleAssignments,
esIntegTests.ValidateElasticKvSecretValues("keyvault_secret_attributes", "elastic_cluster_properties"),
esIntegTests.CheckClusterHealth("elastic_cluster_properties"),
esIntegTests.CheckClusterVersion("elastic_cluster_properties"),
esIntegTests.CheckClusterIndexing("elastic_cluster_properties"),
storageIntegTests.InspectStorageAccount("storage_account", "storage_account_containers", "resource_group"),
sbIntegTests.VerifySubscriptionsList(subscription,
"resource_group",
"sb_namespace_name",
"sb_topics"),
cosmosIntegTests.InspectProvisionedCosmosDBAccount("resource_group", "cosmosdb_account_name", "cosmosdb_properties"),
},
}
infratests.RunIntegrationTests(&testFixture)
}
| [
"\"ARM_SUBSCRIPTION_ID\"",
"\"TF_VAR_remote_state_account\"",
"\"TF_VAR_remote_state_container\""
]
| []
| [
"ARM_SUBSCRIPTION_ID",
"TF_VAR_remote_state_account",
"TF_VAR_remote_state_container"
]
| [] | ["ARM_SUBSCRIPTION_ID", "TF_VAR_remote_state_account", "TF_VAR_remote_state_container"] | go | 3 | 0 | |
google/pubsub_v1/services/publisher/client.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import functools
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
from google.iam.v1 import policy_pb2 as policy # type: ignore
from google.pubsub_v1.services.publisher import pagers
from google.pubsub_v1.types import pubsub
import grpc
from .transports.base import PublisherTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import PublisherGrpcTransport
from .transports.grpc_asyncio import PublisherGrpcAsyncIOTransport
class PublisherClientMeta(type):
"""Metaclass for the Publisher client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[PublisherTransport]]
_transport_registry["grpc"] = PublisherGrpcTransport
_transport_registry["grpc_asyncio"] = PublisherGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[PublisherTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class PublisherClient(metaclass=PublisherClientMeta):
"""The service that an application uses to manipulate topics,
and to send messages to a topic.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_DEFAULT_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/pubsub",
)
SERVICE_ADDRESS = "pubsub.googleapis.com:443"
"""The default address of the service."""
DEFAULT_ENDPOINT = "pubsub.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PublisherClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PublisherClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> PublisherTransport:
"""Return the transport used by the client instance.
Returns:
PublisherTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def schema_path(project: str, schema: str,) -> str:
"""Return a fully-qualified schema string."""
return "projects/{project}/schemas/{schema}".format(
project=project, schema=schema,
)
@staticmethod
def parse_schema_path(path: str) -> Dict[str, str]:
"""Parse a schema path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/schemas/(?P<schema>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def subscription_path(project: str, subscription: str,) -> str:
"""Return a fully-qualified subscription string."""
return "projects/{project}/subscriptions/{subscription}".format(
project=project, subscription=subscription,
)
@staticmethod
def parse_subscription_path(path: str) -> Dict[str, str]:
"""Parse a subscription path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/subscriptions/(?P<subscription>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def topic_path(project: str, topic: str,) -> str:
"""Return a fully-qualified topic string."""
return "projects/{project}/topics/{topic}".format(project=project, topic=topic,)
@staticmethod
def parse_topic_path(path: str) -> Dict[str, str]:
"""Parse a topic path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/topics/(?P<topic>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, PublisherTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the publisher client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, PublisherTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
client_cert_source_func = (
mtls.default_client_cert_source() if is_mtls else None
)
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, PublisherTransport):
# transport is a PublisherTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, "
"provide its scopes directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
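            # If PUBSUB_EMULATOR_HOST is set, route traffic to the local
            # Pub/Sub emulator over an insecure (plaintext) gRPC channel
            # instead of the production endpoint.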
emulator_host = os.environ.get("PUBSUB_EMULATOR_HOST")
if emulator_host:
if issubclass(Transport, type(self)._transport_registry["grpc"]):
channel = grpc.insecure_channel(target=emulator_host)
else:
channel = grpc.aio.insecure_channel(target=emulator_host)
Transport = functools.partial(Transport, channel=channel)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
def create_topic(
self,
request: pubsub.Topic = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.Topic:
r"""Creates the given topic with the given name. See the [resource
name rules]
(https://cloud.google.com/pubsub/docs/admin#resource_names).
Args:
request (google.pubsub_v1.types.Topic):
The request object. A topic resource.
name (str):
Required. The name of the topic. It must have the format
``"projects/{project}/topics/{topic}"``. ``{topic}``
must start with a letter, and contain only letters
(``[A-Za-z]``), numbers (``[0-9]``), dashes (``-``),
underscores (``_``), periods (``.``), tildes (``~``),
plus (``+``) or percent signs (``%``). It must be
between 3 and 255 characters in length, and it must not
start with ``"goog"``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.Topic:
A topic resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.Topic.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.Topic):
request = pubsub.Topic(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_topic]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_topic(
self,
request: pubsub.UpdateTopicRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.Topic:
r"""Updates an existing topic. Note that certain
properties of a topic are not modifiable.
Args:
request (google.pubsub_v1.types.UpdateTopicRequest):
The request object. Request for the UpdateTopic method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.Topic:
A topic resource.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.UpdateTopicRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.UpdateTopicRequest):
request = pubsub.UpdateTopicRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_topic]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("topic.name", request.topic.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def publish(
self,
request: pubsub.PublishRequest = None,
*,
topic: str = None,
messages: Sequence[pubsub.PubsubMessage] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.PublishResponse:
r"""Adds one or more messages to the topic. Returns ``NOT_FOUND`` if
the topic does not exist.
Args:
request (google.pubsub_v1.types.PublishRequest):
The request object. Request for the Publish method.
topic (str):
Required. The messages in the request will be published
on this topic. Format is
``projects/{project}/topics/{topic}``.
This corresponds to the ``topic`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
messages (Sequence[google.pubsub_v1.types.PubsubMessage]):
Required. The messages to publish.
This corresponds to the ``messages`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.PublishResponse:
Response for the Publish method.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([topic, messages])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.PublishRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.PublishRequest):
request = pubsub.PublishRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if topic is not None:
request.topic = topic
if messages is not None:
request.messages = messages
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.publish]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("topic", request.topic),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_topic(
self,
request: pubsub.GetTopicRequest = None,
*,
topic: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.Topic:
r"""Gets the configuration of a topic.
Args:
request (google.pubsub_v1.types.GetTopicRequest):
The request object. Request for the GetTopic method.
topic (str):
Required. The name of the topic to get. Format is
``projects/{project}/topics/{topic}``.
This corresponds to the ``topic`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.Topic:
A topic resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([topic])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.GetTopicRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.GetTopicRequest):
request = pubsub.GetTopicRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if topic is not None:
request.topic = topic
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_topic]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("topic", request.topic),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_topics(
self,
request: pubsub.ListTopicsRequest = None,
*,
project: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTopicsPager:
r"""Lists matching topics.
Args:
request (google.pubsub_v1.types.ListTopicsRequest):
The request object. Request for the `ListTopics` method.
project (str):
Required. The name of the project in which to list
topics. Format is ``projects/{project-id}``.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.services.publisher.pagers.ListTopicsPager:
Response for the ListTopics method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.ListTopicsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.ListTopicsRequest):
request = pubsub.ListTopicsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_topics]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("project", request.project),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTopicsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def list_topic_subscriptions(
self,
request: pubsub.ListTopicSubscriptionsRequest = None,
*,
topic: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTopicSubscriptionsPager:
r"""Lists the names of the attached subscriptions on this
topic.
Args:
request (google.pubsub_v1.types.ListTopicSubscriptionsRequest):
The request object. Request for the
`ListTopicSubscriptions` method.
topic (str):
Required. The name of the topic that subscriptions are
attached to. Format is
``projects/{project}/topics/{topic}``.
This corresponds to the ``topic`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.services.publisher.pagers.ListTopicSubscriptionsPager:
Response for the ListTopicSubscriptions method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([topic])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.ListTopicSubscriptionsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.ListTopicSubscriptionsRequest):
request = pubsub.ListTopicSubscriptionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if topic is not None:
request.topic = topic
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_topic_subscriptions]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("topic", request.topic),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTopicSubscriptionsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def list_topic_snapshots(
self,
request: pubsub.ListTopicSnapshotsRequest = None,
*,
topic: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTopicSnapshotsPager:
r"""Lists the names of the snapshots on this topic. Snapshots are
used in
`Seek <https://cloud.google.com/pubsub/docs/replay-overview>`__
operations, which allow you to manage message acknowledgments in
bulk. That is, you can set the acknowledgment state of messages
in an existing subscription to the state captured by a snapshot.
Args:
request (google.pubsub_v1.types.ListTopicSnapshotsRequest):
The request object. Request for the `ListTopicSnapshots`
method.
topic (str):
Required. The name of the topic that snapshots are
attached to. Format is
``projects/{project}/topics/{topic}``.
This corresponds to the ``topic`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.services.publisher.pagers.ListTopicSnapshotsPager:
Response for the ListTopicSnapshots method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([topic])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.ListTopicSnapshotsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.ListTopicSnapshotsRequest):
request = pubsub.ListTopicSnapshotsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if topic is not None:
request.topic = topic
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_topic_snapshots]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("topic", request.topic),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTopicSnapshotsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_topic(
self,
request: pubsub.DeleteTopicRequest = None,
*,
topic: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the topic with the given name. Returns ``NOT_FOUND`` if
the topic does not exist. After a topic is deleted, a new topic
may be created with the same name; this is an entirely new topic
with none of the old configuration or subscriptions. Existing
subscriptions to this topic are not deleted, but their ``topic``
field is set to ``_deleted-topic_``.
Args:
request (google.pubsub_v1.types.DeleteTopicRequest):
The request object. Request for the `DeleteTopic`
method.
topic (str):
Required. Name of the topic to delete. Format is
``projects/{project}/topics/{topic}``.
This corresponds to the ``topic`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([topic])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.DeleteTopicRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.DeleteTopicRequest):
request = pubsub.DeleteTopicRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if topic is not None:
request.topic = topic
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_topic]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("topic", request.topic),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
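    # Editor's note: an illustrative sketch of calling delete_topic; the
    # project and topic names are placeholders, not values from this module.
    #
    #   client = PublisherClient()
    #   client.delete_topic(topic=client.topic_path("my-project", "my-topic"))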
def detach_subscription(
self,
request: pubsub.DetachSubscriptionRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.DetachSubscriptionResponse:
r"""Detaches a subscription from this topic. All messages retained
in the subscription are dropped. Subsequent ``Pull`` and
``StreamingPull`` requests will return FAILED_PRECONDITION. If
the subscription is a push subscription, pushes to the endpoint
will stop.
Args:
request (google.pubsub_v1.types.DetachSubscriptionRequest):
The request object. Request for the DetachSubscription
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.DetachSubscriptionResponse:
Response for the DetachSubscription
method. Reserved for future use.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.DetachSubscriptionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.DetachSubscriptionRequest):
request = pubsub.DetachSubscriptionRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.detach_subscription]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("subscription", request.subscription),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
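    # Editor's note: an illustrative sketch only. DetachSubscription has no
    # flattened arguments, so the full subscription path goes in the request;
    # the names below are placeholders.
    #
    #   client = PublisherClient()
    #   client.detach_subscription(
    #       request=pubsub.DetachSubscriptionRequest(
    #           subscription="projects/my-project/subscriptions/my-sub",
    #       )
    #   )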
def set_iam_policy(
self,
request: iam_policy.SetIamPolicyRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy.Policy:
r"""Sets the IAM access control policy on the specified
function. Replaces any existing policy.
Args:
request (:class:`~.iam_policy.SetIamPolicyRequest`):
The request object. Request message for `SetIamPolicy`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.policy.Policy:
Defines an Identity and Access Management (IAM) policy.
It is used to specify access control policies for Cloud
Platform resources.
A ``Policy`` is a collection of ``bindings``. A
``binding`` binds one or more ``members`` to a single
``role``. Members can be user accounts, service
accounts, Google groups, and domains (such as G Suite).
A ``role`` is a named list of permissions (defined by
IAM or configured by users). A ``binding`` can
optionally specify a ``condition``, which is a logic
expression that further constrains the role binding
based on attributes about the request and/or target
resource.
**JSON Example**::
{
"bindings": [
{
"role": "roles/resourcemanager.organizationAdmin",
"members": [
"user:[email protected]",
"group:[email protected]",
"domain:google.com",
"serviceAccount:[email protected]"
]
},
{
"role": "roles/resourcemanager.organizationViewer",
"members": ["user:[email protected]"],
"condition": {
"title": "expirable access",
"description": "Does not grant access after Sep 2020",
"expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')",
}
}
]
}
**YAML Example**::
bindings:
- members:
- user:[email protected]
- group:[email protected]
- domain:google.com
- serviceAccount:[email protected]
role: roles/resourcemanager.organizationAdmin
- members:
- user:[email protected]
role: roles/resourcemanager.organizationViewer
condition:
title: expirable access
description: Does not grant access after Sep 2020
expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the `IAM
developer's
guide <https://cloud.google.com/iam/docs>`__.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy.SetIamPolicyRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.set_iam_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
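    # Editor's note: a hedged sketch of the usual get-modify-set flow for IAM
    # policies on a topic. The role, member and names are placeholders; the
    # request is passed as a dict because SetIamPolicyRequest is not a
    # proto-plus wrapped type (see the keyword-expansion branch above).
    #
    #   client = PublisherClient()
    #   topic_path = client.topic_path("my-project", "my-topic")
    #   policy = client.get_iam_policy(request={"resource": topic_path})
    #   policy.bindings.add(role="roles/pubsub.viewer", members=["allUsers"])
    #   policy = client.set_iam_policy(
    #       request={"resource": topic_path, "policy": policy}
    #   )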
def get_iam_policy(
self,
request: iam_policy.GetIamPolicyRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy.Policy:
r"""Gets the IAM access control policy for a function.
Returns an empty policy if the function exists and does
not have a policy set.
Args:
request (:class:`~.iam_policy.GetIamPolicyRequest`):
The request object. Request message for `GetIamPolicy`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.policy.Policy:
Defines an Identity and Access Management (IAM) policy.
It is used to specify access control policies for Cloud
Platform resources.
A ``Policy`` is a collection of ``bindings``. A
``binding`` binds one or more ``members`` to a single
``role``. Members can be user accounts, service
accounts, Google groups, and domains (such as G Suite).
A ``role`` is a named list of permissions (defined by
IAM or configured by users). A ``binding`` can
optionally specify a ``condition``, which is a logic
expression that further constrains the role binding
based on attributes about the request and/or target
resource.
**JSON Example**::
{
"bindings": [
{
"role": "roles/resourcemanager.organizationAdmin",
"members": [
"user:[email protected]",
"group:[email protected]",
"domain:google.com",
"serviceAccount:[email protected]"
]
},
{
"role": "roles/resourcemanager.organizationViewer",
"members": ["user:[email protected]"],
"condition": {
"title": "expirable access",
"description": "Does not grant access after Sep 2020",
"expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')",
}
}
]
}
**YAML Example**::
bindings:
- members:
- user:[email protected]
- group:[email protected]
- domain:google.com
- serviceAccount:[email protected]
role: roles/resourcemanager.organizationAdmin
- members:
- user:[email protected]
role: roles/resourcemanager.organizationViewer
condition:
title: expirable access
description: Does not grant access after Sep 2020
expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the `IAM
developer's
guide <https://cloud.google.com/iam/docs>`__.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy.GetIamPolicyRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_iam_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def test_iam_permissions(
self,
request: iam_policy.TestIamPermissionsRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy.TestIamPermissionsResponse:
r"""Tests the specified permissions against the IAM access control
policy for a function. If the function does not exist, this will
return an empty set of permissions, not a NOT_FOUND error.
Args:
request (:class:`~.iam_policy.TestIamPermissionsRequest`):
The request object. Request message for
`TestIamPermissions` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.iam_policy.TestIamPermissionsResponse:
Response message for ``TestIamPermissions`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy.TestIamPermissionsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.test_iam_permissions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
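    # Editor's note: an illustrative sketch of checking caller permissions on a
    # topic; the permission strings and names are placeholders.
    #
    #   client = PublisherClient()
    #   topic_path = client.topic_path("my-project", "my-topic")
    #   response = client.test_iam_permissions(
    #       request={
    #           "resource": topic_path,
    #           "permissions": ["pubsub.topics.publish", "pubsub.topics.update"],
    #       }
    #   )
    #   print("allowed:", list(response.permissions))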
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
client_library_version=pkg_resources.get_distribution(
"google-cloud-pubsub",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("PublisherClient",)
| []
| []
| [
"GOOGLE_API_USE_MTLS_ENDPOINT",
"PUBSUB_EMULATOR_HOST",
"GOOGLE_API_USE_CLIENT_CERTIFICATE"
]
| [] | ["GOOGLE_API_USE_MTLS_ENDPOINT", "PUBSUB_EMULATOR_HOST", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | python | 3 | 0 | |
students/K33401/Do_Thien/Lr2/django_project_do/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project_do.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
nnunet/training/data_augmentation/default_data_augmentation.py | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from copy import deepcopy
import numpy as np
from batchgenerators.dataloading import MultiThreadedAugmenter
from batchgenerators.transforms import DataChannelSelectionTransform, SegChannelSelectionTransform, SpatialTransform, \
GammaTransform, MirrorTransform, Compose
from batchgenerators.transforms.utility_transforms import RemoveLabelTransform, RenameTransform, NumpyToTensor
from nnunet.training.data_augmentation.custom_transforms import Convert3DTo2DTransform, Convert2DTo3DTransform, \
MaskTransform, ConvertSegmentationToRegionsTransform
from nnunet.training.data_augmentation.pyramid_augmentations import MoveSegAsOneHotToData, \
ApplyRandomBinaryOperatorTransform, \
RemoveRandomConnectedComponentFromOneHotEncodingTransform
try:
from batchgenerators.dataloading.nondet_multi_threaded_augmenter import NonDetMultiThreadedAugmenter
except ImportError as ie:
NonDetMultiThreadedAugmenter = None
default_3D_augmentation_params = {
"selected_data_channels": None,
"selected_seg_channels": None,
"do_elastic": True,
"elastic_deform_alpha": (0., 900.),
"elastic_deform_sigma": (9., 13.),
"p_eldef": 0.2,
"do_scaling": True,
"scale_range": (0.85, 1.25),
"independent_scale_factor_for_each_axis": False,
"p_independent_scale_per_axis": 1,
"p_scale": 0.2,
"do_rotation": True,
"rotation_x": (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),
"rotation_y": (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),
"rotation_z": (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),
"rotation_p_per_axis": 1,
"p_rot": 0.2,
"random_crop": False,
"random_crop_dist_to_border": None,
"do_gamma": True,
"gamma_retain_stats": True,
"gamma_range": (0.7, 1.5),
"p_gamma": 0.3,
"do_mirror": True,
"mirror_axes": (0, 1, 2),
"dummy_2D": False,
"mask_was_used_for_normalization": False,
"border_mode_data": "constant",
"all_segmentation_labels": None, # used for cascade
"move_last_seg_chanel_to_data": False, # used for cascade
"cascade_do_cascade_augmentations": False, # used for cascade
"cascade_random_binary_transform_p": 0.4,
"cascade_random_binary_transform_p_per_label": 1,
"cascade_random_binary_transform_size": (1, 8),
"cascade_remove_conn_comp_p": 0.2,
"cascade_remove_conn_comp_max_size_percent_threshold": 0.15,
"cascade_remove_conn_comp_fill_with_other_class_p": 0.0,
"do_additive_brightness": False,
"additive_brightness_p_per_sample": 0.15,
"additive_brightness_p_per_channel": 0.5,
"additive_brightness_mu": 0.0,
"additive_brightness_sigma": 0.1,
"num_threads": 12 if 'nnUNet_n_proc_DA' not in os.environ else int(os.environ['nnUNet_n_proc_DA']),
"num_cached_per_thread": 1,
}
default_2D_augmentation_params = deepcopy(default_3D_augmentation_params)
default_2D_augmentation_params["elastic_deform_alpha"] = (0., 200.)
default_2D_augmentation_params["elastic_deform_sigma"] = (9., 13.)
default_2D_augmentation_params["rotation_x"] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi)
default_2D_augmentation_params["rotation_y"] = (-0. / 360 * 2. * np.pi, 0. / 360 * 2. * np.pi)
default_2D_augmentation_params["rotation_z"] = (-0. / 360 * 2. * np.pi, 0. / 360 * 2. * np.pi)
# sometimes you have 3d data and a 3d net but cannot augment them properly in 3d due to anisotropy (which is currently
# not supported in batchgenerators). In that case you can 'cheat' and transfer your 3d data into 2d data and
# transform them back after augmentation
default_2D_augmentation_params["dummy_2D"] = False
default_2D_augmentation_params["mirror_axes"] = (0, 1) # this can be (0, 1, 2) if dummy_2D=True
def get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range):
if isinstance(rot_x, (tuple, list)):
rot_x = max(np.abs(rot_x))
if isinstance(rot_y, (tuple, list)):
rot_y = max(np.abs(rot_y))
if isinstance(rot_z, (tuple, list)):
rot_z = max(np.abs(rot_z))
rot_x = min(90 / 360 * 2. * np.pi, rot_x)
rot_y = min(90 / 360 * 2. * np.pi, rot_y)
rot_z = min(90 / 360 * 2. * np.pi, rot_z)
from batchgenerators.augmentations.utils import rotate_coords_3d, rotate_coords_2d
coords = np.array(final_patch_size)
final_shape = np.copy(coords)
if len(coords) == 3:
final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, rot_x, 0, 0)), final_shape)), 0)
final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, rot_y, 0)), final_shape)), 0)
final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, 0, rot_z)), final_shape)), 0)
elif len(coords) == 2:
final_shape = np.max(np.vstack((np.abs(rotate_coords_2d(coords, rot_x)), final_shape)), 0)
final_shape /= min(scale_range)
return final_shape.astype(int)
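# Editor's note: a small worked example of get_patch_size (the 128^3 patch size
# is an arbitrary illustration). Given the default rotation and scale ranges, it
# returns the enlarged patch that has to be cropped from the raw data so that
# rotating/scaling back to 128^3 does not need padding:
#
#   basic_size = get_patch_size(np.array([128, 128, 128]),
#                               default_3D_augmentation_params['rotation_x'],
#                               default_3D_augmentation_params['rotation_y'],
#                               default_3D_augmentation_params['rotation_z'],
#                               default_3D_augmentation_params['scale_range'])
#   # basic_size is an int array with every entry >= 128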
def get_default_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,
border_val_seg=-1, pin_memory=True,
seeds_train=None, seeds_val=None, regions=None):
assert params.get('mirror') is None, "old version of params, use new keyword do_mirror"
tr_transforms = []
if params.get("selected_data_channels") is not None:
tr_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
if params.get("selected_seg_channels") is not None:
tr_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
# don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!
if params.get("dummy_2D") is not None and params.get("dummy_2D"):
tr_transforms.append(Convert3DTo2DTransform())
tr_transforms.append(SpatialTransform(
patch_size, patch_center_dist_from_border=None, do_elastic_deform=params.get("do_elastic"),
alpha=params.get("elastic_deform_alpha"), sigma=params.get("elastic_deform_sigma"),
do_rotation=params.get("do_rotation"), angle_x=params.get("rotation_x"), angle_y=params.get("rotation_y"),
angle_z=params.get("rotation_z"), do_scale=params.get("do_scaling"), scale=params.get("scale_range"),
border_mode_data=params.get("border_mode_data"), border_cval_data=0, order_data=3, border_mode_seg="constant",
border_cval_seg=border_val_seg,
order_seg=1, random_crop=params.get("random_crop"), p_el_per_sample=params.get("p_eldef"),
p_scale_per_sample=params.get("p_scale"), p_rot_per_sample=params.get("p_rot"),
independent_scale_for_each_axis=params.get("independent_scale_factor_for_each_axis")
))
if params.get("dummy_2D") is not None and params.get("dummy_2D"):
tr_transforms.append(Convert2DTo3DTransform())
if params.get("do_gamma"):
tr_transforms.append(
GammaTransform(params.get("gamma_range"), False, True, retain_stats=params.get("gamma_retain_stats"),
p_per_sample=params["p_gamma"]))
if params.get("do_mirror"):
tr_transforms.append(MirrorTransform(params.get("mirror_axes")))
if params.get("mask_was_used_for_normalization") is not None:
mask_was_used_for_normalization = params.get("mask_was_used_for_normalization")
tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0))
tr_transforms.append(RemoveLabelTransform(-1, 0))
if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"):
tr_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data'))
if params.get("cascade_do_cascade_augmentations") and not None and params.get(
"cascade_do_cascade_augmentations"):
tr_transforms.append(ApplyRandomBinaryOperatorTransform(
channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)),
p_per_sample=params.get("cascade_random_binary_transform_p"),
key="data",
strel_size=params.get("cascade_random_binary_transform_size")))
tr_transforms.append(RemoveRandomConnectedComponentFromOneHotEncodingTransform(
channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)),
key="data",
p_per_sample=params.get("cascade_remove_conn_comp_p"),
            fill_with_other_class_p=params.get("cascade_remove_conn_comp_fill_with_other_class_p"),
            dont_do_if_covers_more_than_X_percent=params.get("cascade_remove_conn_comp_max_size_percent_threshold")))
tr_transforms.append(RenameTransform('seg', 'target', True))
if regions is not None:
tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
tr_transforms = Compose(tr_transforms)
# from batchgenerators.dataloading import SingleThreadedAugmenter
# batchgenerator_train = SingleThreadedAugmenter(dataloader_train, tr_transforms)
# import IPython;IPython.embed()
batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),
params.get("num_cached_per_thread"), seeds=seeds_train,
pin_memory=pin_memory)
val_transforms = []
val_transforms.append(RemoveLabelTransform(-1, 0))
if params.get("selected_data_channels") is not None:
val_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
if params.get("selected_seg_channels") is not None:
val_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"):
val_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data'))
val_transforms.append(RenameTransform('seg', 'target', True))
if regions is not None:
val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
val_transforms = Compose(val_transforms)
# batchgenerator_val = SingleThreadedAugmenter(dataloader_val, val_transforms)
batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, max(params.get('num_threads') // 2, 1),
params.get("num_cached_per_thread"), seeds=seeds_val,
pin_memory=pin_memory)
return batchgenerator_train, batchgenerator_val
if __name__ == "__main__":
from nnunet.training.dataloading.dataset_loading import DataLoader3D, load_dataset
from nnunet.paths import preprocessing_output_dir
import os
import pickle
t = "Task002_Heart"
p = os.path.join(preprocessing_output_dir, t)
dataset = load_dataset(p, 0)
with open(os.path.join(p, "plans.pkl"), 'rb') as f:
plans = pickle.load(f)
basic_patch_size = get_patch_size(np.array(plans['stage_properties'][0].patch_size),
default_3D_augmentation_params['rotation_x'],
default_3D_augmentation_params['rotation_y'],
default_3D_augmentation_params['rotation_z'],
default_3D_augmentation_params['scale_range'])
dl = DataLoader3D(dataset, basic_patch_size, np.array(plans['stage_properties'][0].patch_size).astype(int), 1)
tr, val = get_default_augmentation(dl, dl, np.array(plans['stage_properties'][0].patch_size).astype(int))
| []
| []
| [
"nnUNet_n_proc_DA"
]
| [] | ["nnUNet_n_proc_DA"] | python | 1 | 0 | |
main.go | //(C) Copyright [2020] Hewlett Packard Enterprise Development LP
//
//Licensed under the Apache License, Version 2.0 (the "License"); you may
//not use this file except in compliance with the License. You may obtain
//a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
//WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
//License for the specific language governing permissions and limitations
// under the License.
package main
import (
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"os"
"strconv"
"strings"
"time"
dmtfmodel "github.com/ODIM-Project/ODIM/lib-dmtf/model"
dc "github.com/ODIM-Project/ODIM/lib-messagebus/datacommunicator"
"github.com/ODIM-Project/ODIM/lib-utilities/common"
lutilconf "github.com/ODIM-Project/ODIM/lib-utilities/config"
"github.com/ODIM-Project/PluginCiscoACI/capdata"
"github.com/ODIM-Project/PluginCiscoACI/caphandler"
"github.com/ODIM-Project/PluginCiscoACI/capmessagebus"
"github.com/ODIM-Project/PluginCiscoACI/capmiddleware"
"github.com/ODIM-Project/PluginCiscoACI/capmodel"
"github.com/ODIM-Project/PluginCiscoACI/caputilities"
"github.com/ODIM-Project/PluginCiscoACI/config"
"github.com/ODIM-Project/PluginCiscoACI/db"
"github.com/ciscoecosystem/aci-go-client/models"
iris "github.com/kataras/iris/v12"
uuid "github.com/satori/go.uuid"
"github.com/sirupsen/logrus"
)
var subscriptionInfo []capmodel.Device
var log = logrus.New()
// TokenObject will contains the generated token and public key of odimra
type TokenObject struct {
AuthToken string `json:"authToken"`
PublicKey []byte `json:"publicKey"`
}
func main() {
// verifying the uid of the user
if uid := os.Geteuid(); uid == 0 {
log.Fatal("Plugin Service should not be run as the root user")
}
if err := config.SetConfiguration(); err != nil {
log.Fatal("while reading from config, PluginCiscoACI got" + err.Error())
}
if err := dc.SetConfiguration(config.Data.MessageBusConf.MessageQueueConfigFilePath); err != nil {
log.Fatal("while trying to set messagebus configuration, PluginCiscoACI got: " + err.Error())
}
// CreateJobQueue defines the queue which will act as an infinite buffer
// In channel is an entry or input channel and the Out channel is an exit or output channel
caphandler.In, caphandler.Out = common.CreateJobQueue()
// RunReadWorkers will create a worker pool for doing a specific task
// which is passed to it as Publish method after reading the data from the channel.
go common.RunReadWorkers(caphandler.Out, capmessagebus.Publish, 1)
	initializeACIData()
configFilePath := os.Getenv("PLUGIN_CONFIG_FILE_PATH")
if configFilePath == "" {
log.Fatal("No value get the environment variable PLUGIN_CONFIG_FILE_PATH")
}
// TrackConfigFileChanges monitors the config changes using fsnotfiy
go caputilities.TrackConfigFileChanges(configFilePath)
	initializePluginStatus()
app()
}
func app() {
app := routers()
go func() {
eventsrouters()
}()
conf := &lutilconf.HTTPConfig{
Certificate: &config.Data.KeyCertConf.Certificate,
PrivateKey: &config.Data.KeyCertConf.PrivateKey,
CACertificate: &config.Data.KeyCertConf.RootCACertificate,
ServerAddress: config.Data.PluginConf.Host,
ServerPort: config.Data.PluginConf.Port,
}
pluginServer, err := conf.GetHTTPServerObj()
if err != nil {
log.Fatal("while initializing plugin server, PluginCiscoACI got: " + err.Error())
}
app.Run(iris.Server(pluginServer))
}
func routers() *iris.Application {
app := iris.New()
app.WrapRouter(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
path := r.URL.Path
if len(path) > 1 && path[len(path)-1] == '/' && path[len(path)-2] != '/' {
path = path[:len(path)-1]
r.RequestURI = path
r.URL.Path = path
}
next(w, r)
})
pluginRoutes := app.Party("/ODIM/v1")
pluginRoutes.Post("/validate", capmiddleware.BasicAuth, caphandler.Validate)
pluginRoutes.Post("/Sessions", caphandler.CreateSession)
pluginRoutes.Post("/Subscriptions", capmiddleware.BasicAuth, caphandler.CreateEventSubscription)
pluginRoutes.Delete("/Subscriptions", capmiddleware.BasicAuth, caphandler.DeleteEventSubscription)
pluginRoutes.Get("/Status", capmiddleware.BasicAuth, caphandler.GetPluginStatus)
pluginRoutes.Post("/Startup", capmiddleware.BasicAuth, caphandler.GetPluginStartup)
pluginRoutes.Get("/Chassis", capmiddleware.BasicAuth, caphandler.GetChassisCollection)
pluginRoutes.Get("/Chassis/{id}", capmiddleware.BasicAuth, caphandler.GetChassis)
pluginRoutes.Patch("/Chassis/{id}", capmiddleware.BasicAuth, caphandler.ChassisMethodNotAllowed)
pluginRoutes.Delete("/Chassis/{id}", capmiddleware.BasicAuth, caphandler.ChassisMethodNotAllowed)
fabricRoutes := pluginRoutes.Party("/Fabrics", capmiddleware.BasicAuth)
fabricRoutes.Get("/", caphandler.GetFabricResource)
fabricRoutes.Get("/{id}", caphandler.GetFabricData)
fabricRoutes.Get("/{id}/Switches", caphandler.GetSwitchCollection)
fabricRoutes.Get("/{id}/Switches/{rid}", caphandler.GetSwitchInfo)
fabricRoutes.Get("/{id}/Switches/{switchID}/Ports", caphandler.GetPortCollection)
fabricRoutes.Get("/{id}/Switches/{switchID}/Ports/{portID}", caphandler.GetPortInfo)
fabricRoutes.Patch("/{id}/Switches/{switchID}/Ports/{portID}", caphandler.PatchPort)
fabricRoutes.Get("/{id}/Zones", caphandler.GetZones)
fabricRoutes.Post("/{id}/Zones", caphandler.CreateZone)
fabricRoutes.Get("/{id}/Zones/{rid}", caphandler.GetZone)
fabricRoutes.Delete("/{id}/Zones/{rid}", caphandler.DeleteZone)
fabricRoutes.Patch("/{id}/Zones/{rid}", caphandler.UpdateZoneData)
fabricRoutes.Get("/{id}/AddressPools", caphandler.GetAddressPoolCollection)
fabricRoutes.Post("/{id}/AddressPools", caphandler.CreateAddressPool)
fabricRoutes.Get("/{id}/AddressPools/{rid}", caphandler.GetAddressPoolInfo)
fabricRoutes.Delete("/{id}/AddressPools/{rid}", caphandler.DeleteAddressPoolInfo)
fabricRoutes.Get("/{id}/Endpoints", caphandler.GetEndpointCollection)
fabricRoutes.Post("/{id}/Endpoints", caphandler.CreateEndpoint)
fabricRoutes.Get("/{id}/Endpoints/{rid}", caphandler.GetEndpointInfo)
fabricRoutes.Delete("/{id}/Endpoints/{rid}", caphandler.DeleteEndpointInfo)
managers := pluginRoutes.Party("/Managers")
managers.Get("/", caphandler.GetManagersCollection)
managers.Get("/{id}", caphandler.GetManagersInfo)
taskmon := pluginRoutes.Party("/taskmon")
taskmon.Get("/{TaskID}", caphandler.GetTaskMonitor)
task := pluginRoutes.Party("/TaskService")
task.Get("/", caphandler.GetTaskService)
task.Get("/Tasks", caphandler.GetTaskService)
task.Get("/Tasks/{TaskID}", caphandler.GetTaskService)
task.Get("/Tasks/{TaskID}/SubTasks", caphandler.GetTaskService)
task.Get("/Tasks/{TaskID}/SubTasks/{subTaskID}", caphandler.GetTaskService)
task.Delete("/Tasks/{TaskID}", caphandler.GetTaskService)
return app
}
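// Editor's note: an illustrative way to exercise the routes registered above.
// The host, port and basic-auth credentials are placeholders, not values
// defined in this file (they come from the plugin configuration):
//
//	curl -k -u <user>:<password> https://<plugin-host>:<plugin-port>/ODIM/v1/Chassis
//	curl -k -u <user>:<password> https://<plugin-host>:<plugin-port>/ODIM/v1/Fabrics/<fabricID>/Switches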
func eventsrouters() {
app := iris.New()
app.Post(config.Data.EventConf.DestURI, caphandler.RedfishEvents)
conf := &lutilconf.HTTPConfig{
Certificate: &config.Data.KeyCertConf.Certificate,
PrivateKey: &config.Data.KeyCertConf.PrivateKey,
CACertificate: &config.Data.KeyCertConf.RootCACertificate,
ServerAddress: config.Data.EventConf.ListenerHost,
ServerPort: config.Data.EventConf.ListenerPort,
}
evtServer, err := conf.GetHTTPServerObj()
if err != nil {
log.Fatal("while initializing event server, PluginCiscoACI got: " + err.Error())
}
app.Run(iris.Server(evtServer))
}
// initializePluginStatus sets the plugin status
func initializePluginStatus() {
caputilities.Status.Available = "yes"
caputilities.Status.Uptime = time.Now().Format(time.RFC3339)
go sendStartupEvent()
}
// initializeACIData reads the required fabric, switch and port data from ACI and stores it in the data store
func initializeACIData() {
aciNodesData, err := caputilities.GetFabricNodeData()
if err != nil {
log.Fatal("while intializing ACI Data PluginCiscoACI got: " + err.Error())
}
for _, aciNodeData := range aciNodesData {
switchID := uuid.NewV4().String() + ":" + aciNodeData.NodeId
fabricID := config.Data.RootServiceUUID + ":" + aciNodeData.FabricId
fabricExists := true
fabricData, err := capmodel.GetFabric(fabricID)
if err != nil {
if errors.Is(err, db.ErrorKeyNotFound) {
fabricExists = false
data := &capdata.Fabric{
SwitchData: []string{
switchID,
},
PodID: aciNodeData.PodId,
}
if err := capmodel.SaveFabric(fabricID, data); err != nil {
log.Fatal("storing " + fabricID + " fabric failed with " + err.Error())
}
} else {
log.Fatal("fetching " + fabricID + " fabric failed with " + err.Error())
}
}
if !checkSwitchIDExists(fabricData.SwitchData, aciNodeData.NodeId) {
if fabricExists {
fabricData.SwitchData = append(fabricData.SwitchData, switchID)
fabricData.PodID = aciNodeData.PodId
if err := capmodel.UpdateFabric(fabricID, &fabricData); err != nil {
log.Fatal("updating " + fabricID + " fabric failed with " + err.Error())
}
}
switchData, chassisData := getSwitchData(fabricID, aciNodeData, switchID)
if err := capmodel.SaveSwitchChassis(chassisData.ID, chassisData); err != nil {
log.Fatal("storing " + chassisData.ID + " chassis failed with " + err.Error())
}
if err := capmodel.SaveSwitch(switchID, switchData); err != nil {
log.Fatal("storing " + switchID + " switch failed with " + err.Error())
}
// adding logic to collect the ports data
portData, err := caputilities.GetPortData(aciNodeData.PodId, aciNodeData.NodeId)
if err != nil {
log.Fatal("while intializing ACI Port Data PluginCiscoACI got: " + err.Error())
}
parsePortData(portData, switchID, fabricID)
}
}
	// TODO:
	// register for the ACI events
return
}
// parsePortData parses the portData and stores it in the inmemory
func parsePortData(portResponseData *capmodel.PortCollectionResponse, switchID, fabricID string) {
var portData []string
for _, imdata := range portResponseData.IMData {
portAttributes := imdata.PhysicalInterface.Attributes
id := portAttributes["id"].(string)
id = strings.Replace(id, "/", "-", -1)
portID := uuid.NewV4().String() + ":" + id
portData = append(portData, portID)
portInfo := dmtfmodel.Port{
ODataContext: "/ODIM/v1/$metadata#Port.Port",
ODataType: "#Port.v1_3_0.Port",
ODataID: fmt.Sprintf("/ODIM/v1/Fabrics/%s/Switches/%s/Ports/%s", fabricID, switchID, portID),
ID: portID,
Name: "Port-" + portAttributes["id"].(string),
PortID: portAttributes["id"].(string),
PortProtocol: "Ethernet",
PortType: "BidirectionalPort",
LinkNetworkTechnology: "Ethernet",
}
mtu, err := strconv.Atoi(portAttributes["mtu"].(string))
if err != nil {
log.Error("Unable to get mtu for the port" + portID)
}
portInfo.MaxFrameSize = mtu
if err = capmodel.SavePort(portInfo.ODataID, &portInfo); err != nil {
log.Fatal("storing " + portInfo.ODataID + " port failed with " + err.Error())
}
}
if err := capmodel.SaveSwitchPort(switchID, portData); err != nil {
log.Fatal("storing port data of switch " + switchID + " failed with " + err.Error())
}
}
func getSwitchData(fabricID string, fabricNodeData *models.FabricNodeMember, switchID string) (*dmtfmodel.Switch, *dmtfmodel.Chassis) {
switchUUIDData := strings.Split(switchID, ":")
var switchData = dmtfmodel.Switch{
ODataContext: "/ODIM/v1/$metadata#Switch.Switch",
ODataType: "#Switch.v1_4_0.Switch",
ODataID: "/ODIM/v1/Fabrics/" + fabricID + "/Switches/" + switchID,
ID: switchID,
Name: fabricNodeData.Name,
SwitchType: "Ethernet",
UUID: switchUUIDData[0],
SerialNumber: fabricNodeData.Serial,
}
podID, err := strconv.Atoi(fabricNodeData.PodId)
if err != nil {
log.Fatal("Converstion of PODID" + fabricNodeData.PodId + " failed")
}
nodeID, err := strconv.Atoi(fabricNodeData.NodeId)
if err != nil {
log.Fatal("Converstion of NodeID" + fabricNodeData.NodeId + " failed")
}
log.Info("Getting the switchData for NodeID" + fabricNodeData.NodeId)
switchRespData, err := caputilities.GetSwitchInfo(podID, nodeID)
if err != nil {
log.Fatal("Unable to get the Switch info:" + err.Error())
}
switchData.FirmwareVersion = switchRespData.SystemAttributes.Version
switchChassisData, healthChassisData, err := caputilities.GetSwitchChassisInfo(fabricNodeData.PodId, fabricNodeData.NodeId)
if err != nil {
log.Fatal("Unable to get the Switch Chassis info for node " + fabricNodeData.NodeId + " :" + err.Error())
}
switchData.Manufacturer = switchChassisData.IMData[0].SwitchChassisData.Attributes["vendor"].(string)
switchData.Model = switchChassisData.IMData[0].SwitchChassisData.Attributes["model"].(string)
chassisID := switchChassisData.IMData[0].SwitchChassisData.Attributes["id"].(string)
chassisUUID := uuid.NewV4().String()
var chassisHealth string
//take health value
data := healthChassisData.IMData[0].HealthData.Attributes
currentHealthValue := data["cur"].(string)
healthValue, err := strconv.Atoi(currentHealthValue)
if err != nil {
log.Fatal("Unable to convert current Health value:" + currentHealthValue + " go the error" + err.Error())
}
if healthValue > 90 {
chassisHealth = "OK"
	} else if healthValue >= 30 {
chassisHealth = "Warning"
} else {
chassisHealth = "Critical"
}
var chassisData = dmtfmodel.Chassis{
Ocontext: "/ODIM/v1/$metadata#Chassis.Chassis",
Otype: "#Chassis.v1_4_0.Chassis",
Oid: "/ODIM/v1/Chassis/" + chassisUUID + ":" + chassisID,
ID: chassisUUID + ":" + chassisID,
Name: fabricNodeData.Name + "_chassis",
ChassisType: "RackMount",
UUID: chassisUUID,
SerialNumber: switchChassisData.IMData[0].SwitchChassisData.Attributes["ser"].(string),
Manufacturer: switchChassisData.IMData[0].SwitchChassisData.Attributes["vendor"].(string),
Model: switchChassisData.IMData[0].SwitchChassisData.Attributes["model"].(string),
PowerState: switchChassisData.IMData[0].SwitchChassisData.Attributes["operSt"].(string),
Status: &dmtfmodel.Status{
State: "Enabled",
Health: chassisHealth,
},
Links: &dmtfmodel.Links{
Switches: []*dmtfmodel.Link{
&dmtfmodel.Link{
Oid: switchData.ODataID,
},
},
},
}
switchData.Links = &dmtfmodel.SwitchLinks{
Chassis: &dmtfmodel.Link{
Oid: chassisData.Oid,
},
}
return &switchData, &chassisData
}
func checkSwitchIDExists(switchIDs []string, nodeID string) (exists bool) {
for _, switchid := range switchIDs {
if strings.HasSuffix(switchid, ":"+nodeID) {
return true
}
}
return false
}
// sendStartupEvent is for sending startup event
func sendStartupEvent() {
// grace wait time for plugin to be functional
time.Sleep(3 * time.Second)
var pluginIP string
if pluginIP = os.Getenv("ASSIGNED_POD_IP"); pluginIP == "" {
pluginIP = config.Data.PluginConf.Host
}
startupEvt := common.PluginStatusEvent{
Name: "Plugin startup event",
Type: "PluginStarted",
Timestamp: time.Now().String(),
OriginatorID: pluginIP,
}
request, _ := json.Marshal(startupEvt)
event := common.Events{
IP: net.JoinHostPort(config.Data.PluginConf.Host, config.Data.PluginConf.Port),
Request: request,
EventType: "PluginStartUp",
}
done := make(chan bool)
events := []interface{}{event}
go common.RunWriteWorkers(caphandler.In, events, 1, done)
log.Info("successfully sent startup event")
}
| [
"\"PLUGIN_CONFIG_FILE_PATH\"",
"\"ASSIGNED_POD_IP\""
]
| []
| [
"ASSIGNED_POD_IP",
"PLUGIN_CONFIG_FILE_PATH"
]
| [] | ["ASSIGNED_POD_IP", "PLUGIN_CONFIG_FILE_PATH"] | go | 2 | 0 | |
functionaltests/utils.py | import os
from subprocess import check_call, check_output
CODE_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), "source"))
ROOT_PATH = os.path.abspath(os.path.join(CODE_PATH, "../.."))
def run_python(python_binary, command, extra_args=(), output=False,
               additional_env=None):
    """
    Run a Python program from the functional-test source directory.

    If output=True the command's output is returned (via check_output) and a
    non-zero exit status raises CalledProcessError; otherwise the command is
    simply run with check_call.
    """
    args = [python_binary, os.path.join(CODE_PATH, command)] + list(extra_args)
    # Bind the runner separately instead of reusing the `command` parameter,
    # and avoid a mutable default argument for additional_env.
    runner = check_output if output else check_call
    env = os.environ.copy()
    env.update(additional_env or {})
    return runner(args, env=env)
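# Editor's note: an illustrative call of run_python; the interpreter path,
# script name and environment variable are placeholders, not files or settings
# shipped with this module.
#
#   output = run_python("/usr/bin/python3", "example_script.py",
#                       extra_args=["--flag"], output=True,
#                       additional_env={"EXAMPLE_VAR": "1"})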
| []
| []
| []
| [] | [] | python | 0 | 0 | |
disquest-postgres.py | # --------- Notes ---------
# This version uses PostgreSQL as the backend database
# The reason why is that SQLite3 locks up very fast and is not recommended for cogs like these, where high read/write speeds are key
# Make sure to have an PostgreSQL server running, and a database called "disquest"
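# A sample .env file (values are placeholders) that the load_dotenv() call below
# expects to find next to this script:
#
# Postgres_Username=postgres
# Postgres_Password=change-me
# Postgres_Server_IP=127.0.0.1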
import asyncio
import math
import os
import random
import discord
import uvloop
from discord.commands import slash_command
from discord.ext import commands
from dotenv import load_dotenv
from sqlalchemy import (BigInteger, Column, Integer, MetaData, Sequence, Table,
func, select)
from sqlalchemy.ext.asyncio import create_async_engine
load_dotenv()
# Make sure to create an .env file and add the env values
Password = os.getenv("Postgres_Password")
IP = os.getenv("Postgres_Server_IP")
Username = os.getenv("Postgres_Username")
class disaccount:
def __init__(self, ctx):
self.id = ctx.author.id
self.gid = ctx.guild.id
async def getxp(self):
meta = MetaData()
engine = create_async_engine(
f"postgresql+asyncpg://{Username}:{Password}@{IP}:5432/disquest"
)
users = Table(
"users",
meta,
Column(
"tracking_id",
Integer,
Sequence("tracking_id"),
primary_key=True,
autoincrement=True,
),
Column("id", BigInteger),
Column("gid", BigInteger),
Column("xp", Integer),
)
async with engine.connect() as conn:
s = select(users.c.xp).where(
users.c.id == self.id, users.c.gid == self.gid)
results = await conn.execute(s)
results_fetched = results.fetchone()
if results_fetched is None:
insert_new = users.insert().values(xp=0, id=self.id, gid=self.gid)
await conn.execute(insert_new)
else:
for row in results_fetched:
return row
async def setxp(self, xp):
meta = MetaData()
engine = create_async_engine(
f"postgresql+asyncpg://{Username}:{Password}@{IP}:5432/rin-disquest"
)
users = Table(
"users",
meta,
Column(
"tracking_id",
Integer,
Sequence("tracking_id"),
primary_key=True,
autoincrement=True,
),
Column("id", BigInteger),
Column("gid", BigInteger),
Column("xp", Integer),
)
async with engine.begin() as conn:
update_values = (
users.update()
.values(xp=xp)
.filter(users.c.id == self.id)
.filter(users.c.gid == self.gid)
)
await conn.execute(update_values)
async def addxp(self, offset):
pxp = await self.getxp()
pxp += offset
await self.setxp(pxp)
class lvl:
def near(xp):
return round(xp / 100)
def next(xp):
return math.ceil(xp / 100)
def cur(xp):
return int(xp / 100)
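# Editor's note: a quick worked example of the level math above, with an
# arbitrary XP value of 250 (100 XP per level):
#   lvl.cur(250)  -> 2 (int(2.5), current level)
#   lvl.next(250) -> 3 (ceil(2.5), next level boundary, i.e. 300 XP)
#   lvl.near(250) -> 2 (round(2.5) is 2 under Python 3's banker's rounding)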
class DisQuest(commands.Cog):
def __init__(self, bot):
self.bot = bot
@slash_command(
name="mylvl",
description="Displays your activity level!",
guild_ids=[866199405090308116],
)
async def mylvl(self, ctx):
user = disaccount(ctx)
xp = await user.getxp()
embedVar = discord.Embed(color=discord.Color.from_rgb(255, 217, 254))
embedVar.add_field(
name="User", value=f"{ctx.author.mention}", inline=True)
embedVar.add_field(name="LVL", value=f"{lvl.cur(xp)}", inline=True)
embedVar.add_field(
name="XP", value=f"{xp}/{lvl.next(xp)*100}", inline=True)
await ctx.respond(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class DisQuestV2(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(
name="rank", help="Displays the most active members of your server!"
)
@slash_command(
name="rank",
description="Displays the most active members of your server!",
guild_ids=[866199405090308116],
)
async def rank(self, ctx):
gid = ctx.guild.id
meta = MetaData()
engine = create_async_engine(
f"postgresql+asyncpg://{Username}:{Password}@{IP}:5432/rin-disquest"
)
users = Table(
"users",
meta,
Column(
"tracking_id",
Integer,
Sequence("tracking_id"),
primary_key=True,
autoincrement=True,
),
Column("id", BigInteger),
Column("gid", BigInteger),
Column("xp", Integer),
)
async with engine.connect() as conn:
s = (
select(Column("id", BigInteger), Column("xp", Integer))
.where(users.c.gid == gid)
.order_by(users.c.xp.desc())
)
results = await conn.execute(s)
members = list(results.fetchall())
for i, mem in enumerate(members):
members[
i
] = f"{i}. {(await self.bot.fetch_user(mem[0])).name} | XP. {mem[1]}\n"
embedVar = discord.Embed(
color=discord.Color.from_rgb(254, 255, 217))
embedVar.description = f"**Server Rankings**\n{''.join(members)}"
await ctx.respond(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class DisQuestV3(commands.Cog):
def __init__(self, bot):
self.bot = bot
@slash_command(
name="globalrank",
description="Displays the most active members of all servers that this bot is connected to!",
guild_ids=[866199405090308116],
)
async def grank(self, ctx):
meta = MetaData()
engine = create_async_engine(
f"postgresql+asyncpg://{Username}:{Password}@{IP}:5432/rin-disquest"
)
users = Table(
"users",
meta,
Column(
"tracking_id",
Integer,
Sequence("tracking_id"),
primary_key=True,
autoincrement=True,
),
Column("id", BigInteger),
Column("gid", BigInteger),
Column("xp", Integer),
)
async with engine.connect() as conn:
s = (
select(Column("id", Integer), func.sum(
users.c.xp).label("txp"))
.group_by(users.c.id)
.group_by(users.c.xp)
.order_by(users.c.xp.desc())
.limit(10)
)
results = await conn.execute(s)
results_fetched = results.fetchall()
members = list(results_fetched)
for i, mem in enumerate(members):
members[
i
] = f"{i}. {(await self.bot.fetch_user(mem[0])).name} | XP. {mem[1]}\n"
embedVar = discord.Embed(
color=discord.Color.from_rgb(217, 255, 251))
embedVar.description = f"**Global Rankings**\n{''.join(members)}"
await ctx.respond(embed=embedVar)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class DisQuestV4(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, ctx):
if ctx.author.bot:
return
user = disaccount(ctx)
reward = random.randint(0, 20)
await user.addxp(reward)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
def setup(bot):
bot.add_cog(DisQuest(bot))
bot.add_cog(DisQuestV2(bot))
bot.add_cog(DisQuestV3(bot))
bot.add_cog(DisQuestV4(bot)) | []
| []
| [
"Postgres_Password",
"Postgres_Server_IP",
"Postgres_Username"
]
| [] | ["Postgres_Password", "Postgres_Server_IP", "Postgres_Username"] | python | 3 | 0 | |
test/e2e_test.go | //
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build e2e
// +build e2e
package test
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http/httptest"
"net/url"
"os"
"path"
"path/filepath"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/registry"
"github.com/google/go-containerregistry/pkg/v1/random"
"github.com/google/go-containerregistry/pkg/v1/remote"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/sigstore/cosign/cmd/cosign/cli"
"github.com/sigstore/cosign/cmd/cosign/cli/attach"
"github.com/sigstore/cosign/cmd/cosign/cli/attest"
"github.com/sigstore/cosign/cmd/cosign/cli/download"
"github.com/sigstore/cosign/cmd/cosign/cli/generate"
"github.com/sigstore/cosign/cmd/cosign/cli/options"
"github.com/sigstore/cosign/cmd/cosign/cli/publickey"
"github.com/sigstore/cosign/cmd/cosign/cli/sign"
"github.com/sigstore/cosign/cmd/cosign/cli/upload"
cliverify "github.com/sigstore/cosign/cmd/cosign/cli/verify"
"github.com/sigstore/cosign/pkg/cosign"
"github.com/sigstore/cosign/pkg/cosign/kubernetes"
cremote "github.com/sigstore/cosign/pkg/cosign/remote"
ociremote "github.com/sigstore/cosign/pkg/oci/remote"
"github.com/sigstore/cosign/pkg/sget"
sigs "github.com/sigstore/cosign/pkg/signature"
"github.com/sigstore/sigstore/pkg/signature/payload"
)
const (
serverEnv = "REKOR_SERVER"
rekorURL = "https://rekor.sigstore.dev"
)
var keyPass = []byte("hello")
var passFunc = func(_ bool) ([]byte, error) {
return keyPass, nil
}
var verify = func(keyRef, imageRef string, checkClaims bool, annotations map[string]interface{}, attachment string) error {
cmd := cliverify.VerifyCommand{
KeyRef: keyRef,
RekorURL: rekorURL,
CheckClaims: checkClaims,
Annotations: sigs.AnnotationsMap{Annotations: annotations},
Attachment: attachment,
}
args := []string{imageRef}
return cmd.Exec(context.Background(), args)
}
func TestSignVerify(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// So should download
mustErr(download.SignatureCmd(ctx, options.RegistryOptions{}, imgName), t)
// Now sign the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify and download should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
must(download.SignatureCmd(ctx, options.RegistryOptions{}, imgName), t)
// Look for a specific annotation
mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}, ""), t)
// Sign the image with an annotation
annotations := map[string]interface{}{"foo": "bar"}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, annotations, []string{imgName}, "", true, "", false, false, ""), t)
// It should match this time.
must(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}, ""), t)
// But two doesn't work
mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar", "baz": "bat"}, ""), t)
}
func TestSignVerifyClean(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, _ = mkimage(t, imgName)
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Now sign the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify and download should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
must(download.SignatureCmd(ctx, options.RegistryOptions{}, imgName), t)
// Now clean signature from the given image
must(cli.CleanCmd(ctx, options.RegistryOptions{}, imgName), t)
// It doesn't work
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
}
func TestAttestVerify(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-attest-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
verifyAttestation := cliverify.VerifyAttestationCommand{
KeyRef: pubKeyPath,
}
	// Fail case: invoked without the predicate type and policy flags set
mustErr(verifyAttestation.Exec(ctx, []string{imgName}), t)
slsaAttestation := `{ "builder": { "id": "2" }, "recipe": {} }`
slsaAttestationPath := filepath.Join(td, "attestation.slsa.json")
if err := os.WriteFile(slsaAttestationPath, []byte(slsaAttestation), 0600); err != nil {
t.Fatal(err)
}
// Now attest the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(attest.AttestCmd(ctx, ko, options.RegistryOptions{}, imgName, "", false, slsaAttestationPath, false,
"custom", time.Duration(30*time.Second)), t)
// Use cue to verify attestation
policyPath := filepath.Join(td, "policy.cue")
verifyAttestation.PredicateType = "slsaprovenance"
verifyAttestation.Policies = []string{policyPath}
	// Fail case: the attested builder id is "2", so this policy must not verify
	cuePolicy := `builder: id: "1"`
	if err := os.WriteFile(policyPath, []byte(cuePolicy), 0600); err != nil {
		t.Fatal(err)
	}
	mustErr(verifyAttestation.Exec(ctx, []string{imgName}), t)
// Success case
cuePolicy = `builder: id: "2"`
if err := os.WriteFile(policyPath, []byte(cuePolicy), 0600); err != nil {
t.Fatal(err)
}
must(verifyAttestation.Exec(ctx, []string{imgName}), t)
// Look for a specific annotation
mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}, ""), t)
}
func TestBundle(t *testing.T) {
// turn on the tlog
defer setenv(t, options.ExperimentalEnv, "1")()
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
ko := sign.KeyOpts{
KeyRef: privKeyPath,
PassFunc: passFunc,
RekorURL: rekorURL,
}
// Sign the image
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Make sure verify works
must(verify(pubKeyPath, imgName, true, nil, ""), t)
// Make sure offline verification works with bundling
// use rekor prod since we have hardcoded the public key
os.Setenv(serverEnv, "notreal")
must(verify(pubKeyPath, imgName, true, nil, ""), t)
}
func TestDuplicateSign(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
ref, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// So should download
mustErr(download.SignatureCmd(ctx, options.RegistryOptions{}, imgName), t)
// Now sign the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify and download should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
must(download.SignatureCmd(ctx, options.RegistryOptions{}, imgName), t)
// Signing again should work just fine...
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
se, err := ociremote.SignedEntity(ref, ociremote.WithRemoteOptions(registryClientOpts(ctx)...))
must(err, t)
sigs, err := se.Signatures()
must(err, t)
signatures, err := sigs.Get()
must(err, t)
if len(signatures) > 1 {
t.Errorf("expected there to only be one signature, got %v", signatures)
}
}
func TestKeyURLVerify(t *testing.T) {
// TODO: re-enable once distroless images are being signed by the new client
t.Skip()
// Verify that an image can be verified via key url
keyRef := "https://raw.githubusercontent.com/GoogleContainerTools/distroless/main/cosign.pub"
img := "gcr.io/distroless/base:latest"
must(verify(keyRef, img, true, nil, ""), t)
}
func TestGenerateKeyPairEnvVar(t *testing.T) {
defer setenv(t, "COSIGN_PASSWORD", "foo")()
keys, err := cosign.GenerateKeyPair(generate.GetPass)
if err != nil {
t.Fatal(err)
}
if _, err := cosign.LoadECDSAPrivateKey(keys.PrivateBytes, []byte("foo")); err != nil {
t.Fatal(err)
}
}
func TestGenerateKeyPairK8s(t *testing.T) {
td := t.TempDir()
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if err := os.Chdir(td); err != nil {
t.Fatal(err)
}
defer func() {
os.Chdir(wd)
}()
password := "foo"
defer setenv(t, "COSIGN_PASSWORD", password)()
ctx := context.Background()
name := "cosign-secret"
namespace := "default"
if err := kubernetes.KeyPairSecret(ctx, fmt.Sprintf("k8s://%s/%s", namespace, name), generate.GetPass); err != nil {
t.Fatal(err)
}
// make sure the secret actually exists
client, err := kubernetes.Client()
if err != nil {
t.Fatal(err)
}
s, err := client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if v, ok := s.Data["cosign.password"]; !ok || string(v) != password {
t.Fatalf("password is incorrect, got %v expected %v", v, "foo")
}
}
func TestMultipleSignatures(t *testing.T) {
repo, stop := reg(t)
defer stop()
td1 := t.TempDir()
td2 := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, priv1, pub1 := keypair(t, td1)
_, priv2, pub2 := keypair(t, td2)
ctx := context.Background()
// Verify should fail at first for both keys
mustErr(verify(pub1, imgName, true, nil, ""), t)
mustErr(verify(pub2, imgName, true, nil, ""), t)
// Now sign the image with one key
ko := sign.KeyOpts{KeyRef: priv1, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify should work with that one, but not the other
must(verify(pub1, imgName, true, nil, ""), t)
mustErr(verify(pub2, imgName, true, nil, ""), t)
// Now sign with the other key too
ko.KeyRef = priv2
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify should work with both
must(verify(pub1, imgName, true, nil, ""), t)
must(verify(pub2, imgName, true, nil, ""), t)
}
func TestSignBlob(t *testing.T) {
blob := "someblob"
td1 := t.TempDir()
td2 := t.TempDir()
t.Cleanup(func() {
os.RemoveAll(td1)
os.RemoveAll(td2)
})
bp := filepath.Join(td1, blob)
if err := os.WriteFile(bp, []byte(blob), 0644); err != nil {
t.Fatal(err)
}
_, privKeyPath1, pubKeyPath1 := keypair(t, td1)
_, _, pubKeyPath2 := keypair(t, td2)
ctx := context.Background()
ko1 := sign.KeyOpts{
KeyRef: pubKeyPath1,
}
ko2 := sign.KeyOpts{
KeyRef: pubKeyPath2,
}
// Verify should fail on a bad input
mustErr(cliverify.VerifyBlobCmd(ctx, ko1, "", "badsig", blob), t)
mustErr(cliverify.VerifyBlobCmd(ctx, ko2, "", "badsig", blob), t)
// Now sign the blob with one key
ko := sign.KeyOpts{
KeyRef: privKeyPath1,
PassFunc: passFunc,
}
sig, err := sign.SignBlobCmd(ctx, ko, options.RegistryOptions{}, bp, true, "", time.Duration(30*time.Second))
if err != nil {
t.Fatal(err)
}
// Now verify should work with that one, but not the other
must(cliverify.VerifyBlobCmd(ctx, ko1, "", string(sig), bp), t)
mustErr(cliverify.VerifyBlobCmd(ctx, ko2, "", string(sig), bp), t)
}
func TestGenerate(t *testing.T) {
repo, stop := reg(t)
defer stop()
imgName := path.Join(repo, "cosign-e2e")
_, desc, cleanup := mkimage(t, imgName)
defer cleanup()
// Generate the payload for the image, and check the digest.
b := bytes.Buffer{}
must(generate.GenerateCmd(context.Background(), options.RegistryOptions{}, imgName, nil, &b), t)
ss := payload.SimpleContainerImage{}
must(json.Unmarshal(b.Bytes(), &ss), t)
equals(desc.Digest.String(), ss.Critical.Image.DockerManifestDigest, t)
// Now try with some annotations.
b.Reset()
a := map[string]interface{}{"foo": "bar"}
must(generate.GenerateCmd(context.Background(), options.RegistryOptions{}, imgName, a, &b), t)
must(json.Unmarshal(b.Bytes(), &ss), t)
equals(desc.Digest.String(), ss.Critical.Image.DockerManifestDigest, t)
equals(ss.Optional["foo"], "bar", t)
}
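// keypair generates a cosign key pair under td, writes cosign.key and
// cosign.pub to that directory, and returns the keys along with both file paths.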
func keypair(t *testing.T, td string) (*cosign.Keys, string, string) {
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if err := os.Chdir(td); err != nil {
t.Fatal(err)
}
defer func() {
os.Chdir(wd)
}()
keys, err := cosign.GenerateKeyPair(passFunc)
if err != nil {
t.Fatal(err)
}
privKeyPath := filepath.Join(td, "cosign.key")
if err := os.WriteFile(privKeyPath, keys.PrivateBytes, 0600); err != nil {
t.Fatal(err)
}
pubKeyPath := filepath.Join(td, "cosign.pub")
if err := os.WriteFile(pubKeyPath, keys.PublicBytes, 0600); err != nil {
t.Fatal(err)
}
return keys, privKeyPath, pubKeyPath
}
func TestUploadDownload(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
ctx := context.Background()
testCases := map[string]struct {
signature string
signatureType attach.SignatureArgType
expectedErr bool
}{
"file containing signature": {
signature: "testsignaturefile",
signatureType: attach.FileSignature,
expectedErr: false,
},
"raw signature as argument": {
signature: "testsignatureraw",
signatureType: attach.RawSignature,
expectedErr: false,
},
"empty signature as argument": {
signature: "",
signatureType: attach.RawSignature,
expectedErr: true,
},
}
imgName := path.Join(repo, "cosign-e2e")
for testName, testCase := range testCases {
t.Run(testName, func(t *testing.T) {
ref, _, cleanup := mkimage(t, imgName)
payload := "testpayload"
payloadPath := mkfile(payload, td, t)
signature := base64.StdEncoding.EncodeToString([]byte(testCase.signature))
var sigRef string
if testCase.signatureType == attach.FileSignature {
sigRef = mkfile(signature, td, t)
} else {
sigRef = signature
}
// Upload it!
err := attach.SignatureCmd(ctx, options.RegistryOptions{}, sigRef, payloadPath, imgName)
if testCase.expectedErr {
mustErr(err, t)
} else {
must(err, t)
}
// Now download it!
se, err := ociremote.SignedEntity(ref, ociremote.WithRemoteOptions(registryClientOpts(ctx)...))
must(err, t)
sigs, err := se.Signatures()
must(err, t)
signatures, err := sigs.Get()
must(err, t)
if testCase.expectedErr {
if len(signatures) != 0 {
t.Fatalf("unexpected signatures %d, wanted 0", len(signatures))
}
} else {
if len(signatures) != 1 {
t.Fatalf("unexpected signatures %d, wanted 1", len(signatures))
}
if b64sig, err := signatures[0].Base64Signature(); err != nil {
t.Fatalf("Base64Signature() = %v", err)
} else if diff := cmp.Diff(b64sig, signature); diff != "" {
t.Error(diff)
}
if p, err := signatures[0].Payload(); err != nil {
t.Fatalf("Payload() = %v", err)
} else if diff := cmp.Diff(p, []byte(payload)); diff != "" {
t.Error(diff)
}
}
// Now delete it!
cleanup()
})
}
}
func TestUploadBlob(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
ctx := context.Background()
imgName := path.Join(repo, "/cosign-upload-e2e")
payload := "testpayload"
payloadPath := mkfile(payload, td, t)
// Upload it!
files := []cremote.File{cremote.FileFromFlag(payloadPath)}
must(upload.BlobCmd(ctx, options.RegistryOptions{}, files, "", imgName), t)
// Check it
ref, err := name.ParseReference(imgName)
if err != nil {
t.Fatal(err)
}
// Now download it with sget (this should fail by tag)
if err := sget.New(imgName, "", os.Stdout).Do(ctx); err == nil {
t.Error("expected download to fail")
}
img, err := remote.Image(ref)
if err != nil {
t.Fatal(err)
}
dgst, err := img.Digest()
if err != nil {
t.Fatal(err)
}
result := &bytes.Buffer{}
// But pass by digest
if err := sget.New(imgName+"@"+dgst.String(), "", result).Do(ctx); err != nil {
t.Fatal(err)
}
b, err := io.ReadAll(result)
if err != nil {
t.Fatal(err)
}
if string(b) != payload {
t.Errorf("expected contents to be %s, got %s", payload, string(b))
}
}
func TestAttachSBOM(t *testing.T) {
repo, stop := reg(t)
defer stop()
ctx := context.Background()
imgName := path.Join(repo, "sbom-image")
img, _, cleanup := mkimage(t, imgName)
defer cleanup()
out := bytes.Buffer{}
_, err := download.SBOMCmd(ctx, options.RegistryOptions{}, img.Name(), &out)
if err == nil {
t.Fatal("Expected error")
}
t.Log(out.String())
out.Reset()
// Upload it!
must(attach.SBOMCmd(ctx, options.RegistryOptions{}, "./testdata/bom-go-mod.spdx", "spdx", imgName), t)
sboms, err := download.SBOMCmd(ctx, options.RegistryOptions{}, imgName, &out)
if err != nil {
t.Fatal(err)
}
t.Log(out.String())
if len(sboms) != 1 {
t.Fatalf("Expected one sbom, got %d", len(sboms))
}
want, err := os.ReadFile("./testdata/bom-go-mod.spdx")
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(string(want), sboms[0]); diff != "" {
t.Errorf("diff: %s", diff)
}
// Generate key pairs to sign the sbom
td1 := t.TempDir()
td2 := t.TempDir()
_, privKeyPath1, pubKeyPath1 := keypair(t, td1)
_, _, pubKeyPath2 := keypair(t, td2)
// Verify should fail on a bad input
mustErr(verify(pubKeyPath1, imgName, true, nil, "sbom"), t)
mustErr(verify(pubKeyPath2, imgName, true, nil, "sbom"), t)
// Now sign the sbom with one key
ko1 := sign.KeyOpts{KeyRef: privKeyPath1, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko1, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", false, false, "sbom"), t)
// Now verify should work with that one, but not the other
must(verify(pubKeyPath1, imgName, true, nil, "sbom"), t)
mustErr(verify(pubKeyPath2, imgName, true, nil, "sbom"), t)
}
func setenv(t *testing.T, k, v string) func() {
if err := os.Setenv(k, v); err != nil {
t.Fatalf("error setitng env: %v", err)
}
return func() {
os.Unsetenv(k)
}
}
func TestTlog(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// Now sign the image without the tlog
ko := sign.KeyOpts{
KeyRef: privKeyPath,
PassFunc: passFunc,
RekorURL: rekorURL,
}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
// Now we turn on the tlog!
defer setenv(t, options.ExperimentalEnv, "1")()
// Verify shouldn't work since we haven't put anything in it yet.
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// Sign again with the tlog env var on
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// And now verify works!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
}
func TestGetPublicKeyCustomOut(t *testing.T) {
td := t.TempDir()
keys, privKeyPath, _ := keypair(t, td)
ctx := context.Background()
outFile := "output.pub"
outPath := filepath.Join(td, outFile)
outWriter, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE, 0600)
must(err, t)
pk := publickey.Pkopts{
KeyRef: privKeyPath,
}
must(publickey.GetPublicKey(ctx, pk, publickey.NamedWriter{Name: outPath, Writer: outWriter}, passFunc), t)
output, err := os.ReadFile(outPath)
must(err, t)
equals(keys.PublicBytes, output, t)
}
func mkfile(contents, td string, t *testing.T) string {
f, err := os.CreateTemp(td, "")
if err != nil {
t.Fatal(err)
}
defer f.Close()
if _, err := f.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
return f.Name()
}
func mkimage(t *testing.T, n string) (name.Reference, *remote.Descriptor, func()) {
ref, err := name.ParseReference(n, name.WeakValidation)
if err != nil {
t.Fatal(err)
}
img, err := random.Image(512, 5)
if err != nil {
t.Fatal(err)
}
regClientOpts := registryClientOpts(context.Background())
if err := remote.Write(ref, img, regClientOpts...); err != nil {
t.Fatal(err)
}
remoteImage, err := remote.Get(ref, regClientOpts...)
if err != nil {
t.Fatal(err)
}
cleanup := func() {
_ = remote.Delete(ref, regClientOpts...)
ref, _ := ociremote.SignatureTag(ref.Context().Digest(remoteImage.Descriptor.Digest.String()), ociremote.WithRemoteOptions(regClientOpts...))
_ = remote.Delete(ref, regClientOpts...)
}
return ref, remoteImage, cleanup
}
func must(err error, t *testing.T) {
t.Helper()
if err != nil {
t.Fatal(err)
}
}
func mustErr(err error, t *testing.T) {
t.Helper()
if err == nil {
t.Fatal("expected error")
}
}
func equals(v1, v2 interface{}, t *testing.T) {
if diff := cmp.Diff(v1, v2); diff != "" {
t.Error(diff)
}
}
func reg(t *testing.T) (string, func()) {
repo := os.Getenv("COSIGN_TEST_REPO")
if repo != "" {
return repo, func() {}
}
t.Log("COSIGN_TEST_REPO unset, using fake registry")
r := httptest.NewServer(registry.New())
u, err := url.Parse(r.URL)
if err != nil {
t.Fatal(err)
}
return u.Host, r.Close
}
func registryClientOpts(ctx context.Context) []remote.Option {
return []remote.Option{
remote.WithAuthFromKeychain(authn.DefaultKeychain),
remote.WithContext(ctx),
}
}
| ["\"COSIGN_TEST_REPO\""] | [] | ["COSIGN_TEST_REPO"] | [] | ["COSIGN_TEST_REPO"] | go | 1 | 0 |
examples/v2/ws-update-order/main.go | package main
import (
"log"
"os"
"time"
"context"
"github.com/bitfinexcom/bitfinex-api-go/v2"
"github.com/bitfinexcom/bitfinex-api-go/v2/websocket"
)
func SubmitTestOrder(c *websocket.Client) {
log.Printf("Submitting new order")
err := c.SubmitOrder(context.Background(), &bitfinex.OrderNewRequest{
Symbol: "tBTCUSD",
CID: time.Now().Unix() / 1000,
Amount: 0.02,
Type: "EXCHANGE LIMIT",
Price: 5000,
})
if err != nil {
log.Fatal(err)
}
}
func UpdateTestOrder(orderId int64, c *websocket.Client) {
log.Printf("Updating order")
err := c.SubmitUpdateOrder(context.Background(), &bitfinex.OrderUpdateRequest{
ID: orderId,
Amount: 0.04,
})
if err != nil {
log.Fatal(err)
}
}
func main() {
key := os.Getenv("BFX_KEY")
secret := os.Getenv("BFX_SECRET")
p := websocket.NewDefaultParameters()
p.URL = "wss://test.bitfinex.com/ws/2"
c := websocket.NewWithParams(p).Credentials(key, secret)
err := c.Connect()
if err != nil {
log.Fatalf("connecting authenticated websocket: %s", err)
}
defer c.Close()
// Begin listening to incoming messages
for obj := range c.Listen() {
switch obj.(type) {
case error:
log.Fatalf("channel closed: %s", obj)
break
case *websocket.AuthEvent:
// on authorize create new order
SubmitTestOrder(c)
case *bitfinex.OrderNew:
// new order received so update it
id := obj.(*bitfinex.OrderNew).ID
UpdateTestOrder(id, c)
default:
log.Printf("MSG RECV: %#v", obj)
}
}
time.Sleep(time.Second * 10)
}
| ["\"BFX_KEY\"", "\"BFX_SECRET\""] | [] | ["BFX_SECRET", "BFX_KEY"] | [] | ["BFX_SECRET", "BFX_KEY"] | go | 2 | 0 |
Master/tools/build.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# build.py — build a sketch using arduino-builder
#
# Wrapper script around arduino-builder which accepts some ESP8266-specific
# options and translates them into FQBN
#
# Copyright © 2016 Ivan Grokhotkov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
#
from __future__ import print_function
import sys
import os
import argparse
import subprocess
import tempfile
import shutil
def compile(tmp_dir, sketch, tools_dir, hardware_dir, ide_path, f, args):
cmd = ide_path + '/arduino-builder '
cmd += '-compile -logger=human '
cmd += '-build-path "' + tmp_dir + '" '
cmd += '-tools "' + ide_path + '/tools-builder" '
if args.library_path:
for lib_dir in args.library_path:
cmd += '-libraries "' + lib_dir + '" '
cmd += '-hardware "' + ide_path + '/hardware" '
if args.hardware_dir:
for hw_dir in args.hardware_dir:
cmd += '-hardware "' + hw_dir + '" '
else:
cmd += '-hardware "' + hardware_dir + '" '
# Debug=Serial,DebugLevel=Core____
cmd += '-fqbn=esp8266com:esp8266:{board_name}:' \
'CpuFrequency={cpu_freq},' \
'FlashFreq={flash_freq},' \
'FlashMode={flash_mode},' \
'UploadSpeed=921600,' \
'FlashSize={flash_size},' \
'ResetMethod=nodemcu'.format(**vars(args))
if args.debug_port and args.debug_level:
cmd += 'Debug={debug_port},DebugLevel={debug_level}'.format(**vars(args))
cmd += ' '
cmd += '-ide-version=10607 '
cmd += '-warnings={warnings} '.format(**vars(args))
if args.verbose:
cmd += '-verbose '
cmd += sketch
if args.verbose:
print('Building: ' + cmd, file=f)
cmds = cmd.split(' ')
p = subprocess.Popen(cmds, stdout=f, stderr=subprocess.STDOUT)
p.wait()
return p.returncode
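# For reference (a rough sketch, not produced by this script): with the argparse
# defaults below (generic board, 80 MHz CPU, qio flash mode, 40 MHz flash, 512K64),
# compile() assembles an FQBN fragment along the lines of:
#   -fqbn=esp8266com:esp8266:generic:CpuFrequency=80,FlashFreq=40,FlashMode=qio,UploadSpeed=921600,FlashSize=512K64,ResetMethod=nodemcu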
def parse_args():
parser = argparse.ArgumentParser(description='Sketch build helper')
parser.add_argument('-v', '--verbose', help='Enable verbose output',
action='store_true')
parser.add_argument('-i', '--ide_path', help='Arduino IDE path')
parser.add_argument('-p', '--build_path', help='Build directory')
parser.add_argument('-l', '--library_path', help='Additional library path',
action='append')
parser.add_argument('-d', '--hardware_dir', help='Additional hardware path',
action='append')
parser.add_argument('-b', '--board_name', help='Board name', default='generic')
parser.add_argument('-s', '--flash_size', help='Flash size', default='512K64',
choices=['512K0', '512K64', '1M512', '4M1M', '4M3M'])
parser.add_argument('-f', '--cpu_freq', help='CPU frequency', default=80,
choices=[80, 160], type=int)
parser.add_argument('-m', '--flash_mode', help='Flash mode', default='qio',
choices=['dio', 'qio'])
parser.add_argument('-w', '--warnings', help='Compilation warnings level',
default='none', choices=['none', 'all', 'more'])
parser.add_argument('-o', '--output_binary', help='File name for output binary')
parser.add_argument('-k', '--keep', action='store_true',
help='Don\'t delete temporary build directory')
parser.add_argument('--flash_freq', help='Flash frequency', default=40,
type=int, choices=[40, 80])
parser.add_argument('--debug_port', help='Debug port',
choices=['Serial', 'Serial1'])
parser.add_argument('--debug_level', help='Debug level')
parser.add_argument('sketch_path', help='Sketch file path')
return parser.parse_args()
def main():
args = parse_args()
ide_path = args.ide_path
if not ide_path:
ide_path = os.environ.get('ARDUINO_IDE_PATH')
if not ide_path:
print("Please specify Arduino IDE path via --ide_path option"
"or ARDUINO_IDE_PATH environment variable.", file=sys.stderr)
return 2
sketch_path = args.sketch_path
tmp_dir = args.build_path
created_tmp_dir = False
if not tmp_dir:
tmp_dir = tempfile.mkdtemp()
created_tmp_dir = True
tools_dir = os.path.dirname(os.path.realpath(__file__)) + '/../tools'
# this is not the correct hardware folder to add.
hardware_dir = os.path.dirname(os.path.realpath(__file__)) + '/../cores'
output_name = tmp_dir + '/' + os.path.basename(sketch_path) + '.bin'
if args.verbose:
print("Sketch: ", sketch_path)
print("Build dir: ", tmp_dir)
print("Output: ", output_name)
if args.verbose:
f = sys.stdout
else:
f = open(tmp_dir + '/build.log', 'w')
res = compile(tmp_dir, sketch_path, tools_dir, hardware_dir, ide_path, f, args)
if res != 0:
return res
if args.output_binary is not None:
shutil.copy(output_name, args.output_binary)
if created_tmp_dir and not args.keep:
shutil.rmtree(tmp_dir, ignore_errors=True)
if __name__ == '__main__':
sys.exit(main())
| [] | [] | ["ARDUINO_IDE_PATH"] | [] | ["ARDUINO_IDE_PATH"] | python | 1 | 0 |
testgrid/tgrun/pkg/runner/loop.go | package runner
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"time"
"github.com/pkg/errors"
tghandlers "github.com/replicatedhq/kurl/testgrid/tgapi/pkg/handlers"
"github.com/replicatedhq/kurl/testgrid/tgrun/pkg/runner/types"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"kubevirt.io/client-go/kubecli"
)
var lastScheduledInstance = time.Now().Add(-time.Minute)
const Namespace = "default"
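// MainRunLoop polls the testgrid API for queued instances, schedules a VM run
// for each dequeued test, and cleans up finished VMIs and their data between polls.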
func MainRunLoop(runnerOptions types.RunnerOptions) error {
fmt.Println("beginning main run loop")
tempDir, err := ioutil.TempDir("", "")
if err != nil {
return errors.Wrap(err, "failed to create temp dir")
}
defer os.RemoveAll(tempDir)
for {
if err := CleanUpVMIs(); err != nil {
fmt.Println("VMI clean up ERROR: ", err)
}
if err := CleanUpData(); err != nil {
fmt.Println("PV clean up ERROR: ", err)
}
canSchedule, err := canScheduleNewVM()
if err != nil {
return errors.Wrap(err, "failed to check if can schedule")
}
if !canSchedule {
time.Sleep(time.Second * 15)
continue
}
// hit the API and get the next
resp, err := http.DefaultClient.Get(fmt.Sprintf("%s/v1/dequeue/instance", runnerOptions.APIEndpoint))
if err != nil {
return errors.Wrap(err, "failed to get next run")
}
// read and close the body inline; a deferred close would never run inside this infinite loop
body, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return errors.Wrap(err, "failed to read body")
}
dequeueInstanceResponse := []tghandlers.DequeueInstanceResponse{}
if err := json.Unmarshal(body, &dequeueInstanceResponse); err != nil {
return errors.Wrapf(err, "failed to unmarshal: %s", body)
}
if len(dequeueInstanceResponse) == 0 {
time.Sleep(time.Second * 15)
continue
}
lastScheduledInstance = time.Now()
uploadProxyURL, err := getUploadProxyURL()
if err != nil {
return errors.Wrap(err, "failed to get upload proxy url")
}
for _, dequeuedInstance := range dequeueInstanceResponse {
singleTest := types.SingleRun{
ID: dequeuedInstance.ID,
OperatingSystemName: dequeuedInstance.OperatingSystemName,
OperatingSystemVersion: dequeuedInstance.OperatingSystemVersion,
OperatingSystemImage: dequeuedInstance.OperatingSystemImage,
PVCName: fmt.Sprintf("%s-disk", dequeuedInstance.ID),
KurlYAML: dequeuedInstance.KurlYAML,
KurlURL: dequeuedInstance.KurlURL,
KurlRef: dequeuedInstance.KurlRef,
TestGridAPIEndpoint: runnerOptions.APIEndpoint,
DockerEmail: os.Getenv("DOCKERHUB_EMAIL"),
DockerUser: os.Getenv("DOCKERHUB_USER"),
DockerPass: os.Getenv("DOCKERHUB_PASS"),
}
if err := Run(singleTest, uploadProxyURL, tempDir); err != nil {
return errors.Wrap(err, "failed to run test")
}
}
}
}
// canScheduleNewVM returns a boolean indicating whether
// the current cluster can handle scheduling another
// test instance at this time
func canScheduleNewVM() (bool, error) {
clientset, err := GetClientset()
if err != nil {
return false, errors.Wrap(err, "failed to get clientset")
}
pods, err := clientset.CoreV1().Pods(Namespace).List(metav1.ListOptions{})
if err != nil {
return false, errors.Wrap(err, "failed to get pods in the default namespace")
}
// if there are pending pods, hold off until there are no longer pending pods
for _, pod := range pods.Items {
if pod.Status.Phase == v1.PodPending {
return false, nil
}
}
return true, nil
}
func getUploadProxyURL() (string, error) {
clientset, err := GetClientset()
if err != nil {
return "", errors.Wrap(err, "failed to get clientset")
}
svc, err := clientset.CoreV1().Services("cdi").Get("cdi-uploadproxy", metav1.GetOptions{})
if err != nil {
return "", errors.Wrap(err, "failed to get upload proxy service")
}
return fmt.Sprintf("https://%s", svc.Spec.ClusterIP), nil
}
func GetRestConfig() (*restclient.Config, error) {
kubeconfig := filepath.Join(homeDir(), ".kube", "config")
if os.Getenv("KUBECONFIG") != "" {
kubeconfig = os.Getenv("KUBECONFIG")
}
// use the current context in kubeconfig
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, errors.Wrap(err, "failed to build config")
}
return config, nil
}
func GetClientset() (*kubernetes.Clientset, error) {
config, err := GetRestConfig()
if err != nil {
return nil, errors.Wrap(err, "failed to get restconfig")
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, errors.Wrap(err, "failed to create clientset")
}
return clientset, nil
}
func GetKubevirtClientset() (kubecli.KubevirtClient, error) {
config, err := GetRestConfig()
if err != nil {
return nil, errors.Wrap(err, "failed to get restconfig")
}
virtClient, err := kubecli.GetKubevirtClientFromRESTConfig(config)
if err != nil {
return nil, errors.Wrap(err, "failed to create kubevirt clientset")
}
return virtClient, nil
}
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
| ["\"DOCKERHUB_EMAIL\"", "\"DOCKERHUB_USER\"", "\"DOCKERHUB_PASS\"", "\"KUBECONFIG\"", "\"KUBECONFIG\"", "\"HOME\"", "\"USERPROFILE\""] | [] | ["DOCKERHUB_EMAIL", "DOCKERHUB_PASS", "KUBECONFIG", "USERPROFILE", "HOME", "DOCKERHUB_USER"] | [] | ["DOCKERHUB_EMAIL", "DOCKERHUB_PASS", "KUBECONFIG", "USERPROFILE", "HOME", "DOCKERHUB_USER"] | go | 6 | 0 |
cmd/drone-server/server.go | // Copyright 2018 Drone.IO Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"crypto/tls"
"errors"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"golang.org/x/crypto/acme/autocert"
"golang.org/x/sync/errgroup"
"github.com/cncd/logging"
"github.com/cncd/pipeline/pipeline/rpc/proto"
"github.com/cncd/pubsub"
"github.com/drone/drone/plugins/sender"
"github.com/drone/drone/remote"
"github.com/drone/drone/router"
"github.com/drone/drone/router/middleware"
droneserver "github.com/drone/drone/server"
"github.com/drone/drone/store"
"github.com/Sirupsen/logrus"
"github.com/gin-gonic/contrib/ginrus"
"github.com/urfave/cli"
oldcontext "golang.org/x/net/context"
)
var flags = []cli.Flag{
cli.BoolFlag{
EnvVar: "DRONE_DEBUG",
Name: "debug",
Usage: "enable server debug mode",
},
cli.StringFlag{
EnvVar: "DRONE_SERVER_HOST,DRONE_HOST",
Name: "server-host",
Usage: "server fully qualified url (<scheme>://<host>)",
},
cli.StringFlag{
EnvVar: "DRONE_SERVER_ADDR",
Name: "server-addr",
Usage: "server address",
Value: ":8000",
},
cli.StringFlag{
EnvVar: "DRONE_SERVER_CERT",
Name: "server-cert",
Usage: "server ssl cert path",
},
cli.StringFlag{
EnvVar: "DRONE_SERVER_KEY",
Name: "server-key",
Usage: "server ssl key path",
},
cli.BoolFlag{
EnvVar: "DRONE_LETS_ENCRYPT",
Name: "lets-encrypt",
Usage: "enable let's encrypt",
},
cli.BoolFlag{
EnvVar: "DRONE_QUIC",
Name: "quic",
Usage: "enable quic",
},
cli.StringFlag{
EnvVar: "DRONE_WWW",
Name: "www",
Usage: "serve the website from disk",
Hidden: true,
},
cli.StringSliceFlag{
EnvVar: "DRONE_ADMIN",
Name: "admin",
Usage: "list of admin users",
},
cli.StringSliceFlag{
EnvVar: "DRONE_ORGS",
Name: "orgs",
Usage: "list of approved organizations",
},
cli.BoolFlag{
EnvVar: "DRONE_OPEN",
Name: "open",
Usage: "enable open user registration",
},
cli.StringFlag{
EnvVar: "DRONE_REPO_CONFIG",
Name: "repo-config",
Usage: "file path for the drone config",
Value: ".drone.yml",
},
cli.DurationFlag{
EnvVar: "DRONE_SESSION_EXPIRES",
Name: "session-expires",
Usage: "session expiration time",
Value: time.Hour * 72,
},
cli.StringSliceFlag{
EnvVar: "DRONE_ESCALATE",
Name: "escalate",
Usage: "images to run in privileged mode",
Value: &cli.StringSlice{
"plugins/docker",
"plugins/gcr",
"plugins/ecr",
},
},
cli.StringSliceFlag{
EnvVar: "DRONE_VOLUME",
Name: "volume",
},
cli.StringSliceFlag{
EnvVar: "DRONE_NETWORK",
Name: "network",
},
cli.StringFlag{
EnvVar: "DRONE_AGENT_SECRET,DRONE_SECRET",
Name: "agent-secret",
Usage: "server-agent shared password",
},
cli.StringFlag{
EnvVar: "DRONE_SECRET_ENDPOINT",
Name: "secret-service",
Usage: "secret plugin endpoint",
},
cli.StringFlag{
EnvVar: "DRONE_REGISTRY_ENDPOINT",
Name: "registry-service",
Usage: "registry plugin endpoint",
},
cli.StringFlag{
EnvVar: "DRONE_GATEKEEPER_ENDPOINT",
Name: "gating-service",
Usage: "gated build endpoint",
},
cli.StringFlag{
EnvVar: "DRONE_DATABASE_DRIVER,DATABASE_DRIVER",
Name: "driver",
Usage: "database driver",
Value: "sqlite3",
},
cli.StringFlag{
EnvVar: "DRONE_DATABASE_DATASOURCE,DATABASE_CONFIG",
Name: "datasource",
Usage: "database driver configuration string",
Value: "drone.sqlite",
},
//
// resource limit parameters
//
cli.Int64Flag{
EnvVar: "DRONE_LIMIT_MEM_SWAP",
Name: "limit-mem-swap",
Usage: "maximum swappable memory allowed in bytes",
},
cli.Int64Flag{
EnvVar: "DRONE_LIMIT_MEM",
Name: "limit-mem",
Usage: "maximum memory allowed in bytes",
},
cli.Int64Flag{
EnvVar: "DRONE_LIMIT_SHM_SIZE",
Name: "limit-shm-size",
Usage: "docker compose /dev/shm allowed in bytes",
},
cli.Int64Flag{
EnvVar: "DRONE_LIMIT_CPU_QUOTA",
Name: "limit-cpu-quota",
Usage: "impose a cpu quota",
},
cli.Int64Flag{
EnvVar: "DRONE_LIMIT_CPU_SHARES",
Name: "limit-cpu-shares",
Usage: "change the cpu shares",
},
cli.StringFlag{
EnvVar: "DRONE_LIMIT_CPU_SET",
Name: "limit-cpu-set",
Usage: "set the cpus allowed to execute containers",
},
//
// remote parameters
//
cli.BoolFlag{
EnvVar: "DRONE_GITHUB",
Name: "github",
Usage: "github driver is enabled",
},
cli.StringFlag{
EnvVar: "DRONE_GITHUB_URL",
Name: "github-server",
Usage: "github server address",
Value: "https://github.com",
},
cli.StringFlag{
EnvVar: "DRONE_GITHUB_CONTEXT",
Name: "github-context",
Usage: "github status context",
Value: "continuous-integration/drone",
},
cli.StringFlag{
EnvVar: "DRONE_GITHUB_CLIENT",
Name: "github-client",
Usage: "github oauth2 client id",
},
cli.StringFlag{
EnvVar: "DRONE_GITHUB_SECRET",
Name: "github-secret",
Usage: "github oauth2 client secret",
},
cli.StringSliceFlag{
EnvVar: "DRONE_GITHUB_SCOPE",
Name: "github-scope",
Usage: "github oauth scope",
Value: &cli.StringSlice{
"repo",
"repo:status",
"user:email",
"read:org",
},
},
cli.StringFlag{
EnvVar: "DRONE_GITHUB_GIT_USERNAME",
Name: "github-git-username",
Usage: "github machine user username",
},
cli.StringFlag{
EnvVar: "DRONE_GITHUB_GIT_PASSWORD",
Name: "github-git-password",
Usage: "github machine user password",
},
cli.BoolTFlag{
EnvVar: "DRONE_GITHUB_MERGE_REF",
Name: "github-merge-ref",
Usage: "github pull requests use merge ref",
},
cli.BoolFlag{
EnvVar: "DRONE_GITHUB_PRIVATE_MODE",
Name: "github-private-mode",
Usage: "github is running in private mode",
},
cli.BoolFlag{
EnvVar: "DRONE_GITHUB_SKIP_VERIFY",
Name: "github-skip-verify",
Usage: "github skip ssl verification",
},
cli.BoolFlag{
EnvVar: "DRONE_GOGS",
Name: "gogs",
Usage: "gogs driver is enabled",
},
cli.StringFlag{
EnvVar: "DRONE_GOGS_URL",
Name: "gogs-server",
Usage: "gogs server address",
Value: "https://github.com",
},
cli.StringFlag{
EnvVar: "DRONE_GOGS_GIT_USERNAME",
Name: "gogs-git-username",
Usage: "gogs service account username",
},
cli.StringFlag{
EnvVar: "DRONE_GOGS_GIT_PASSWORD",
Name: "gogs-git-password",
Usage: "gogs service account password",
},
cli.BoolFlag{
EnvVar: "DRONE_GOGS_PRIVATE_MODE",
Name: "gogs-private-mode",
Usage: "gogs private mode enabled",
},
cli.BoolFlag{
EnvVar: "DRONE_GOGS_SKIP_VERIFY",
Name: "gogs-skip-verify",
Usage: "gogs skip ssl verification",
},
cli.BoolFlag{
EnvVar: "DRONE_GITEA",
Name: "gitea",
Usage: "gitea driver is enabled",
},
cli.StringFlag{
EnvVar: "DRONE_GITEA_URL",
Name: "gitea-server",
Usage: "gitea server address",
Value: "https://try.gitea.io",
},
cli.StringFlag{
EnvVar: "DRONE_GITEA_GIT_USERNAME",
Name: "gitea-git-username",
Usage: "gitea service account username",
},
cli.StringFlag{
EnvVar: "DRONE_GITEA_GIT_PASSWORD",
Name: "gitea-git-password",
Usage: "gitea service account password",
},
cli.BoolFlag{
EnvVar: "DRONE_GITEA_PRIVATE_MODE",
Name: "gitea-private-mode",
Usage: "gitea private mode enabled",
},
cli.BoolFlag{
EnvVar: "DRONE_GITEA_SKIP_VERIFY",
Name: "gitea-skip-verify",
Usage: "gitea skip ssl verification",
},
cli.BoolFlag{
EnvVar: "DRONE_BITBUCKET",
Name: "bitbucket",
Usage: "bitbucket driver is enabled",
},
cli.StringFlag{
EnvVar: "DRONE_BITBUCKET_CLIENT",
Name: "bitbucket-client",
Usage: "bitbucket oauth2 client id",
},
cli.StringFlag{
EnvVar: "DRONE_BITBUCKET_SECRET",
Name: "bitbucket-secret",
Usage: "bitbucket oauth2 client secret",
},
cli.BoolFlag{
EnvVar: "DRONE_GITLAB",
Name: "gitlab",
Usage: "gitlab driver is enabled",
},
cli.StringFlag{
EnvVar: "DRONE_GITLAB_URL",
Name: "gitlab-server",
Usage: "gitlab server address",
Value: "https://gitlab.com",
},
cli.StringFlag{
EnvVar: "DRONE_GITLAB_CLIENT",
Name: "gitlab-client",
Usage: "gitlab oauth2 client id",
},
cli.StringFlag{
EnvVar: "DRONE_GITLAB_SECRET",
Name: "gitlab-secret",
Usage: "gitlab oauth2 client secret",
},
cli.StringFlag{
EnvVar: "DRONE_GITLAB_GIT_USERNAME",
Name: "gitlab-git-username",
Usage: "gitlab service account username",
},
cli.StringFlag{
EnvVar: "DRONE_GITLAB_GIT_PASSWORD",
Name: "gitlab-git-password",
Usage: "gitlab service account password",
},
cli.BoolFlag{
EnvVar: "DRONE_GITLAB_SKIP_VERIFY",
Name: "gitlab-skip-verify",
Usage: "gitlab skip ssl verification",
},
cli.BoolFlag{
EnvVar: "DRONE_GITLAB_PRIVATE_MODE",
Name: "gitlab-private-mode",
Usage: "gitlab is running in private mode",
},
cli.BoolFlag{
EnvVar: "DRONE_GITLAB_V3_API",
Name: "gitlab-v3-api",
Usage: "gitlab is running the v3 api",
},
cli.BoolFlag{
EnvVar: "DRONE_STASH",
Name: "stash",
Usage: "stash driver is enabled",
},
cli.StringFlag{
EnvVar: "DRONE_STASH_URL",
Name: "stash-server",
Usage: "stash server address",
},
cli.StringFlag{
EnvVar: "DRONE_STASH_CONSUMER_KEY",
Name: "stash-consumer-key",
Usage: "stash oauth1 consumer key",
},
cli.StringFlag{
EnvVar: "DRONE_STASH_CONSUMER_RSA",
Name: "stash-consumer-rsa",
Usage: "stash oauth1 private key file",
},
cli.StringFlag{
EnvVar: "DRONE_STASH_CONSUMER_RSA_STRING",
Name: "stash-consumer-rsa-string",
Usage: "stash oauth1 private key string",
},
cli.StringFlag{
EnvVar: "DRONE_STASH_GIT_USERNAME",
Name: "stash-git-username",
Usage: "stash service account username",
},
cli.StringFlag{
EnvVar: "DRONE_STASH_GIT_PASSWORD",
Name: "stash-git-password",
Usage: "stash service account password",
},
cli.BoolFlag{
EnvVar: "DRONE_STASH_SKIP_VERIFY",
Name: "stash-skip-verify",
Usage: "stash skip ssl verification",
},
cli.BoolFlag{
EnvVar: "DRONE_CODING",
Name: "coding",
Usage: "coding driver is enabled",
},
cli.StringFlag{
EnvVar: "DRONE_CODING_URL",
Name: "coding-server",
Usage: "coding server address",
Value: "https://coding.net",
},
cli.StringFlag{
EnvVar: "DRONE_CODING_CLIENT",
Name: "coding-client",
Usage: "coding oauth2 client id",
},
cli.StringFlag{
EnvVar: "DRONE_CODING_SECRET",
Name: "coding-secret",
Usage: "coding oauth2 client secret",
},
cli.StringSliceFlag{
EnvVar: "DRONE_CODING_SCOPE",
Name: "coding-scope",
Usage: "coding oauth scope",
Value: &cli.StringSlice{
"user",
"project",
"project:depot",
},
},
cli.StringFlag{
EnvVar: "DRONE_CODING_GIT_MACHINE",
Name: "coding-git-machine",
Usage: "coding machine name",
Value: "git.coding.net",
},
cli.StringFlag{
EnvVar: "DRONE_CODING_GIT_USERNAME",
Name: "coding-git-username",
Usage: "coding machine user username",
},
cli.StringFlag{
EnvVar: "DRONE_CODING_GIT_PASSWORD",
Name: "coding-git-password",
Usage: "coding machine user password",
},
cli.BoolFlag{
EnvVar: "DRONE_CODING_SKIP_VERIFY",
Name: "coding-skip-verify",
Usage: "coding skip ssl verification",
},
cli.DurationFlag{
EnvVar: "DRONE_KEEPALIVE_MIN_TIME",
Name: "keepalive-min-time",
Usage: "server-side enforcement policy on the minimum amount of time a client should wait before sending a keepalive ping.",
},
}
func server(c *cli.Context) error {
// debug level if requested by user
if c.Bool("debug") {
logrus.SetLevel(logrus.DebugLevel)
} else {
logrus.SetLevel(logrus.WarnLevel)
}
// must configure the drone_host variable
if c.String("server-host") == "" {
logrus.Fatalln("DRONE_HOST is not properly configured")
}
if !strings.Contains(c.String("server-host"), "://") {
logrus.Fatalln(
"DRONE_HOST must be <scheme>://<hostname> format",
)
}
if strings.HasSuffix(c.String("server-host"), "/") {
logrus.Fatalln(
"DRONE_HOST must not have trailing slash",
)
}
remote_, err := SetupRemote(c)
if err != nil {
logrus.Fatal(err)
}
store_ := setupStore(c)
setupEvilGlobals(c, store_, remote_)
// we are switching from gin to httpservermux|treemux,
// so if this code looks strange, that is why.
tree := setupTree(c)
// setup the server and start the listener
handler := router.Load(
tree,
ginrus.Ginrus(logrus.StandardLogger(), time.RFC3339, true),
middleware.Version,
middleware.Config(c),
middleware.Store(c, store_),
middleware.Remote(remote_),
)
var g errgroup.Group
// start the grpc server
g.Go(func() error {
lis, err := net.Listen("tcp", ":9000")
if err != nil {
logrus.Error(err)
return err
}
auther := &authorizer{
password: c.String("agent-secret"),
}
s := grpc.NewServer(
grpc.StreamInterceptor(auther.streamInterceptor),
grpc.UnaryInterceptor(auther.unaryIntercaptor),
grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: c.Duration("keepalive-min-time"),
}),
)
ss := new(droneserver.DroneServer)
ss.Queue = droneserver.Config.Services.Queue
ss.Logger = droneserver.Config.Services.Logs
ss.Pubsub = droneserver.Config.Services.Pubsub
ss.Remote = remote_
ss.Store = store_
ss.Host = droneserver.Config.Server.Host
proto.RegisterDroneServer(s, ss)
err = s.Serve(lis)
if err != nil {
logrus.Error(err)
return err
}
return nil
})
// start the server with tls enabled
if c.String("server-cert") != "" {
g.Go(func() error {
return http.ListenAndServe(":http", http.HandlerFunc(redirect))
})
g.Go(func() error {
serve := &http.Server{
Addr: ":https",
Handler: handler,
TLSConfig: &tls.Config{
NextProtos: []string{"http/1.1"}, // disable h2 because Safari :(
},
}
return serve.ListenAndServeTLS(
c.String("server-cert"),
c.String("server-key"),
)
})
return g.Wait()
}
// start the server without tls enabled
if !c.Bool("lets-encrypt") {
return http.ListenAndServe(
c.String("server-addr"),
handler,
)
}
// start the server with lets encrypt enabled
// listen on ports 443 and 80
address, err := url.Parse(c.String("server-host"))
if err != nil {
return err
}
dir := cacheDir()
os.MkdirAll(dir, 0700)
manager := &autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(address.Host),
Cache: autocert.DirCache(dir),
}
g.Go(func() error {
return http.ListenAndServe(":http", manager.HTTPHandler(http.HandlerFunc(redirect)))
})
g.Go(func() error {
serve := &http.Server{
Addr: ":https",
Handler: handler,
TLSConfig: &tls.Config{
GetCertificate: manager.GetCertificate,
NextProtos: []string{"http/1.1"}, // disable h2 because Safari :(
},
}
return serve.ListenAndServeTLS("", "")
})
return g.Wait()
}
// HACK please excuse the message during this period of heavy refactoring.
// We are currently transitioning from storing services (ie database, queue)
// in the gin.Context to storing them in a struct. We are also moving away
// from gin to gorilla. We will temporarily use global during our refactoring
// which will be removing in the final implementation.
func setupEvilGlobals(c *cli.Context, v store.Store, r remote.Remote) {
// storage
droneserver.Config.Storage.Files = v
droneserver.Config.Storage.Config = v
// services
droneserver.Config.Services.Queue = setupQueue(c, v)
droneserver.Config.Services.Logs = logging.New()
droneserver.Config.Services.Pubsub = pubsub.New()
droneserver.Config.Services.Pubsub.Create(context.Background(), "topic/events")
droneserver.Config.Services.Registries = setupRegistryService(c, v)
droneserver.Config.Services.Secrets = setupSecretService(c, v)
droneserver.Config.Services.Senders = sender.New(v, v)
droneserver.Config.Services.Environ = setupEnvironService(c, v)
droneserver.Config.Services.Limiter = setupLimiter(c, v)
if endpoint := c.String("gating-service"); endpoint != "" {
droneserver.Config.Services.Senders = sender.NewRemote(endpoint)
}
// limits
droneserver.Config.Pipeline.Limits.MemSwapLimit = c.Int64("limit-mem-swap")
droneserver.Config.Pipeline.Limits.MemLimit = c.Int64("limit-mem")
droneserver.Config.Pipeline.Limits.ShmSize = c.Int64("limit-shm-size")
droneserver.Config.Pipeline.Limits.CPUQuota = c.Int64("limit-cpu-quota")
droneserver.Config.Pipeline.Limits.CPUShares = c.Int64("limit-cpu-shares")
droneserver.Config.Pipeline.Limits.CPUSet = c.String("limit-cpu-set")
// server configuration
droneserver.Config.Server.Cert = c.String("server-cert")
droneserver.Config.Server.Key = c.String("server-key")
droneserver.Config.Server.Pass = c.String("agent-secret")
droneserver.Config.Server.Host = strings.TrimRight(c.String("server-host"), "/")
droneserver.Config.Server.Port = c.String("server-addr")
droneserver.Config.Server.RepoConfig = c.String("repo-config")
droneserver.Config.Server.SessionExpires = c.Duration("session-expires")
droneserver.Config.Pipeline.Networks = c.StringSlice("network")
droneserver.Config.Pipeline.Volumes = c.StringSlice("volume")
droneserver.Config.Pipeline.Privileged = c.StringSlice("escalate")
// droneserver.Config.Server.Open = cli.Bool("open")
// droneserver.Config.Server.Orgs = sliceToMap(cli.StringSlice("orgs"))
// droneserver.Config.Server.Admins = sliceToMap(cli.StringSlice("admin"))
}
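// authorizer provides gRPC stream and unary interceptors that reject any request
// whose metadata does not carry the shared agent password.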
type authorizer struct {
username string
password string
}
func (a *authorizer) streamInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
if err := a.authorize(stream.Context()); err != nil {
return err
}
return handler(srv, stream)
}
func (a *authorizer) unaryIntercaptor(ctx oldcontext.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
if err := a.authorize(ctx); err != nil {
return nil, err
}
return handler(ctx, req)
}
func (a *authorizer) authorize(ctx context.Context) error {
if md, ok := metadata.FromContext(ctx); ok {
if len(md["password"]) > 0 && md["password"][0] == a.password {
return nil
}
return errors.New("invalid agent token")
}
return errors.New("missing agent token")
}
func redirect(w http.ResponseWriter, req *http.Request) {
var serverHost string = droneserver.Config.Server.Host
serverHost = strings.TrimPrefix(serverHost, "http://")
serverHost = strings.TrimPrefix(serverHost, "https://")
req.URL.Scheme = "https"
req.URL.Host = serverHost
w.Header().Set("Strict-Transport-Security", "max-age=31536000")
http.Redirect(w, req, req.URL.String(), http.StatusMovedPermanently)
}
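// cacheDir returns the directory used to cache Let's Encrypt certificates,
// preferring $XDG_CACHE_HOME and falling back to $HOME/.cache.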
func cacheDir() string {
const base = "golang-autocert"
if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" {
return filepath.Join(xdg, base)
}
return filepath.Join(os.Getenv("HOME"), ".cache", base)
}
| ["\"XDG_CACHE_HOME\"", "\"HOME\""] | [] | ["HOME", "XDG_CACHE_HOME"] | [] | ["HOME", "XDG_CACHE_HOME"] | go | 2 | 0 |
utils/gen_doc.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
from io import StringIO
import io
import os
import sys
import datetime
import argparse
import numpy as np # type: ignore
from onnx import defs, FunctionProto, helper, OperatorStatus
from onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN
from onnx.backend.test.case import collect_snippets
from onnx.backend.sample.ops import collect_sample_implementations
from typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run-onnx-ops",
help="Output ONNXOps.td.inc content to stdout.",
action="store_true",
default=False)
parser.add_argument("--dry-run-op-build-table",
help="Output OpBuildTable.inc content to stdout.",
action="store_true",
default=False)
args = parser.parse_args()
# Manual specification of attribute defaults.
special_attr_defaults = dict([
# ("AveragePool.kernel_shape", ('ints', '{}')),
# ("MaxPool.kernel_shape", ('ints', '{}')),
# ("Cast.to", ('int', '0')),
# ("Concat.axis", ('int', '0')),
# ("Conv.group", ('int', '1')),
# ("Unsqueeze.axes", ('ints', '{}')),
# ("RNN.activation_alpha", ('floats', '{}')),
# ("RNN.activation_beta", ('floats', '{}')),
])
# Special operation importing handlers.
special_op_handler = dict([
("MaxPool", "ImportNodeMaxPool"),
("BatchNormalization", "ImportNodeBatchNormalization"),
("Pad", "ImportNodePad"),
("Reshape", "ImportNodeReshape"),
#("Transpose", "ImportNodeTranspose")
])
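# gen_op_importer consults this map: ops listed here are imported through the named
# custom handler rather than the generic buildOperation<mlir::ONNX...Op> template.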
# Operations supporting shape inference.
OpsWithShapeInference = [
'Exp', 'Tanh', 'Sinh', 'Cosh', 'Sigmoid', 'Relu', 'Add', 'Mul', 'Div',
'Sub', 'And', 'Or', 'Xor', 'Sum', 'Max', 'Min', 'MatMul', 'Gemm',
'LeakyRelu', 'Elu', 'Selu', 'HardSigmoid', 'Reshape', 'Reciprocal',
'Identity', 'Cos', 'Log', 'Transpose', 'Softmax', 'ReduceMax', 'ReduceMin',
'ReduceProd', 'ReduceSum', 'Softplus', 'Softsign', 'Sqrt', 'Unsqueeze',
'Sign', 'Constant', 'AveragePool', 'Abs', 'Conv', 'Concat', 'Neg'
]
# Operations supporting canonicalization.
OpsWithCanonicalizer = ['Add', 'Identity', 'Gemm', 'Conv']
# Operations who have operands that, if produced by constant operations, should
# be promoted to become an attribute (via attribute promotion).
#
# For each operation, a key/value pair is used to specify how attribute promotion
# should proceed. The key is the operation's name and the value is a list of
# tuples, whose first item is the attribute/operand name, and the second item is
# the index at which such operand occurs in the list of the operation's inputs.
OpsWithPromotableConstOperands = {"Reshape": [("shape", 1)]}
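# Purely illustrative (hypothetical) example: an entry {"Pad": [("pads", 1)]} would
# declare that Pad's second input (index 1, named "pads") may be promoted to an
# attribute when it is produced by a constant operation.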
# Add an Op in this list if the Op needs result type deduction which is required
# when writing declarative rewriting rules. Deduced type is always
# an UnrankedTensorType whose element type is the same as the first operand's
# element type.
#
# Currently, there are only two build methods generated:
# - one with operands and attributes having a separate parameter, and
# - one with operands and attributes having aggregated parameters.
custom_builder_ops_list = ['Abs', 'Mul', 'Exp', 'ReduceSum', 'ReduceSumSquare']
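# For each op in this list, gen_op_def emits roughly these two builders:
#   OpBuilder<"Builder *builder, OperationState &state, Value X, ..., Attribute A", [{...}]>
#   OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{...}]>
# both deriving an UnrankedTensorType result from the first operand's element type.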
SNIPPETS = collect_snippets()
SAMPLE_IMPLEMENTATIONS = collect_sample_implementations()
ONNX_ML = not bool(os.getenv('ONNX_ML') == '0')
ONNX_ML = False
sys.stderr.write("ONNX_ML {}\n".format(ONNX_ML))
if ONNX_ML:
ext = '-ml.md'
else:
ext = '.md'
def should_render_domain(domain): # type: (Text) -> bool
if domain == ONNX_ML_DOMAIN and not ONNX_ML:
return False
elif ONNX_ML and domain != ONNX_ML_DOMAIN:
return False
return True
def display_attr_type(v): # type: (OpSchema.AttrType) -> Text
assert isinstance(v, OpSchema.AttrType)
s = Text(v)
s = s[s.rfind('.') + 1:].lower()
if s[-1] == 's':
s = 'list of ' + s
return s
def get_unique_output_name(schema, name):
for input in schema.inputs:
if input.name == name:
return 'out_' + name
return name
def onnx_attr_type_to_mlir_attr_type(t):
onnx_attr_type = Text(t)
onnx_attr_type = onnx_attr_type[onnx_attr_type.rfind('.') + 1:].lower()
if onnx_attr_type == 'int':
mlir_attr_type = 'I64Attr'
elif onnx_attr_type == 'float':
mlir_attr_type = 'F32Attr'
elif onnx_attr_type == 'ints':
mlir_attr_type = 'I64ArrayAttr'
elif onnx_attr_type == 'floats':
mlir_attr_type = 'F32ArrayAttr'
elif onnx_attr_type == "string":
mlir_attr_type = 'StrAttr'
elif onnx_attr_type == "strings":
mlir_attr_type = 'StrArrayAttr'
else:
mlir_attr_type = 'AnyAttr'
#TODO: tensor and sparse tensor
return mlir_attr_type
#TODO: any better way to do this.
def tblgen_attr_type_to_cpp_type(t):
if 'I64Attr' in t:
cpp_type = 'IntegerAttr'
elif 'F32Attr' in t:
cpp_type = 'FloatAttr'
elif 'I64ArrayAttr' in t or 'F32ArrayAttr' in t:
cpp_type = 'ArrayAttr'
elif 'StrAttr' in t:
cpp_type = 'StringAttr'
elif 'strings' in t:
cpp_type = 'ArrayAttr'
else:
cpp_type = 'Attribute'
return cpp_type
def tblgen_operand_type_to_cpp_type(op_type):
if op_type.startswith('Variadic'):
mytype = 'ValueRange'
else:
mytype = 'Value'
return mytype
def np_type_to_tblgen_attr_type(tstr):
tfrom = np.array([
'bool', 'int8', 'int16', 'int32', 'int64', 'unkown', 'float16',
'float', 'double'
])
tto = np.array(
['I1', 'I8', 'I16', 'I32', 'I64', 'BF16', 'F16', 'F32', 'F64'])
index = -1
for i in range(len(tfrom)):
if tfrom[i] in tstr:
index = i
break
if index == -1:
print("error", tstr)
return ''
else:
return tto[index]
def get_allowed_elem_types(schema, input):
allowed_types_str = None
return allowed_types_str
# TODO: enable type constraints.
# if input.typeStr :
# tstr = input.typeStr
# else :
# return allwedTypeStr
# if schema.type_constraints:
# for type_constraint in schema.type_constraints:
# if type_constraint.type_param_str != tstr :
# continue
# allowedTypes = type_constraint.allowed_type_strs
# allowedTypeStr=''
# if (len(allowedTypes) > 0):
# t = convert_type(allowedTypes[0])
# if t == '' :
# return ''
# allowedTypeStr += t
# for allowedType in allowedTypes[1:]:
# t = convert_type(allowedType)
# if t == '' :
# return ''
# if not t in allowedTypeStr :
# allowedTypeStr += ', '+t
#
# return allowedTypeStr
#
# return allowedTypeStr
def inc_indent(indent=None):
return "" if indent is None else indent + ' ' * 2
def dec_indent(indent):
return indent[:-2]
def join_args(args):
return ", ".join(args)
def get_operands_or_results(schema, is_input):
value_list = schema.inputs if is_input else schema.outputs
if not value_list:
return OrderedDict()
def any_type_of(types):
assert isinstance(types, list)
if len(types) == 1:
return types[0]
else:
return "AnyTypeOf<[{}]>".format(", ".join(types))
name_to_types = OrderedDict()
for i, value in enumerate(value_list):
elem_types = get_allowed_elem_types(schema, value)
if elem_types is None:
types = ["AnyMemRef", "AnyTensor"]
else:
types = ["TensorOf<[{}]>", "MemRefOf<[{}]>"]
types = list(map(lambda x: x.format(elem_types), types))
# If operand is promotable to an attribute, then it must be
# nullable in case it migrates to be an attribute.
if schema.name in OpsWithPromotableConstOperands:
idxs = dict(OpsWithPromotableConstOperands[schema.name]).values()
if i in idxs:
types.append("NoneType")
if OpSchema.FormalParameterOption.Optional == value.option:
types.append("NoneType")
elif OpSchema.FormalParameterOption.Variadic == value.option:
if value.isHomogeneous:
types = ["Variadic<{}>".format(any_type_of(types))]
else:
#TODO handle(variadic, heterogeneous) "
sys.stderr.write("warning: (variadic, heterogeneous) for" + schema.name +
' ' + value.name + "\n")
# Since an output name can coincide with that of an input, we explicitly
# prefix such output names with "out_" for disambiguation.
if is_input:
value_name = value.name
else:
value_name = get_unique_output_name(schema, value.name)
name_to_types[value_name] = any_type_of(types)
return name_to_types
def get_attrs(schema):
def get_attr_type_optional(attr_type):
return 'OptionalAttr<{}>'.format(
onnx_attr_type_to_mlir_attr_type(attr_type))
def get_attr_type_with_default(attr_type, attr_default):
return 'DefaultValuedAttr<{}, "{}">'.format(
onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)
if not schema.attributes:
return OrderedDict()
name_to_type = OrderedDict()
for _, attr in sorted(schema.attributes.items()):
qualified_attr_name = "{}.{}".format(schema.name, attr.name)
if qualified_attr_name in special_attr_defaults:
name_to_type[attr.name] = get_attr_type_with_default(
*special_attr_defaults[qualified_attr_name])
# option holds either required or default value
elif attr.required:
name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(
attr.type)
elif attr.default_value.name:
def format_value(value): # type: (Any) -> Text
if isinstance(value, float):
formatted = str(np.round(value, 5))
# use default formatting, unless too long.
if (len(formatted) > 10):
formatted = str("({:e})".format(value))
return formatted
elif isinstance(
value,
(bytes, bytearray)) and sys.version_info[0] == 3:
return str(value.decode('utf-8'))
return str(value)
default_value = helper.get_attribute_value(attr.default_value)
if isinstance(default_value, list):
default_value = [format_value(val) for val in default_value]
default_value_str = '{}'.format(default_value)
default_value_str = default_value_str.replace('[', '{', 1)
default_value_str = default_value_str.replace(']', '}', 1)
if Text(attr.type) == "AttrType.STRINGS":
default_value_str = default_value_str.replace("'", '\\"')
else:
default_value_str = default_value_str.replace("'", '')
else:
default_value = format_value(default_value)
default_value_str = default_value
name_to_type[attr.name] = get_attr_type_with_default(
attr.type, default_value_str)
else:
name_to_type[attr.name] = get_attr_type_optional(attr.type)
return name_to_type
def get_promotable_const_operands_func(s, indent, const_operands_name_to_idx):
cpp_name_to_idx_literal = "{" + ", ".join([
"{{\"{}\", {}}}".format(*name_to_idx)
for name_to_idx in const_operands_name_to_idx
]) + "}"
s += indent + "let extraClassDeclaration = [{\n"
indent = inc_indent(indent)
s += indent + "std::map<std::string, size_t> promotableConstOperands() {\n"
indent = inc_indent(indent)
s += indent + "return {};\n".format(cpp_name_to_idx_literal)
indent = dec_indent(indent)
s += indent + "}\n"
indent = dec_indent(indent)
s += indent + "}];\n"
return s
def gen_op_def(schema):
indent = inc_indent()
s = 'def ONNX{0}Op:ONNX_Op<"{0}",\n'.format(schema.name)
# Generate decl for op traits.
traits = ["NoSideEffect"]
if schema.name in OpsWithShapeInference:
traits.append("DeclareOpInterfaceMethods<ShapeInferenceOpInterface>")
if schema.name in OpsWithPromotableConstOperands.keys():
traits.append("OpInterface<\"PromotableConstOperandsOpInterface\">")
s += inc_indent(indent) + '[{}]> {{\n'.format(join_args(traits))
# Generate decl for canonicalizer.
indent = inc_indent(indent)
if schema.name in OpsWithCanonicalizer:
s += indent + 'let hasCanonicalizer = 1;\n'
# Generate decl for summary.
s += indent + 'let summary = "ONNX {} operation";\n'.format(schema.name)
# Generate description.
s += indent + 'let description = [{\n'
if schema.doc:
lines = schema.doc.lstrip().splitlines()
for line in lines:
escaped_line = line.replace('"', '\\"')\
.replace('}]', '\\}\\]')
s += indent + '"{}"\n'.format(escaped_line)
s += indent + '}];\n'
# Generate ins (consisting of operands and attributes).
ins = get_operands_or_results(schema, is_input=True)
ins.update(get_attrs(schema))
ins_strs = ["{1}:${0}".format(*i) for i in ins.items()]
s += indent + 'let arguments = (ins {});\n'.format(
(',\n' + inc_indent(indent)).join(ins_strs))
# Generate outs (operation results).
outs = get_operands_or_results(schema, is_input=False)
outs_strs = ["{1}:${0}".format(*i) for i in outs.items()]
s += indent + 'let results = (outs {});\n'.format(
(',\n' + inc_indent(indent)).join(outs_strs))
# add custom builders
# use element type of the first operand to construct an UnrankedTensorType for the output.
if schema.name in custom_builder_ops_list:
if len(ins) == 0:
raise RuntimeWarning(
"warning: not generate custom build methods for " +
schema.name + " since it does not have operands.")
else:
s += indent + 'let builders = [\n'
# Custom builders with operands and attributes having a separate parameter.
# E.g. OpBuilder<"Builder *builder, OperationState &state, Value X, Value, Y, Attribute A", [{}]>
indent = inc_indent(indent)
s += indent + 'OpBuilder<"Builder *builder, OperationState &state'
operands_dict = get_operands_or_results(schema, is_input=True)
for name, ty in operands_dict.items():
s += ', {} {}'.format(tblgen_operand_type_to_cpp_type(ty),
name)
for name, ty in get_attrs(schema).items():
s += ', {} {}'.format(tblgen_attr_type_to_cpp_type(ty), name)
s += '", [{\n'
indent = inc_indent(indent)
# Get output type from first operand's type.
first_operand_name = list(ins.items())[0][0]
s += indent + 'auto elementType = {}.getType().cast<TensorType>().getElementType();\n'.format(
first_operand_name)
s += indent + 'build(builder, state, UnrankedTensorType::get(elementType)'
for name, _ in ins.items():
s += ', ' + name
s += ');\n'
indent = dec_indent(indent)
s += indent + '}]>,\n'
# Custom builders with all operands and attributes having aggregate parameters.
# E.g. OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{}]>'
s += indent + 'OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{\n'
indent = inc_indent(indent)
s += indent + 'auto elementType = operands[0].getType().cast<TensorType>().getElementType();\n'
s += indent + 'std::vector<mlir::Type> outputTypes;\n'
s += indent + 'outputTypes.emplace_back(UnrankedTensorType::get(elementType));\n'
s += indent + 'build(builder, state, outputTypes, operands, attributes);\n'
indent = dec_indent(indent)
s += indent + '}]>'
s += '\n' + indent + '];\n'
if schema.name in OpsWithPromotableConstOperands:
s = get_promotable_const_operands_func(
s, indent, OpsWithPromotableConstOperands[schema.name])
s += '}\n\n'
return s
"""
special cases:
* Split: attr split default value: sizeof(output1) namely 1
* Conv: attr dilations default value is {num_dim of first input - 2, 1}
* Conv: attr kernel_shape type is ints
* Transpose: attr perm default value is {} empty int list
"""
def gen_op_importer(schema, file):
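    # Emit one dispatch entry for OpBuildTable.inc: when the importer sees this
    # op name it calls buildOperation<mlir::ONNX...Op> (or a special handler)
    # with the expected operand/result counts, where -1 means variadic.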
indent = inc_indent()
s = indent + 'if (opName == "' + schema.name + '")\n'
expected_num_operands = len(schema.inputs)
expected_num_results = len(schema.outputs)
for input in schema.inputs:
if OpSchema.FormalParameterOption.Variadic == input.option:
expected_num_operands = -1
for output in schema.outputs:
if OpSchema.FormalParameterOption.Variadic == output.option:
expected_num_results = -1
handler_func = special_op_handler.get(
schema.name, "buildOperation<mlir::ONNX{}Op>".format(schema.name))
# Special handlers currently require expected num operands/results to be specified.
# TODO: remove special handlers.
args = ["node"]
if expected_num_operands != -1 or expected_num_results != -1 or "buildOperation" not in handler_func:
args.append(
"/* expected_num_operands = */ {}".format(expected_num_operands))
args.append(
'/* expected_num_results = */ {}'.format(expected_num_results))
s += inc_indent(indent) + "return {}({});\n".format(
handler_func, ", ".join(args))
file.write(s)
def build_operator_schemas():
# domain -> support level -> name -> [schema]
index = defaultdict(lambda: defaultdict(lambda: defaultdict(
list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]
for schema in defs.get_all_schemas_with_history():
index[schema.domain][int(
schema.support_level)][schema.name].append(schema)
# Preprocess the Operator Schemas
# [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]
operator_schemas = list(
) # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]
    existing_ops = set()  # type: Set[Text]
for domain, _supportmap in sorted(index.items()):
if not should_render_domain(domain):
continue
processed_supportmap = list()
for _support, _namemap in sorted(_supportmap.items()):
processed_namemap = list()
for n, unsorted_versions in sorted(_namemap.items()):
versions = sorted(unsorted_versions,
key=lambda s: s.since_version)
schema = versions[-1]
                if schema.name in existing_ops:
                    continue
                existing_ops.add(schema.name)
processed_namemap.append((n, schema, versions))
processed_supportmap.append((_support, processed_namemap))
operator_schemas.append((domain, processed_supportmap))
return operator_schemas
def main(args): # type: (Type[Args]) -> None
curr_utc_time = datetime.datetime.now(
datetime.timezone.utc).strftime("%m/%d/%Y, %H:%M:%S")
autogen_warning = (
'//********************************************************\n'
'// Do not modify this file directly.\n'
'// This file is automatically generated via script.\n'
'// Details can be found in docs/readonnxdefs.md .\n'
'//********************************************************\n\n')
autogen_warning = autogen_warning.format(curr_utc_time)
op_def = args.op_def
op_def.write(autogen_warning)
op_importer = args.op_importer
op_importer.write(autogen_warning)
for domain, supportmap in build_operator_schemas():
for _, namemap in supportmap:
for op_type, schema, versions in namemap:
gen_op_importer(schema, op_importer)
r = gen_op_def(schema)
op_def.write(r)
if __name__ == '__main__':
curr_dir = os.path.dirname(os.path.realpath(__file__))
class Args(object):
if args.dry_run_onnx_ops:
op_def = StringIO()
else:
op_def_file_path = os.path.join(curr_dir, 'ONNXOps.td.inc')
op_def = io.open(op_def_file_path, 'w', newline='')
if args.dry_run_op_build_table:
op_importer = StringIO()
else:
op_importer_file_path = os.path.join(curr_dir, 'OpBuildTable.inc')
op_importer = io.open(op_importer_file_path, 'w', newline='')
main(Args)
if args.dry_run_onnx_ops:
sys.stdout.write(Args.op_def.getvalue())
if args.dry_run_op_build_table:
sys.stdout.write(Args.op_importer.getvalue())
| []
| []
| [
"ONNX_ML"
]
| [] | ["ONNX_ML"] | python | 1 | 0 | |
server/restful/restful_server_test.go | package restful
import (
"log"
"net/http"
"os"
"path/filepath"
"testing"
"github.com/emicklei/go-restful"
rf "github.com/emicklei/go-restful"
"github.com/go-chassis/go-chassis/core/config"
"github.com/go-chassis/go-chassis/core/config/model"
"github.com/go-chassis/go-chassis/core/lager"
"github.com/go-chassis/go-chassis/core/server"
"github.com/stretchr/testify/assert"
)
var addrHighway = "127.0.0.1:2399"
var addrHighway1 = "127.0.0.1:2330"
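// initEnv points CHASSIS_HOME and GO_CHASSIS_SWAGGERFILEPATH at the bundled
// examples/discovery/server directory so config.Init() has a valid
// configuration to load during these tests.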
func initEnv() {
p := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", filepath.Join(p, "src", "github.com", "go-chassis", "go-chassis", "examples", "discovery", "server"))
log.Println(os.Getenv("CHASSIS_HOME"))
os.Setenv("GO_CHASSIS_SWAGGERFILEPATH", filepath.Join(p, "src", "github.com", "go-chassis", "go-chassis", "examples", "discovery", "server"))
log.Println(os.Getenv("GO_CHASSIS_SWAGGERFILEPATH"))
lager.Initialize("", "INFO", "", "size", true, 1, 10, 7)
config.Init()
defaultChain := make(map[string]string)
defaultChain["default"] = ""
config.GlobalDefinition = &model.GlobalCfg{}
}
func TestRestStart(t *testing.T) {
t.Log("Testing restful server start function")
initEnv()
schema := "schema1"
//trClient := tcp.NewTransport()
defaultChain := make(map[string]string)
defaultChain["default"] = ""
config.GlobalDefinition.Cse.Handler.Chain.Provider = defaultChain
config.GlobalDefinition.Cse.Handler.Chain.Consumer = defaultChain
f, err := server.GetServerFunc("rest")
assert.NoError(t, err)
s := f(server.Options{
Address: addrHighway,
ChainName: "default",
})
_, err = s.Register(&TestSchema{},
server.WithSchemaID(schema))
assert.NoError(t, err)
name := s.String()
assert.Equal(t, "rest", name)
err = s.Stop()
assert.NoError(t, err)
}
func TestRestStartFailure(t *testing.T) {
t.Log("Testing restful server for start function failure")
initEnv()
schema := "schema2"
//trClient := tcp.NewTransport()
defaultChain := make(map[string]string)
defaultChain["default"] = ""
config.GlobalDefinition.Cse.Handler.Chain.Provider = defaultChain
config.GlobalDefinition.Cse.Handler.Chain.Consumer = defaultChain
f, err := server.GetServerFunc("rest")
assert.NoError(t, err)
s := f(server.Options{
Address: addrHighway,
ChainName: "default",
})
_, err = s.Register(TestSchema{},
server.WithSchemaID(schema))
assert.Error(t, err)
err = s.Start()
assert.NoError(t, err)
name := s.String()
assert.Equal(t, "rest", name)
err = s.Stop()
assert.NoError(t, err)
}
type TestSchema struct {
}
func (r *TestSchema) Put(b *Context) {
}
func (r *TestSchema) Get(b *Context) {
}
func (r *TestSchema) Delete(b *Context) {
}
func (r *TestSchema) Head(b *Context) {
}
func (r *TestSchema) Patch(b *Context) {
}
func (r *TestSchema) Post(b *Context) {
}
//URLPatterns helps to respond for corresponding API calls
func (r *TestSchema) URLPatterns() []Route {
return []Route{
{Method: http.MethodGet, Path: "/", ResourceFunc: r.Get,
Returns: []*Returns{{Code: 200}}},
{Method: http.MethodPost, Path: "/sayhello/{userid}", ResourceFunc: r.Post,
Returns: []*Returns{{Code: 200}}},
{Method: http.MethodDelete, Path: "/sayhi", ResourceFunc: r.Delete,
Returns: []*Returns{{Code: 200}}},
{Method: http.MethodHead, Path: "/sayjson", ResourceFunc: r.Head,
Returns: []*Returns{{Code: 200}}},
{Method: http.MethodPatch, Path: "/sayjson", ResourceFunc: r.Patch,
Returns: []*Returns{{Code: 200}}},
{Method: http.MethodPut, Path: "/hi", ResourceFunc: r.Put,
Returns: []*Returns{{Code: 200}}},
}
}
func TestNoRefreshSchemaConfig(t *testing.T) {
p := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", filepath.Join(p, "src", "github.com", "go-chassis", "go-chassis", "examples", "discovery", "server"))
log.Println(os.Getenv("CHASSIS_HOME"))
config.Init()
assert.Equal(t, true, config.GlobalDefinition.Cse.NoRefreshSchema)
config.GlobalDefinition = &model.GlobalCfg{}
}
type Data struct {
ID string `json:"priceID"`
Category string `json:"type"`
Value string `json:"value"`
CreateTime string `json:"-"`
}
func TestFillParam(t *testing.T) {
var rb *rf.RouteBuilder = &rf.RouteBuilder{}
var routeSpec Route
p := &Parameters{
"p", "", rf.QueryParameterKind, "",
}
routeSpec.Parameters = append(routeSpec.Parameters, p)
p1 := &Parameters{
"p1", "", rf.BodyParameterKind, "",
}
routeSpec.Parameters = append(routeSpec.Parameters, p1)
p2 := &Parameters{
"p2", "", rf.FormParameterKind, "",
}
routeSpec.Parameters = append(routeSpec.Parameters, p2)
p3 := &Parameters{
"p3", "", rf.HeaderParameterKind, "",
}
routeSpec.Parameters = append(routeSpec.Parameters, p3)
rb = fillParam(routeSpec, rb)
assert.Equal(t, rf.QueryParameterKind, rb.ParameterNamed("p").Kind())
assert.Equal(t, rf.BodyParameterKind, rb.ParameterNamed("p1").Kind())
assert.Equal(t, rf.FormParameterKind, rb.ParameterNamed("p2").Kind())
assert.Equal(t, rf.HeaderParameterKind, rb.ParameterNamed("p3").Kind())
}
var schemaTestProduces = []string{"application/json"}
var schemaTestConsumes = []string{"application/xml"}
var schemaTestRoutes = []Route{
{
Method: http.MethodGet,
Path: "none",
ResourceFuncName: "Handler",
},
{
Method: http.MethodGet,
Path: "with-produces",
ResourceFuncName: "Handler",
Produces: schemaTestProduces,
},
{
Method: http.MethodGet,
Path: "with-consumes",
ResourceFuncName: "Handler",
Consumes: schemaTestConsumes,
},
{
Method: http.MethodGet,
Path: "with-all",
ResourceFuncName: "Handler",
Produces: schemaTestProduces,
Consumes: schemaTestConsumes,
},
}
type SchemaTest struct {
}
func (st SchemaTest) URLPatterns() []Route {
return schemaTestRoutes
}
func (st SchemaTest) Handler(ctx *Context) {
}
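// Test_restfulServer_register2GoRestful checks that Register translates each
// Route into a go-restful route and falls back to */* when Produces or
// Consumes are not set on the route.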
func Test_restfulServer_register2GoRestful(t *testing.T) {
initEnv()
rest := &restfulServer{
microServiceName: "rest",
container: restful.NewContainer(),
ws: new(restful.WebService),
server: &http.Server{},
}
_, err := rest.Register(&SchemaTest{})
assert.NoError(t, err)
routes := rest.ws.Routes()
assert.Equal(t, 4, len(routes), "there should be %d routes", len(schemaTestRoutes))
for _, route := range routes {
switch route.Path {
case "/none":
assert.Equal(t, []string{"*/*"}, route.Consumes)
assert.Equal(t, []string{"*/*"}, route.Produces)
case "/with-produces":
assert.Equal(t, schemaTestProduces, route.Produces)
assert.Equal(t, []string{"*/*"}, route.Consumes)
case "/with-consumes":
assert.Equal(t, []string{"*/*"}, route.Produces)
assert.Equal(t, schemaTestConsumes, route.Consumes)
case "/with-all":
assert.Equal(t, schemaTestProduces, route.Produces)
assert.Equal(t, schemaTestConsumes, route.Consumes)
default:
log.Println(route.Path)
}
}
}
| [
"\"GOPATH\"",
"\"CHASSIS_HOME\"",
"\"GO_CHASSIS_SWAGGERFILEPATH\"",
"\"GOPATH\"",
"\"CHASSIS_HOME\""
]
| []
| [
"GOPATH",
"CHASSIS_HOME",
"GO_CHASSIS_SWAGGERFILEPATH"
]
| [] | ["GOPATH", "CHASSIS_HOME", "GO_CHASSIS_SWAGGERFILEPATH"] | go | 3 | 0 | |
algorithms/implementation/breaking-best-and-worst-records.py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the breakingRecords function below.
def breakingRecords(scores):
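    # Track the running minimum and maximum as [current record value, number of
    # times that record has been broken]; the answer is (max breaks, min breaks).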
min_ctr=[scores[0],0]
max_ctr=[scores[0],0]
for i in scores:
if(i<min_ctr[0]):
min_ctr=[i,min_ctr[1]+1]
if(i>max_ctr[0]):
max_ctr=[i,max_ctr[1]+1]
return [max_ctr[1],min_ctr[1]]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
scores = list(map(int, input().rstrip().split()))
result = breakingRecords(scores)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
| []
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | python | 1 | 0 | |
train_pose2vid.py | import os
import numpy as np
import torch
import time
import sys
from collections import OrderedDict
from torch.autograd import Variable
from pathlib import Path
import warnings
warnings.filterwarnings('ignore')
mainpath = os.getcwd()
pix2pixhd_dir = Path(mainpath+'/src/pix2pixHD/')
sys.path.append(str(pix2pixhd_dir))
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
import src.config.train_opt as opt
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
torch.multiprocessing.set_sharing_strategy('file_system')
torch.backends.cudnn.benchmark = True
def main():
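    # Standard pix2pixHD training loop: the forward pass computes GAN, feature-matching
    # and VGG losses, generator and discriminator are updated in turn, and results
    # are periodically logged, visualized and checkpointed.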
iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)
start_epoch, epoch_iter = 1, 0
total_steps = (start_epoch - 1) * dataset_size + epoch_iter
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq
model = create_model(opt)
model = model.cuda()
visualizer = Visualizer(opt)
for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
if epoch != start_epoch:
epoch_iter = epoch_iter % dataset_size
for i, data in enumerate(dataset, start=epoch_iter):
iter_start_time = time.time()
total_steps += opt.batchSize
epoch_iter += opt.batchSize
# whether to collect output images
save_fake = total_steps % opt.display_freq == display_delta
############## Forward Pass ######################
losses, generated = model(Variable(data['label']), Variable(data['inst']),
Variable(data['image']), Variable(data['feat']), infer=save_fake)
# sum per device losses
losses = [torch.mean(x) if not isinstance(x, int) else x for x in losses]
loss_dict = dict(zip(model.loss_names, losses))
# calculate final loss scalar
loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat', 0) + loss_dict.get('G_VGG', 0)
############### Backward Pass ####################
# update generator weights
model.optimizer_G.zero_grad()
loss_G.backward()
model.optimizer_G.step()
# update discriminator weights
model.optimizer_D.zero_grad()
loss_D.backward()
model.optimizer_D.step()
############## Display results and errors ##########
### print out errors
if total_steps % opt.print_freq == print_delta:
errors = {k: v.data if not isinstance(v, int) else v for k, v in loss_dict.items()} # CHANGE: removed [0] after v.data
t = (time.time() - iter_start_time) / opt.batchSize
visualizer.print_current_errors(epoch, epoch_iter, errors, t)
visualizer.plot_current_errors(errors, total_steps)
### display output images
if save_fake:
visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
('synthesized_image', util.tensor2im(generated.data[0])),
('real_image', util.tensor2im(data['image'][0]))])
visualizer.display_current_results(visuals, epoch, total_steps)
### save latest model
if total_steps % opt.save_latest_freq == save_delta:
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
model.save('latest')
np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')
if epoch_iter >= dataset_size:
break
# end of epoch
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
### save model for this epoch
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
model.save('latest')
model.save(epoch)
np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')
### instead of only training the local enhancer, train the entire network after certain iterations
if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
model.update_fixed_params()
### linearly decay learning rate after certain iterations
if epoch > opt.niter:
model.update_learning_rate()
torch.cuda.empty_cache()
if __name__ == '__main__':
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
queue_green_oa_scrape.py | import argparse
import concurrent.futures
import logging
import os
import pickle
from datetime import datetime, timedelta
from multiprocessing import current_process
from time import sleep
from time import time
from urllib.parse import urlparse
import redis
from redis import WatchError
from sqlalchemy import orm, text
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import make_transient
from app import db
from app import logger
from oa_page import publisher_equivalent_endpoint_id
from page import PageNew
from queue_main import DbQueue
from recordthresher.record_maker import PmhRecordMaker
from util import elapsed
from util import safe_commit
from pub import Pub # magic
import endpoint # magic
import pmh_record # magic
def _procs_per_worker():
return int(os.getenv('GREEN_SCRAPE_PROCS_PER_WORKER', 10))
def _redis_max_connections():
return 2
_redis_client = None
_redis_init = False
def get_redis_client():
global _redis_client, _redis_init
if not _redis_init:
try:
_redis_client = redis.from_url(os.environ.get("REDIS_URL"), max_connections=1)
except Exception as e:
logger.exception(f'failed creating redis client: {e}')
_redis_init = True
return _redis_client
def scrape_pages(pages):
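    # Scrape the chunk in parallel worker processes (releasing the db connection
    # first), then keep only the pages that still exist in the database before
    # merging the scraped results back into the session.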
for page in pages:
make_transient(page)
# free up the connection while doing net IO
db.session.close()
db.engine.dispose()
with concurrent.futures.ProcessPoolExecutor(max_workers=_procs_per_worker()) as pool:
map_results = pool.map(scrape_page, pages, chunksize=1)
scraped_pages = [p for p in map_results if p]
logger.info('finished scraping all pages')
logger.info('preparing update records')
extant_page_ids = [
row[0] for row in
db.session.query(PageNew.id).filter(PageNew.id.in_(
[p.id for p in scraped_pages]
)).all()
]
scraped_pages = [db.session.merge(p) for p in scraped_pages if p.id in extant_page_ids]
for scraped_page in scraped_pages:
scraped_page.save_first_version_availability()
return scraped_pages
def scrape_page(page):
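    # Run each scrape in its own single-worker process so a hung page can be
    # killed after the 300-second timeout without taking down this worker.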
with concurrent.futures.ProcessPoolExecutor(max_workers=1) as pool:
worker = current_process().name
try:
return pool.submit(scrape_page_worker, page).result(timeout=300)
except concurrent.futures.TimeoutError as e:
logger.error(f'{worker} timed out')
for pid, process in pool._processes.items():
process.terminate()
pool.shutdown()
return None
except (KeyboardInterrupt, SystemExit):
pass
except Exception as e:
logger.exception(f'{worker} exception scraping page {page.id}')
return None
def scrape_page_worker(page):
worker = current_process().name
site_key_stem = redis_key(page, '')
logger.info('{} started scraping page {} {} {}'.format(worker, page.id, site_key_stem, page))
total_wait_seconds = 0
wait_seconds = 5
while total_wait_seconds < 60:
if begin_rate_limit(page):
page.scrape()
end_rate_limit(page)
logger.info('{} finished scraping page {} {} {}'.format(worker, page.id, site_key_stem, page))
return page
else:
logger.info('{} not ready to scrape page {} {} {}, waiting'.format(worker, page.id, site_key_stem, page))
sleep(wait_seconds)
total_wait_seconds += wait_seconds
logger.info('{} done waiting to scrape page {} {} {}, giving up'.format(worker, page.id, site_key_stem, page))
return None
def unpickle(v):
return pickle.loads(v) if v else None
def redis_key(page, scrape_property):
domain = urlparse(page.url).netloc
return 'green-scrape-p3:{}:{}:{}'.format(page.endpoint_id, domain, scrape_property)
def scrape_interval_seconds(page):
hostname = urlparse(page.url).hostname
one_sec_hosts = [
'citeseerx.ist.psu.edu',
'www.ncbi.nlm.nih.gov',
'pt.cision.com',
'doaj.org',
'hal.archives-ouvertes.fr',
'figshare.com',
'arxiv.org',
'europepmc.org',
'bibliotheques-specialisees.paris.fr',
'nbn-resolving.de',
'osti.gov',
'zenodo.org',
'kuleuven.be',
'edoc.hu-berlin.de',
'rug.nl',
]
for host in one_sec_hosts:
if hostname and hostname.endswith(host):
return 1
return 10
def begin_rate_limit(page, interval_seconds=None):
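    # Optimistic per-endpoint/domain rate limiting: WATCH the started/finished
    # keys and only claim the slot if no scrape started within the interval; a
    # concurrent write raises WatchError and this caller simply backs off.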
redis_client = get_redis_client()
if page.endpoint_id == publisher_equivalent_endpoint_id:
return True
interval_seconds = interval_seconds or scrape_interval_seconds(page)
started_key = redis_key(page, 'started')
finished_key = redis_key(page, 'finished')
with redis_client.pipeline() as pipe:
try:
pipe.watch(started_key)
pipe.watch(finished_key)
scrape_started = unpickle(pipe.get(started_key))
if scrape_started and scrape_started >= datetime.utcnow() - timedelta(seconds=interval_seconds):
return False
pipe.multi()
pipe.set(started_key, pickle.dumps(datetime.utcnow()))
pipe.set(finished_key, pickle.dumps(None))
pipe.execute()
return True
except WatchError:
return False
def end_rate_limit(page):
redis_client = get_redis_client()
redis_client.set(redis_key(page, 'started'), pickle.dumps(None))
redis_client.set(redis_key(page, 'finished'), pickle.dumps(datetime.utcnow()))
def merge_and_commit_objects(objects, retry=2):
try:
logger.info('starting merge')
merge_start_time = time()
[db.session.merge(o) for o in objects]
logger.info("merge took {} seconds".format(elapsed(merge_start_time, 2)))
logger.info('starting commit')
commit_start_time = time()
db.session.commit()
logger.info("commit took {} seconds".format(elapsed(commit_start_time, 2)))
except IntegrityError as e:
logger.exception(f'integrity error merging objects: {e}')
db.session.rollback()
if retry > 0:
logger.info('retrying merge_and_commit_objects')
merge_and_commit_objects(objects, retry=retry-1)
else:
logger.error('giving up on merge_and_commit_objects')
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
db.session.rollback()
logger.exception(f'error merging objects: {e}')
class DbQueueGreenOAScrape(DbQueue):
def table_name(self, job_type):
return 'page_green_scrape_queue'
def process_name(self, job_type):
return 'run_green_oa_scrape'
def worker_run(self, **kwargs):
run_class = PageNew
single_id = kwargs.get("id", None)
chunk_size = kwargs.get("chunk", 100)
limit = kwargs.get("limit", None)
scrape_publisher = kwargs.get("scrape_publisher", False)
if limit is None:
limit = float("inf")
if single_id:
page = run_class.query.filter(run_class.id == single_id).first()
page.scrape()
page.save_first_version_availability()
db.session.merge(page)
safe_commit(db) or logger.info("COMMIT fail")
if recordthresher_record := PmhRecordMaker.make_record(page.pmh_record):
db.session.merge(recordthresher_record)
db.session.merge(PmhRecordMaker.make_unpaywall_api_response(recordthresher_record))
safe_commit(db) or logger.info("COMMIT fail")
else:
index = 0
num_updated = 0
start_time = time()
while num_updated < limit:
new_loop_start_time = time()
objects = self.fetch_queue_chunk(chunk_size, scrape_publisher)
if not objects:
logger.info('no queued pages ready. waiting...')
sleep(5)
continue
scraped_pages = scrape_pages(objects)
scraped_ids = [p.id for p in scraped_pages]
unscraped_ids = [obj.id for obj in objects if obj.id not in scraped_ids]
logger.info('scraped {} pages and returned {} to the queue'.format(
len(scraped_ids), len(unscraped_ids)
))
scraped_batch_text = '''
update {queue_table}
set finished = now(), started=null
where id = any(:ids)'''.format(queue_table=self.table_name(None))
unscraped_batch_text = '''
update {queue_table}
set started=null
where id = any(:ids)'''.format(queue_table=self.table_name(None))
scraped_batch_command = text(scraped_batch_text).bindparams(
ids=scraped_ids)
unscraped_batch_command = text(unscraped_batch_text).bindparams(
ids=unscraped_ids)
db.session.execute(scraped_batch_command)
db.session.execute(unscraped_batch_command)
commit_start_time = time()
safe_commit(db) or logger.info("COMMIT fail")
logger.info("commit took {} seconds".format(elapsed(commit_start_time, 2)))
logger.info('making recordthresher records')
recordthresher_records = [PmhRecordMaker.make_record(p.pmh_record) for p in scraped_pages]
distinct_records = {}
for recordthresher_record in recordthresher_records:
if recordthresher_record:
distinct_records[recordthresher_record.id] = recordthresher_record
if distinct_records:
logger.info('saving recordthresher records')
merge_and_commit_objects(distinct_records.values())
logger.info('making mock unpaywall responses')
unpaywall_responses = [
PmhRecordMaker.make_unpaywall_api_response(r) for r in distinct_records.values()
]
logger.info('saving mock unpaywall responses')
merge_and_commit_objects(unpaywall_responses)
index += 1
num_updated += chunk_size
self.print_update(new_loop_start_time, len(scraped_ids), limit, start_time, index)
def fetch_queue_chunk(self, chunk_size, scrape_publisher):
logger.info("looking for new jobs")
endpoint_filter = "and qt.endpoint_id {} '{}'".format(
'=' if scrape_publisher else 'is distinct from',
publisher_equivalent_endpoint_id
)
text_query_pattern = """
with update_chunk as (
select
lru_by_endpoint.id
from
endpoint e
cross join lateral (
select qt.*
from
{queue_table} qt
join page_new p using (id)
where
qt.endpoint_id = e.id
and qt.started is null
and e.green_scrape
{endpoint_filter}
order by qt.finished asc nulls first
limit {per_endpoint_limit}
for update of qt skip locked
) lru_by_endpoint
where
finished is null
or finished < now() - '60 days'::interval
order by lru_by_endpoint.finished asc nulls first, lru_by_endpoint.rand
limit {chunk_size}
)
update {queue_table} queue_rows_to_update
set started=now()
from update_chunk
where update_chunk.id = queue_rows_to_update.id
returning update_chunk.id;
"""
text_query = text_query_pattern.format(
chunk_size=chunk_size,
queue_table=self.table_name(None),
endpoint_filter=endpoint_filter,
per_endpoint_limit=chunk_size if scrape_publisher else 10
)
logger.info("the queue query is:\n{}".format(text_query))
job_time = time()
row_list = db.engine.execute(text(text_query).execution_options(autocommit=True)).fetchall()
object_ids = [row[0] for row in row_list]
logger.info("got {} ids, took {} seconds".format(len(object_ids), elapsed(job_time)))
job_time = time()
q = db.session.query(PageNew).options(
orm.undefer('*')
).filter(PageNew.id.in_(object_ids))
objects = q.all()
logger.info("got page_new objects in {} seconds".format(elapsed(job_time)))
return objects
if __name__ == "__main__":
if os.getenv('OADOI_LOG_SQL'):
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
db.session.configure()
parser = argparse.ArgumentParser(description="Run stuff.")
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update (case sensitive)")
parser.add_argument('--doi', nargs="?", type=str, help="id of the one thing you want to update (case insensitive)")
parser.add_argument('--reset', default=False, action='store_true', help="do you want to just reset?")
parser.add_argument('--run', default=False, action='store_true', help="to run the queue")
    parser.add_argument('--status', default=False, action='store_true', help="to print the status")
parser.add_argument('--dynos', default=None, type=int, help="scale to this many dynos")
    parser.add_argument('--logs', default=False, action='store_true', help="print out logs")
parser.add_argument('--monitor', default=False, action='store_true', help="monitor till done, then turn off dynos")
parser.add_argument('--kick', default=False, action='store_true', help="put started but unfinished dois back to unstarted so they are retried")
parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many jobs to do")
parser.add_argument('--chunk', "-ch", nargs="?", default=100, type=int, help="how many to take off db at once")
parser.add_argument('--scrape-publisher', default=False, action='store_true', help="scrape publisher-equivalent pages")
parsed_args = parser.parse_args()
job_type = "normal" # should be an object attribute
my_queue = DbQueueGreenOAScrape()
my_queue.parsed_vars = vars(parsed_args)
my_queue.run_right_thing(parsed_args, job_type)
| []
| []
| [
"REDIS_URL",
"GREEN_SCRAPE_PROCS_PER_WORKER",
"OADOI_LOG_SQL"
]
| [] | ["REDIS_URL", "GREEN_SCRAPE_PROCS_PER_WORKER", "OADOI_LOG_SQL"] | python | 3 | 0 | |
xpred.py | import xgboost as xgb
from sklearn.metrics import roc_auc_score, auc, roc_curve
import pandas as pd
import numpy as np
import pickle
from xiter import *
import argparse
import matplotlib.pyplot as plt
data=pd.read_csv('xgb/random-search.csv')
parser=argparse.ArgumentParser()
parser.add_argument("--end",type=float,default=100000.,help='end ratio')
parser.add_argument("--save",type=str,default="random-search.csv",help='save name')
parser.add_argument("--right",type=str,default="/scratch/yjdata/gluon100_img",help='which train sample (qq,gg,zq,zg)')
parser.add_argument("--pt",type=int,default=200,help='pt range pt~pt*1.1')
parser.add_argument("--ptmin",type=float,default=0.,help='pt range pt~pt*1.1')
parser.add_argument("--ptmax",type=float,default=2.,help='pt range pt~pt*1.1')
parser.add_argument("--batch_size",type=int,default=100000,help='batch_size')
parser.add_argument("--gpu",type=int,default=0,help='gpu number')
parser.add_argument("--isz",type=int,default=0,help='0 or z or not')
parser.add_argument("--channel",type=int,default=30,help='sequence channel')
parser.add_argument("--order",type=int,default=1,help='pt ordering')
parser.add_argument("--eta",type=float,default=0.,help='end ratio')
parser.add_argument("--etabin",type=float,default=1.,help='end ratio')
args=parser.parse_args()
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
batch_size=args.batch_size
vzjdata="Data/zj_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
vjjdata="Data/jj_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
vzqdata="Data/zq_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
vzgdata="Data/zg_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
vqqdata="Data/qq_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
vggdata="Data/gg_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
rc=""
onehot=0
if(args.isz==0):
tqdata="Data/zj_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
tgdata="Data/jj_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
train=wkiter([tqdata,tgdata],batch_size=batch_size,end=args.end*0.7,istrain=1,rc=rc,onehot=onehot,etabin=args.etabin,pt=args.pt,ptmin=args.ptmin,ptmax=args.ptmax)
valid1=wkiter([vzjdata,vjjdata],batch_size=batch_size,begin=0.8*args.end,end=args.end*1.,rc=rc,onehot=onehot,etabin=args.etabin,pt=args.pt,ptmin=args.ptmin,ptmax=args.ptmax)
elif(args.isz==1):
tqdata="Data/zq_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
tgdata="Data/zg_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
train=wkiter([tqdata,tgdata],batch_size=batch_size,begin=0.6*args.end,end=args.end*1.,istrain=1,rc=rc,onehot=onehot,etabin=args.etabin,pt=args.pt,ptmin=args.ptmin,ptmax=args.ptmax)
else:
tqdata="Data/qq_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
tgdata="Data/gg_pt_{0}_{1}.root".format(args.pt,int(args.pt*1.1))
train=wkiter([tqdata,tgdata],batch_size=batch_size,begin=0.6*args.end,end=args.end*1.,istrain=1,rc=rc,onehot=onehot,etabin=args.etabin,pt=args.pt,ptmin=args.ptmin,ptmax=args.ptmax)
test2=wkiter([vzqdata,vzgdata],batch_size=batch_size,begin=args.end*0.,end=args.end*0.6,rc=rc,onehot=onehot,channel=args.channel,order=args.order,eta=0,etabin=2.4)
test3=wkiter([vqqdata,vggdata],batch_size=batch_size,begin=args.end*0.,end=args.end*0.6,rc=rc,onehot=onehot,channel=args.channel,order=args.order,eta=0,etabin=2.4)
entries=test2.totalnum()
print ("test ",entries)
print(args.pt)
gen=train.next()
#genv=valid1.next()
#epoch=eval(open(savename+"/history").readline())+1
X,Y=next(gen)
Y=np.array(Y)[:,0]
X=np.array(X[0])
#xv,yv=next(genv)
#xv=np.array(xv[0])
#yv=np.array(yv[:,0])
test2.reset()
test3.reset()
#genz=test2.next()
#genq=test3.next()
#xz,yz=next(genz)
#xq,yq=next(genq)
#xz=np.array(xz[0])
#xq=np.array(xq[0])
#yz=np.array(yz[:,0])
#yq=np.array(yq[:,0])
csv=pd.read_csv("xgb/{}-{}.csv".format(args.save,args.pt))
#csv=pd.read_csv(args.save)
best=csv.loc[csv["mean_test_score"].idxmax()]
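# Refit an XGBoost classifier on the GPU using the random-search row with the
# best mean_test_score; the row's columns are passed through as keyword arguments.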
model=xgb.XGBClassifier(objective='binary:logistic',tree_method="gpu_exact",**best)
#model=pickle.load(open("xgb/bdt100pickle-{}.dat".format(args.pt)))
model.fit(X,Y,eval_metric="auc")
f=rt.TFile("xgb/{}get.root".format(args.save),"recreate")
dq=rt.TTree("dq","dq tree")
dg=rt.TTree("dg","dg tree")
zq=rt.TTree("zq","zq tree")
zg=rt.TTree("zg","zg tree")
p=array('f',[0.])
pt=array('f',[0.])
eta=array('f',[0.])
dq.Branch("p",p,"p/F")
dq.Branch("pt",pt,"pt/F")
dq.Branch("eta",eta,"eta/F")
dg.Branch("p",p,"p/F")
dg.Branch("pt",pt,"pt/F")
dg.Branch("eta",eta,"eta/F")
zq.Branch("p",p,"p/F")
zq.Branch("pt",pt,"pt/F")
zq.Branch("eta",eta,"eta/F")
zg.Branch("p",p,"p/F")
zg.Branch("pt",pt,"pt/F")
zg.Branch("eta",eta,"eta/F")
"""py=model.predict_proba(xv)[:,1]
t_fpr,t_tpr,thresholds=roc_curve(yv,py)
t_tnr=1-t_fpr
#print(args.pt,"v",auc(t_fpr,t_tpr))
va=auc(t_fpr,t_tpr)
plt.figure(1)
q=[]
g=[]
for i in range(len(py)):
if(yv[i]==1):q.append(py[i])
else:g.append(py[i])
plt.hist(q,bins=50,weights=np.ones_like(q),histtype='step',alpha=0.5,label='vq')
plt.hist(g,bins=50,weights=np.ones_like(g),histtype='step',alpha=0.5,label='vg')
"""
#xz,yz=next(genz)
#xq,yq=next(genq)
bpy=model.predict_proba(test2.gjetset)[:,1]
bpt=test2.gptset
beta=test2.getaset
for i in range(len(bpy)):
p[0]=bpy[i]
pt[0]=bpt[i]
eta[0]=beta[i]
zg.Fill()
bpy=model.predict_proba(test2.qjetset)[:,1]
bpt=test2.qptset
beta=test2.qetaset
for i in range(len(bpy)):
p[0]=bpy[i]
pt[0]=bpt[i]
eta[0]=beta[i]
zq.Fill()
bpy=model.predict_proba(test3.gjetset)[:,1]
bpt=test3.gptset
beta=test3.getaset
for i in range(len(bpy)):
p[0]=bpy[i]
pt[0]=bpt[i]
eta[0]=beta[i]
dg.Fill()
bpy=model.predict_proba(test3.qjetset)[:,1]
bpt=test3.qptset
beta=test3.qetaset
for i in range(len(bpy)):
p[0]=bpy[i]
pt[0]=bpt[i]
eta[0]=beta[i]
dq.Fill()
f.Write()
f.Close()
#t_fpr,t_tpr,thresholds=roc_curve(yz,py)
#t_tnr=1-t_fpr
#print(args.pt,"z",auc(t_fpr,t_tpr))
#za=auc(t_fpr,t_tpr)
q=[]
g=[]
#for i in range(len(py)):
# if(yz[i]==1):q.append(py[i])
# else:g.append(py[i])
#plt.hist(q,bins=50,weights=np.ones_like(q),histtype='step',alpha=0.5,label='zq')
#plt.hist(g,bins=50,weights=np.ones_like(g),histtype='step',alpha=0.5,label='zg')
#t_fpr,t_tpr,thresholds=roc_curve(yq,py)
#t_tnr=1-t_fpr
#print(args.pt,"q",auc(t_fpr,t_tpr),len(yq))
#qa=auc(t_fpr,t_tpr)
q=[]
g=[]
#for i in range(len(py)):
# if(yq[i]==1):q.append(py[i])
# else:g.append(py[i])
#plt.hist(q,bins=50,weights=np.ones_like(q),histtype='step',alpha=0.5,label='qq')
#plt.hist(g,bins=50,weights=np.ones_like(g),histtype='step',alpha=0.5,label='qg')
#print(args.pt,va,qa,za)
#plt.legend()
#plt.savefig("xgb/bdtout{}.png".format(args.pt))
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
mmtbx/validation/omegalyze.py | from __future__ import division
from mmtbx.validation import residue, validation, atom
import iotbx.phil
import os.path
from libtbx import slots_getstate_setstate
import numpy
import os, sys
from mmtbx.conformation_dependent_library import generate_protein_fragments
################################################################################
# omegalyze.py
# This is a class to assess the omega (peptide bond) dihedral in protein
# backbone. It originated following concerns that cis-peptides, especially
# non-proline cis-peptides were not being flagged by MolProbity, and so
# structures with an improbable over-abundance of cis-peptides were passing
# validation.
#
# This code reuses existing ramalyze code, structure, and naming conventions
# where possible. Interfacing with this code should be either the same as or
# parallel to interfacing with ramalyze code.
#
################################################################################
def get_master_phil():
return iotbx.phil.parse(input_string="""
model = None
.type = path
.help = "Model file (PDB or mmCIF)"
nontrans_only = True
.type = bool
.help = "Controls whether trans peptides are stored and printed"
text = True
.type = bool
.help = "Prints verbose, colon-delimited text output and summary"
kinemage = False
.type = bool
.help = "Prints kinemage markup for cis-peptides"
oneline = False
.type = bool
.help = "Prints oneline-style summary statistics"
help = False
.type = bool
.help = "Prints this help message if true"
""", process_includes=True)
prog = os.getenv('LIBTBX_DISPATCHER_NAME')
usage_string = """
%(prog)s file.pdb [params.eff] [options ...]
Options:
model=input_file input PDB or mmCIF file
nontrans_only=True only print nontrans residues (does not affect kinemage)
text=True verbose colon-delimited text output (default)
kinemage=False Create kinemage markup (overrides text output)
help = False Prints this help message if true
text output is colon-delimited and follows the format:
residue:type:omega:conformation
'residue' is a unique residue identifier
'type' is either proline or general case
'omega' is the calculated omega dihedral for the peptide between this
residue and the preceeding residue
'conformation' is: cis for omega within 30 degrees of planar cis
trans for omega within 30 degrees of planar trans
twisted for omega not within 30 degrees of planar
SUMMARY statistics provide:
counts of cis prolines and twisted prolines relative to total prolines with
measurable omega dihedrals across all chains
counts of non-proline cis and twisted peptides relative to total non-proline
peptides with measurable omega dihedrals across all chains
Cis Prolines occur in ~5%% of prolines (1 in 20) at high resolution
Non-Proline Cis residues occur in ~0.05%% of residues (1 in 2000) and require
clear support from experimental data or homology.
Twisted peptides are even less frequent and are highly suspect without
high-resolution data.
Example:
%(prog)s model=1ubq.pdb kinemage=True
""" % locals()
#{{{ XXX Use these constants internally, not the strings
OMEGA_GENERAL = 0
OMEGA_PRO = 1
OMEGALYZE_TRANS =0
OMEGALYZE_CIS =1
OMEGALYZE_TWISTED =2
res_types = ["non-proline", "proline"] #used in GUI table
res_type_labels = ["non-Pro", "Pro "] #used in text output for MolProbity
res_type_kin = ["nonPro", "Pro"]
omega_types = ["Trans", "Cis", "Twisted"]
#}}}
class kin_atom(slots_getstate_setstate):
"""Container class used in generation of kinemages."""
__slots__ = ['id_str','xyz']
def __init__(self, id_str, xyz):
self.id_str = id_str
self.xyz = xyz
def dist(p1,p2):
return ((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2)**0.5
def single_offset(p1,p2,offset):
d = dist(p1,p2)
return (p1[0]-(p1[0]-p2[0])/d*offset, p1[1]-(p1[1]-p2[1])/d*offset, p1[2]-(p1[2]-p2[2])/d*offset)
class omega_result(residue):
"""
Result class for protein backbone omega angle analysis (molprobity.omegalyze).
"""
__omega_attr__ = [
"res_type",
"omega_type",
"omega",
"is_nontrans",
"markup_atoms",
"highest_mc_b",
"prev_resseq",
"prev_icode",
"prev_resname",
"prev_altloc",
"model_id"
]
__slots__ = residue.__slots__ + __omega_attr__
@staticmethod
def header():
return "%-31s %-12s %6s %-13s %6s" % ("Residues", "Type", "Omega","Conformation","MC_high_b")
def residue_type(self):
return res_type_labels[self.res_type]
def omegalyze_type(self):
return omega_types[self.omega_type]
def residue_type_kin(self):
return res_type_kin[self.res_type]
def prev_id_str(self):
return "%2s%4s%1s%1s%3s" % (
self.chain_id, self.prev_resseq, self.prev_icode, self.prev_altloc,
self.prev_resname)
def as_string(self):
return "%-12s to %-15s %-12s %6.2f %-13s %6.2f" % (
self.prev_id_str(),self.id_str(),self.residue_type(),self.omega, self.omegalyze_type(),self.highest_mc_b)
#For backwards compatibility
def id_str_old(self):
return "%s%4s%1s %1s%s" % (self.chain_id, self.resseq, self.icode,
self.altloc, self.resname)
def format_old(self):
return "%s to %s: %s :%s:%s:%s" % (self.prev_id_str(),self.id_str(), self.residue_type(),
('%.2f'%self.omega).rjust(7), self.omegalyze_type().ljust(8),self.highest_mc_b)
def as_kinemage(self, triangles=False, vectors=False):
ca1,c,n,ca2 = self.markup_atoms[0].xyz, self.markup_atoms[1].xyz, self.markup_atoms[2].xyz, self.markup_atoms[3].xyz
o = 0.1
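    # o is the offset (in Angstroms) used to pull the kinemage markup points
    # slightly in from the backbone atoms so the drawn triangles sit beside them.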
d1 = dist(ca1,ca2)
d2 = dist(n,c)
d3 = dist(ca1,c)
d4 = dist(n,ca2)
d_diag = dist(ca1,n)
c_n_vec = (n[0]-c[0], n[1]-c[1], n[2]-c[2])
c_ca1_vec = (ca1[0]-c[0], ca1[1]-c[1], ca1[2]-c[2])
ca2_ca1_vec = (ca1[0]-ca2[0], ca1[1]-ca2[1], ca1[2]-ca2[2])
ca2_n_vec = (n[0]-ca2[0], n[1]-ca2[1], n[2]-ca2[2])
diag_vec = (n[0]-ca1[0], n[1]-ca1[1], n[2]-ca1[2])
theta = numpy.arccos(numpy.dot(c_ca1_vec,diag_vec)/d_diag/d3)
ca1_offset_len = o/numpy.sin(theta)
ca1_offset = single_offset(ca1,n,ca1_offset_len)
theta = numpy.arccos(numpy.dot(ca2_n_vec,diag_vec)/d_diag/d4)
n_offset_len = o/numpy.sin(theta)
n_offset = single_offset(n,ca1,n_offset_len)
theta_ca2 = numpy.arccos(numpy.dot(ca2_n_vec,ca2_ca1_vec)/d4/d1)
ca2_to_ca1_offset_len = o/numpy.sin(theta_ca2)
ca2_to_ca1_offset = single_offset(ca2,ca1,ca2_to_ca1_offset_len)
vec_from_offset = (ca2_to_ca1_offset[0]+n[0]-ca2[0], ca2_to_ca1_offset[1]+n[1]-ca2[1], ca2_to_ca1_offset[2]+n[2]-ca2[2])
theta = -numpy.arccos(numpy.dot(diag_vec,ca2_ca1_vec)/d_diag/d1)
v1 = -dist(ca1,ca1_offset)*numpy.sin(theta)
x = v1/numpy.sin(theta_ca2)
ca2_offset = single_offset(ca2_to_ca1_offset,vec_from_offset, x)
theta_n = numpy.arccos(numpy.dot(diag_vec,c_n_vec)/d_diag/d2)
n_to_c_offset_len = o/numpy.sin(theta_n)
n_to_c_offset = single_offset(c,n,n_to_c_offset_len)
v2 = dist(n,n_offset)*numpy.sin(theta_n)
theta_c = numpy.arccos(numpy.dot(c_ca1_vec,c_n_vec)/d2/d3)
c_to_n_offset_len = o/numpy.sin(theta_c)
c_to_n_offset = single_offset(c,n,c_to_n_offset_len)
x2 = v2/numpy.sin(theta_c)
vec_from_offset = (c_to_n_offset[0]+ca1[0]-c[0], c_to_n_offset[1]+ca1[1]-c[1], c_to_n_offset[2]+ca1[2]-c[2])
c_offset = single_offset(c_to_n_offset,vec_from_offset,x2)
#This commented block was the first pass at offseting the triangles
# it was abandoned due to moving the drawn ca1-n diagonal off of the actual diagonal
#o = 0.1 #this is the offset from the backbone in angstrom
#d1 = ((ca1[0]-ca2[0])**2 + (ca1[1]-ca2[1])**2 + (ca1[2]-ca2[2])**2)**0.5
#d2 = ((n[0]-c[0])**2 + (n[1]-c[1])**2 + (n[2]-c[2])**2)**0.5
#d3 = ((ca1[0]-c[0])**2 + (ca1[1]-c[1])**2 + (ca1[2]-c[2])**2)**0.5
#d4 = ((n[0]-ca2[0])**2 + (n[1]-ca2[1])**2 + (n[2]-ca2[2])**2)**0.5
#ca1_offset = (ca1[0]+(ca2[0]-ca1[0])/d1*o - (ca1[0]-c[0])/d3*o, ca1[1]+(ca2[1]-ca1[1])/d1*o - (ca1[1]-c[1])/d3*o, ca1[2]+(ca2[2]-ca1[2])/d1*o - (ca1[2]-c[2])/d3*o)
#ca2_offset = (ca2[0]-(ca2[0]-ca1[0])/d1*o - (ca2[0]-n[0])/d4*o, ca2[1]-(ca2[1]-ca1[1])/d1*o - (ca2[1]-n[1])/d4*o, ca2[2]-(ca2[2]-ca1[2])/d1*o - (ca2[2]-n[2])/d4*o)
#c_offset = (c[0]+(n[0]-c[0])/d2*o+(ca1[0]-c[0])/d3*o, c[1]+(n[1]-c[1])/d2*o+(ca1[1]-c[1])/d3*o, c[2]+(n[2]-c[2])/d2*o+(ca1[2]-c[2])/d3*o)
#n_offset = (n[0]-(n[0]-c[0])/d2*o+(ca2[0]-n[0])/d4*o, n[1]-(n[1]-c[1])/d2*o+(ca2[1]-n[1])/d4*o, n[2]-(n[2]-c[2])/d2*o+(ca2[2]-n[2])/d4*o)
if triangles:
triangle1_line1 = "{%s CA (%s %s, omega= %.2f)} P X %s\n" % (
self.markup_atoms[0].id_str, self.omegalyze_type(),
self.residue_type_kin(), self.omega,
"%.3f %.3f %.3f" % ca1_offset)
triangle1_line2 = "{%s C (%s %s, omega= %.2f)} %s\n" % (
self.markup_atoms[1].id_str, self.omegalyze_type(),
self.residue_type_kin(), self.omega,
"%.3f %.3f %.3f" % c_offset)
triangle1_line3 = "{%s N (%s %s, omega= %.2f)} %s\n" % (
self.markup_atoms[2].id_str, self.omegalyze_type(),
self.residue_type_kin(), self.omega,
"%.3f %.3f %.3f" % n_offset)
triangle2_line1 = "{%s CA (%s %s, omega= %.2f)} P X %s\n" % (
self.markup_atoms[0].id_str, self.omegalyze_type(),
self.residue_type_kin(), self.omega,
"%.3f %.3f %.3f" % ca1_offset)
triangle2_line2 = "{%s N (%s %s, omega= %.2f)} %s\n" % (
self.markup_atoms[2].id_str, self.omegalyze_type(),
self.residue_type_kin(), self.omega,
"%.3f %.3f %.3f" % n_offset)
triangle2_line3 = "{%s CA (%s %s, omega= %.2f)} %s\n" % (
self.markup_atoms[3].id_str, self.omegalyze_type(),
self.residue_type_kin(), self.omega,
"%.3f %.3f %.3f" % ca2_offset)
out_this = triangle1_line1 + triangle1_line2 + triangle1_line3 + triangle2_line1 + triangle2_line2 + triangle2_line3
elif vectors:
vector_line1 = "{%s CA (%s %s, omega= %.2f)} P %s\n" % (
self.markup_atoms[0].id_str, self.omegalyze_type(),
self.residue_type_kin(), self.omega,
"%.3f %.3f %.3f" % ca1_offset)
if self.omega_type == OMEGALYZE_CIS:
vector_line2 = "{%s CA (%s %s, omega= %.2f)} %s\n" % (
self.markup_atoms[3].id_str, self.omegalyze_type(),
self.residue_type_kin(), self.omega,
"%.3f %.3f %.3f" % ca2_offset)
elif self.omega_type == OMEGALYZE_TWISTED:
vector_line2 = "{%s N (%s %s, omega= %.2f)} %s\n" % (
self.markup_atoms[2].id_str, self.omegalyze_type(),
self.residue_type_kin(), self.omega,
"%.3f %.3f %.3f" % n_offset)
else:
return ""
out_this = vector_line1 + vector_line2
return out_this
#def as_table_row_phenix(self):
# return [ self.chain_id, "%s %s" % (self.resname, self.resid),
# res_types[self.res_type], self.omega, omega_types[self.omega_type] ]
def as_table_row_phenix(self):
#'%4s%1s' string formatting for previous residue matched string formatting within self.resid
return [ self.chain_id, "%1s%s %4s%1s to %1s%s %s" % (self.prev_altloc, self.prev_resname, self.prev_resseq, self.prev_icode, self.altloc, self.resname, self.resid),
res_types[self.res_type], self.omega, omega_types[self.omega_type] ]
#the ramachandran_ensemble class is only called in mmtbx/validation/ensembles
# and does not seem to provide functionality useful to omega analysis
#So it is omitted for the moment
class omegalyze(validation):
"""
Frontend for calculating omega angle statistics for a model.
"""
__slots__ = validation.__slots__ + [
"residue_count",
"omega_count",
"_outlier_i_seqs"
]
program_description = "Analyze protein backbone peptide dihedrals (omega)"
output_header = "residues:type:omega:conformation:mc_bmax"
gui_list_headers = ["Chain","Residues","Residue type","omega","conformation"]
gui_formats = ["%s", "%s", "%s", "%.2f", "%s"]
wx_column_widths = [75,200,125,125,125]
def get_result_class(self): return omega_result
def __init__(self,
pdb_hierarchy,
nontrans_only=False,
out=sys.stdout,
quiet=True):
validation.__init__(self)
self.residue_count = [0, 0]
#[OMEGA_GENERAL, OMEGA_PRO]
self.omega_count = [[0,0,0], [0,0,0]]
#[OMEGA_GENERAL, OMEGA_PRO], then
#[OMEGALYZE_TRANS, OMEGALYZE_CIS, OMEGALYZE_TWISTED]
from mmtbx.validation import utils
from scitbx.array_family import flex
self._outlier_i_seqs = flex.size_t()
pdb_atoms = pdb_hierarchy.atoms()
all_i_seqs = pdb_atoms.extract_i_seq()
if all_i_seqs.all_eq(0):
pdb_atoms.reset_i_seq()
use_segids = utils.use_segids_in_place_of_chainids(
hierarchy=pdb_hierarchy)
first_conf_altloc = None
prev_chain_id = None
for twores in generate_protein_fragments(
pdb_hierarchy,
length=2,
geometry=None,
include_non_standard_peptides=True):
main_residue = twores[1] #this is the relevant residue for id-ing cis-Pro
conf_altloc = get_conformer_altloc(twores)
prevres_altloc, mainres_altloc = get_local_omega_altlocs(twores)
twores_altloc = prevres_altloc or mainres_altloc #default '' evals False
chain = main_residue.parent().parent()
if use_segids:
chain_id = utils.get_segid_as_chainid(chain=chain)
else:
chain_id = chain.id
if chain_id != prev_chain_id: #if we've moved to a new chain...
first_conf_altloc = conf_altloc #...reset reference altloc
prev_chain_id = chain_id
if (conf_altloc != first_conf_altloc) and twores_altloc == '':
#skip non-alternate residues unless this is the first time thru a chain
continue
omega_atoms = get_omega_atoms(twores)
#omega_atoms is the list [CA1 C1 N2 CA2], with None for missing atoms
if None in omega_atoms:
continue
omega = get_omega(omega_atoms)
if omega is None: continue
omega_type = find_omega_type(omega)
if omega_type == OMEGALYZE_TRANS:
is_nontrans = False
else:
is_nontrans = True
self.n_outliers += 1
if main_residue.resname == "PRO": res_type = OMEGA_PRO
else: res_type = OMEGA_GENERAL
self.residue_count[res_type] += 1
self.omega_count[res_type][omega_type] += 1
highest_mc_b = get_highest_mc_b(twores[0].atoms(),twores[1].atoms())
coords = get_center(main_residue)
markup_atoms = []
for omega_atom in omega_atoms:
markup_atoms.append(kin_atom(omega_atom.parent().id_str(), omega_atom.xyz))
result = omega_result(
model_id=twores[0].parent().parent().parent().id,
chain_id=chain_id,
resseq=main_residue.resseq,
icode=main_residue.icode,
resname=main_residue.resname,
altloc=mainres_altloc,
prev_resseq=twores[0].resseq,
prev_icode=twores[0].icode,
prev_resname=twores[0].resname,
prev_altloc=prevres_altloc,
segid=None,
omega=omega,
omega_type=omega_type,
res_type=res_type,
is_nontrans=is_nontrans,
outlier=is_nontrans,
highest_mc_b=highest_mc_b,
xyz=coords,
markup_atoms=markup_atoms)
if is_nontrans or not nontrans_only: #(not nontrans_only or is_nontrans)
self.results.append(result)
if is_nontrans:
i_seqs = main_residue.atoms().extract_i_seq()
assert (not i_seqs.all_eq(0)) #This assert copied from ramalyze
self._outlier_i_seqs.extend(i_seqs)
self.results.sort(key=lambda x: x.model_id+':'+x.id_str())
def _get_count_and_fraction(self, res_type, omega_type):
total = self.residue_count[res_type]
if total == 0:
return 0, 0.0
else:
count = self.omega_count[res_type][omega_type]
fraction = float(count) / total
return count, fraction
def as_kinemage(self):
outlist = []
cisprolist = []
cisnonprolist = []
cisprovectorlist = []
cisnonprovectorlist = []
twistlist = []
twistvectorlist = []
cisprohead = ["@subgroup {Cis proline} dominant master= {Cis proline} off\n",
"@trianglelist {cis pro omega triangles} color= sea\n"]
cisnonprohead = [
"@subgroup {Cis peptides} dominant master= {Cis non-proline}\n",
"@trianglelist {cis nonpro omega triangles} color= lime\n"]
twisthead = [
"@subgroup {Twisted peptides} dominant master= {Twisted peptides}\n",
"@trianglelist {twisted omega triangles} color= yellow\n"]
cisprovectorhead = ["@vectorlist {cis pro omega vectors} color= sea width=3\n"]
cisnonprovectorhead = ["@vectorlist {cis nonpro omega vectors} color= lime width=3\n"]
twistvectorhead=[
"@vectorlist {twisted omega vectors} color= yellow width=3\n"]
for result in self.results:
if result.omega_type == OMEGALYZE_CIS:
if result.res_type == OMEGA_PRO:
cisprolist.append(result.as_kinemage(triangles=True))
cisprovectorlist.append(result.as_kinemage(vectors=True))
else:
cisnonprolist.append(result.as_kinemage(triangles=True))
cisnonprovectorlist.append(result.as_kinemage(vectors=True))
elif result.omega_type == OMEGALYZE_TWISTED:
twistlist.append(result.as_kinemage(triangles=True))
twistvectorlist.append(result.as_kinemage(vectors=True))
if cisprolist:
outlist = outlist + cisprohead + cisprolist + cisprovectorhead + cisprovectorlist
if cisnonprolist:
outlist = outlist + cisnonprohead + cisnonprolist + cisnonprovectorhead + cisnonprovectorlist
if twistlist:
outlist = outlist + twisthead + twistlist + twistvectorhead + twistvectorlist
return "".join(outlist)
#it's my understanding that .join(list) is more efficient than string concat
def as_coot_data(self):
data = []
for result in self.results:
if result.is_nontrans:
data.append((result.chain_id, result.resid, result.resname, result.score, result.xyz))
return data
def show_summary(self, out=sys.stdout, prefix=""):
print >> out, prefix + 'SUMMARY: %i cis prolines out of %i PRO' % (
self.n_cis_proline(),
self.n_proline())
print >> out, prefix + 'SUMMARY: %i twisted prolines out of %i PRO' % (
self.n_twisted_proline(),
self.n_proline())
print >> out, prefix + 'SUMMARY: %i other cis residues out of %i nonPRO' % (
self.n_cis_general(),
self.n_general())
print >> out, prefix + 'SUMMARY: %i other twisted residues out of %i nonPRO' % (
self.n_twisted_general(),
self.n_general())
def summary_only(self, out=sys.stdout, pdbid="pdbid"):
out.write(os.path.basename(pdbid) + ":")
if self.n_cis_proline() == 0:
out.write("0:")
else:
out.write('%.3f' % (self.n_cis_proline()/self.n_proline()*100)+":")
if self.n_twisted_proline() == 0:
out.write("0:")
else:
out.write('%.3f' % (self.n_twisted_proline()/self.n_proline()*100)+":")
out.write("%i" % self.n_proline() + ":")
if self.n_cis_general() == 0:
out.write("0:")
else:
out.write('%.3f' % (self.n_cis_general()/self.n_general()*100)+":")
if self.n_twisted_general() == 0:
out.write("0:")
else:
out.write('%.3f' % (self.n_twisted_general()/self.n_general()*100)+":")
out.write("%i" % self.n_general() + "\n")
def gui_summary(self):
output = []
if self.n_cis_proline() or self.n_proline():
output.append('%i cis prolines out of %i PRO' % (self.n_cis_proline(),self.n_proline()))
if self.n_twisted_proline():
output.append('%i twisted prolines out of %i PRO' % (self.n_twisted_proline(),self.n_proline()))
if self.n_cis_general():
output.append('%i cis residues out of %i nonPRO' % (self.n_cis_general(),self.n_general()))
if self.n_twisted_general():
output.append('%i twisted residues out of %i nonPRO' % (self.n_twisted_general(),self.n_general()))
return "\n".join(output)
def n_proline(self):
return self.residue_count[OMEGA_PRO]
def n_trans_proline(self):
return self.omega_count[OMEGA_PRO][OMEGALYZE_TRANS]
def n_cis_proline(self):
return self.omega_count[OMEGA_PRO][OMEGALYZE_CIS]
def n_twisted_proline(self):
return self.omega_count[OMEGA_PRO][OMEGALYZE_TWISTED]
def n_general(self):
return self.residue_count[OMEGA_GENERAL]
def n_trans_general(self):
return self.omega_count[OMEGA_GENERAL][OMEGALYZE_TRANS]
def n_cis_general(self):
return self.omega_count[OMEGA_GENERAL][OMEGALYZE_CIS]
def n_twisted_general(self):
return self.omega_count[OMEGA_GENERAL][OMEGALYZE_TWISTED]
def write_header(writeto=sys.stdout):
writeto.write("residue:omega:evaluation\n")
def find_omega_type(omega):
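  # Within 30 degrees of 0 is cis, within 30 degrees of +/-180 is trans, and
  # anything in between is counted as twisted.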
if (omega > -30) and (omega < 30): omega_type = OMEGALYZE_CIS
elif (omega < -150) or (omega > 150): omega_type = OMEGALYZE_TRANS
else: omega_type = OMEGALYZE_TWISTED
return omega_type
def get_omega(omega_atoms):
#omega_atoms = [CA1 C1 N2 CA2]
if None in omega_atoms:
return None
import mmtbx.rotamer
return mmtbx.rotamer.omega_from_atoms(omega_atoms[0], omega_atoms[1], omega_atoms[2], omega_atoms[3])
def get_highest_mc_b(prev_atoms, atoms):
highest_mc_b = 0
if (prev_atoms is not None):
for atom in prev_atoms:
if atom is not None and atom.name in [" CA "," C "," N "," O ","CB"]:
if atom.b > highest_mc_b:
highest_mc_b = atom.b
if (atoms is not None):
for atom in atoms:
if atom is not None and atom.name in [" CA "," C "," N "," O ","CB"]:
if atom.b > highest_mc_b:
highest_mc_b = atom.b
return highest_mc_b
def get_center(ag):
for atom in ag.atoms():
if (atom.name == " N "):
return atom.xyz
return ag.atoms().extract_xyz().mean()
def get_conformer_altloc(twores):
return twores[0].parent().altloc #go to conformer level
def get_local_omega_altlocs(twores):
#in conformer world, where threes come from, altlocs are most accurately
# stored at the atom level, in the .id_str()
#look at all atoms in the main residues, plus the atoms used in calculations
# from adjacent residues to find if any have altlocs
prevres_alt = ''
mainres_alt = ''
for atom in twores[1].atoms():
if atom.name not in [" N ", " CA "]:
continue
altchar = atom.id_str()[9:10]
if altchar != ' ':
mainres_alt = altchar
break
for atom in twores[0].atoms():
if atom.name not in [" CA "," C "]:
continue
altchar = atom.id_str()[9:10]
if altchar != ' ':
prevres_alt = altchar
break
return prevres_alt, mainres_alt
def get_omega_atoms(twores):
#atomlist = [CA1 C1 N2 CA2]
atomlist = [None, None, None, None]
for atom in twores[0].atoms():
if atom.name == " CA ":
atomlist[0] = atom
elif atom.name == " C ":
atomlist[1] = atom
for atom in twores[1].atoms():
if atom.name == " N ":
atomlist[2] = atom
elif atom.name == " CA ":
atomlist[3] = atom
return atomlist
def run (args, out=sys.stdout, quiet=False) :
cmdline = iotbx.phil.process_command_line_with_files(
args=args,
master_phil=get_master_phil(),
pdb_file_def="model",
usage_string=usage_string)
params = cmdline.work.extract()
#if (params.model is None or params.help) :
#help printing is handled in iotbx.phil.process_command_line_with_files()
pdb_in = cmdline.get_file(params.model, force_type="pdb")
hierarchy = pdb_in.file_object.hierarchy
hierarchy.atoms().reset_i_seq()
result = omegalyze(
pdb_hierarchy=hierarchy,
nontrans_only=params.nontrans_only,
out=out,
quiet=quiet)
if params.kinemage:
print >> out, result.as_kinemage()
elif params.oneline:
result.summary_only(out=out, pdbid=params.model)
elif params.text:
result.show_old_output(out=out, verbose=True)
if (__name__ == "__main__") :
run(sys.argv[1:])
| []
| []
| [
"LIBTBX_DISPATCHER_NAME"
]
| [] | ["LIBTBX_DISPATCHER_NAME"] | python | 1 | 0 | |
pkg/cli/server/server.go | package server
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"strings"
"time"
systemd "github.com/coreos/go-systemd/daemon"
"github.com/erikdubbelboer/gspt"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/agent"
"github.com/rancher/k3s/pkg/cli/cmds"
"github.com/rancher/k3s/pkg/clientaccess"
"github.com/rancher/k3s/pkg/datadir"
"github.com/rancher/k3s/pkg/etcd"
"github.com/rancher/k3s/pkg/netutil"
"github.com/rancher/k3s/pkg/rootless"
"github.com/rancher/k3s/pkg/server"
"github.com/rancher/k3s/pkg/token"
"github.com/rancher/k3s/pkg/util"
"github.com/rancher/k3s/pkg/version"
"github.com/rancher/wrangler/pkg/signals"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
utilnet "k8s.io/apimachinery/pkg/util/net"
kubeapiserverflag "k8s.io/component-base/cli/flag"
"k8s.io/kubernetes/pkg/controlplane"
utilsnet "k8s.io/utils/net"
_ "github.com/go-sql-driver/mysql" // ensure we have mysql
_ "github.com/lib/pq" // ensure we have postgres
_ "github.com/mattn/go-sqlite3" // ensure we have sqlite
)
func Run(app *cli.Context) error {
if err := cmds.InitLogging(); err != nil {
return err
}
return run(app, &cmds.ServerConfig, server.CustomControllers{}, server.CustomControllers{})
}
func RunWithControllers(app *cli.Context, leaderControllers server.CustomControllers, controllers server.CustomControllers) error {
if err := cmds.InitLogging(); err != nil {
return err
}
return run(app, &cmds.ServerConfig, leaderControllers, controllers)
}
func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomControllers, controllers server.CustomControllers) error {
var (
err error
)
// hide process arguments from ps output, since they may contain
// database credentials or other secrets.
gspt.SetProcTitle(os.Args[0] + " server")
if !cfg.DisableAgent && os.Getuid() != 0 && !cfg.Rootless {
return fmt.Errorf("must run as root unless --disable-agent is specified")
}
if cfg.Rootless {
dataDir, err := datadir.LocalHome(cfg.DataDir, true)
if err != nil {
return err
}
cfg.DataDir = dataDir
if err := rootless.Rootless(dataDir); err != nil {
return err
}
}
if cfg.Token == "" && cfg.ClusterSecret != "" {
cfg.Token = cfg.ClusterSecret
}
serverConfig := server.Config{}
serverConfig.DisableAgent = cfg.DisableAgent
serverConfig.ControlConfig.Token = cfg.Token
serverConfig.ControlConfig.AgentToken = cfg.AgentToken
serverConfig.ControlConfig.JoinURL = cfg.ServerURL
if cfg.AgentTokenFile != "" {
serverConfig.ControlConfig.AgentToken, err = token.ReadFile(cfg.AgentTokenFile)
if err != nil {
return err
}
}
if cfg.TokenFile != "" {
serverConfig.ControlConfig.Token, err = token.ReadFile(cfg.TokenFile)
if err != nil {
return err
}
}
serverConfig.ControlConfig.DataDir = cfg.DataDir
serverConfig.ControlConfig.KubeConfigOutput = cfg.KubeConfigOutput
serverConfig.ControlConfig.KubeConfigMode = cfg.KubeConfigMode
serverConfig.Rootless = cfg.Rootless
serverConfig.ControlConfig.SANs = knownIPs(cfg.TLSSan)
serverConfig.ControlConfig.BindAddress = cfg.BindAddress
serverConfig.ControlConfig.SupervisorPort = cfg.SupervisorPort
serverConfig.ControlConfig.HTTPSPort = cfg.HTTPSPort
serverConfig.ControlConfig.APIServerPort = cfg.APIServerPort
serverConfig.ControlConfig.APIServerBindAddress = cfg.APIServerBindAddress
serverConfig.ControlConfig.ExtraAPIArgs = cfg.ExtraAPIArgs
serverConfig.ControlConfig.ExtraControllerArgs = cfg.ExtraControllerArgs
serverConfig.ControlConfig.ExtraSchedulerAPIArgs = cfg.ExtraSchedulerArgs
serverConfig.ControlConfig.ClusterDomain = cfg.ClusterDomain
serverConfig.ControlConfig.Datastore.Endpoint = cfg.DatastoreEndpoint
serverConfig.ControlConfig.Datastore.CAFile = cfg.DatastoreCAFile
serverConfig.ControlConfig.Datastore.CertFile = cfg.DatastoreCertFile
serverConfig.ControlConfig.Datastore.KeyFile = cfg.DatastoreKeyFile
serverConfig.ControlConfig.AdvertiseIP = cfg.AdvertiseIP
serverConfig.ControlConfig.AdvertisePort = cfg.AdvertisePort
serverConfig.ControlConfig.FlannelBackend = cfg.FlannelBackend
serverConfig.ControlConfig.ExtraCloudControllerArgs = cfg.ExtraCloudControllerArgs
serverConfig.ControlConfig.DisableCCM = cfg.DisableCCM
serverConfig.ControlConfig.DisableNPC = cfg.DisableNPC
serverConfig.ControlConfig.DisableHelmController = cfg.DisableHelmController
serverConfig.ControlConfig.DisableKubeProxy = cfg.DisableKubeProxy
serverConfig.ControlConfig.DisableETCD = cfg.DisableETCD
serverConfig.ControlConfig.DisableAPIServer = cfg.DisableAPIServer
serverConfig.ControlConfig.DisableScheduler = cfg.DisableScheduler
serverConfig.ControlConfig.DisableControllerManager = cfg.DisableControllerManager
serverConfig.ControlConfig.ClusterInit = cfg.ClusterInit
serverConfig.ControlConfig.EncryptSecrets = cfg.EncryptSecrets
serverConfig.ControlConfig.EtcdExposeMetrics = cfg.EtcdExposeMetrics
serverConfig.ControlConfig.EtcdDisableSnapshots = cfg.EtcdDisableSnapshots
if !cfg.EtcdDisableSnapshots {
serverConfig.ControlConfig.EtcdSnapshotName = cfg.EtcdSnapshotName
serverConfig.ControlConfig.EtcdSnapshotCron = cfg.EtcdSnapshotCron
serverConfig.ControlConfig.EtcdSnapshotDir = cfg.EtcdSnapshotDir
serverConfig.ControlConfig.EtcdSnapshotRetention = cfg.EtcdSnapshotRetention
serverConfig.ControlConfig.EtcdS3 = cfg.EtcdS3
serverConfig.ControlConfig.EtcdS3Endpoint = cfg.EtcdS3Endpoint
serverConfig.ControlConfig.EtcdS3EndpointCA = cfg.EtcdS3EndpointCA
serverConfig.ControlConfig.EtcdS3SkipSSLVerify = cfg.EtcdS3SkipSSLVerify
serverConfig.ControlConfig.EtcdS3AccessKey = cfg.EtcdS3AccessKey
serverConfig.ControlConfig.EtcdS3SecretKey = cfg.EtcdS3SecretKey
serverConfig.ControlConfig.EtcdS3BucketName = cfg.EtcdS3BucketName
serverConfig.ControlConfig.EtcdS3Region = cfg.EtcdS3Region
serverConfig.ControlConfig.EtcdS3Folder = cfg.EtcdS3Folder
} else {
logrus.Info("ETCD snapshots are disabled")
}
if cfg.ClusterResetRestorePath != "" && !cfg.ClusterReset {
return errors.New("invalid flag use; --cluster-reset required with --cluster-reset-restore-path")
}
// make sure components are disabled so we only perform a restore
// and bail out
if cfg.ClusterResetRestorePath != "" && cfg.ClusterReset {
serverConfig.ControlConfig.ClusterInit = true
serverConfig.ControlConfig.DisableAPIServer = true
serverConfig.ControlConfig.DisableControllerManager = true
serverConfig.ControlConfig.DisableScheduler = true
serverConfig.ControlConfig.DisableCCM = true
}
serverConfig.ControlConfig.ClusterReset = cfg.ClusterReset
serverConfig.ControlConfig.ClusterResetRestorePath = cfg.ClusterResetRestorePath
serverConfig.ControlConfig.SystemDefaultRegistry = cfg.SystemDefaultRegistry
if serverConfig.ControlConfig.SupervisorPort == 0 {
serverConfig.ControlConfig.SupervisorPort = serverConfig.ControlConfig.HTTPSPort
}
if serverConfig.ControlConfig.DisableETCD && serverConfig.ControlConfig.JoinURL == "" {
return errors.New("invalid flag use; --server is required with --disable-etcd")
}
if serverConfig.ControlConfig.DisableAPIServer {
// Servers without a local apiserver need to connect to the apiserver via the proxy load-balancer.
serverConfig.ControlConfig.APIServerPort = cmds.AgentConfig.LBServerPort
// If the supervisor and externally-facing apiserver are not on the same port, the proxy will
// have a separate load-balancer for the apiserver that we need to use instead.
if serverConfig.ControlConfig.SupervisorPort != serverConfig.ControlConfig.HTTPSPort {
serverConfig.ControlConfig.APIServerPort = cmds.AgentConfig.LBServerPort - 1
}
}
if cmds.AgentConfig.FlannelIface != "" && len(cmds.AgentConfig.NodeIP) == 0 {
cmds.AgentConfig.NodeIP.Set(netutil.GetIPFromInterface(cmds.AgentConfig.FlannelIface))
}
if serverConfig.ControlConfig.PrivateIP == "" && len(cmds.AgentConfig.NodeIP) != 0 {
// ignoring the error here is fine since etcd will fall back to the interface's IPv4 address
serverConfig.ControlConfig.PrivateIP, _ = util.GetFirst4String(cmds.AgentConfig.NodeIP)
}
// if not set, try setting advertise-ip from agent node-external-ip
if serverConfig.ControlConfig.AdvertiseIP == "" && len(cmds.AgentConfig.NodeExternalIP) != 0 {
serverConfig.ControlConfig.AdvertiseIP, _ = util.GetFirst4String(cmds.AgentConfig.NodeExternalIP)
}
	// if not set, try setting advertise-ip from agent node-ip
if serverConfig.ControlConfig.AdvertiseIP == "" && len(cmds.AgentConfig.NodeIP) != 0 {
serverConfig.ControlConfig.AdvertiseIP, _ = util.GetFirst4String(cmds.AgentConfig.NodeIP)
}
// if we ended up with any advertise-ips, ensure they're added to the SAN list;
// note that kube-apiserver does not support dual-stack advertise-ip as of 1.21.0:
	// https://github.com/kubernetes/kubeadm/issues/1612#issuecomment-772583989
if serverConfig.ControlConfig.AdvertiseIP != "" {
serverConfig.ControlConfig.SANs = append(serverConfig.ControlConfig.SANs, serverConfig.ControlConfig.AdvertiseIP)
}
// configure ClusterIPRanges
if len(cmds.ServerConfig.ClusterCIDR) == 0 {
cmds.ServerConfig.ClusterCIDR.Set("10.42.0.0/16")
}
for _, cidr := range cmds.ServerConfig.ClusterCIDR {
for _, v := range strings.Split(cidr, ",") {
_, parsed, err := net.ParseCIDR(v)
if err != nil {
return errors.Wrapf(err, "invalid cluster-cidr %s", v)
}
serverConfig.ControlConfig.ClusterIPRanges = append(serverConfig.ControlConfig.ClusterIPRanges, parsed)
}
}
// set ClusterIPRange to the first IPv4 block, for legacy clients
clusterIPRange, err := util.GetFirst4Net(serverConfig.ControlConfig.ClusterIPRanges)
if err != nil {
return errors.Wrap(err, "cannot configure IPv4 cluster-cidr")
}
serverConfig.ControlConfig.ClusterIPRange = clusterIPRange
// configure ServiceIPRanges
if len(cmds.ServerConfig.ServiceCIDR) == 0 {
cmds.ServerConfig.ServiceCIDR.Set("10.43.0.0/16")
}
for _, cidr := range cmds.ServerConfig.ServiceCIDR {
for _, v := range strings.Split(cidr, ",") {
_, parsed, err := net.ParseCIDR(v)
if err != nil {
return errors.Wrapf(err, "invalid service-cidr %s", v)
}
serverConfig.ControlConfig.ServiceIPRanges = append(serverConfig.ControlConfig.ServiceIPRanges, parsed)
}
}
// set ServiceIPRange to the first IPv4 block, for legacy clients
serviceIPRange, err := util.GetFirst4Net(serverConfig.ControlConfig.ServiceIPRanges)
if err != nil {
return errors.Wrap(err, "cannot configure IPv4 service-cidr")
}
serverConfig.ControlConfig.ServiceIPRange = serviceIPRange
serverConfig.ControlConfig.ServiceNodePortRange, err = utilnet.ParsePortRange(cfg.ServiceNodePortRange)
if err != nil {
return errors.Wrapf(err, "invalid port range %s", cfg.ServiceNodePortRange)
}
// the apiserver service does not yet support dual-stack operation
_, apiServerServiceIP, err := controlplane.ServiceIPRange(*serverConfig.ControlConfig.ServiceIPRange)
if err != nil {
return err
}
serverConfig.ControlConfig.SANs = append(serverConfig.ControlConfig.SANs, apiServerServiceIP.String())
// If cluster-dns CLI arg is not set, we set ClusterDNS address to be the first IPv4 ServiceCIDR network + 10,
// i.e. when you set service-cidr to 192.168.0.0/16 and don't provide cluster-dns, it will be set to 192.168.0.10
// If there are no IPv4 ServiceCIDRs, an error will be raised.
if len(cmds.ServerConfig.ClusterDNS) == 0 {
clusterDNS, err := utilsnet.GetIndexedIP(serverConfig.ControlConfig.ServiceIPRange, 10)
if err != nil {
return errors.Wrap(err, "cannot configure default cluster-dns address")
}
serverConfig.ControlConfig.ClusterDNS = clusterDNS
serverConfig.ControlConfig.ClusterDNSs = []net.IP{serverConfig.ControlConfig.ClusterDNS}
} else {
for _, ip := range cmds.ServerConfig.ClusterDNS {
for _, v := range strings.Split(ip, ",") {
parsed := net.ParseIP(v)
if parsed == nil {
return fmt.Errorf("invalid cluster-dns address %s", v)
}
serverConfig.ControlConfig.ClusterDNSs = append(serverConfig.ControlConfig.ClusterDNSs, parsed)
}
}
// Set ClusterDNS to the first IPv4 address, for legacy clients
clusterDNS, err := util.GetFirst4(serverConfig.ControlConfig.ClusterDNSs)
if err != nil {
return errors.Wrap(err, "cannot configure IPv4 cluster-dns address")
}
serverConfig.ControlConfig.ClusterDNS = clusterDNS
}
if err := validateNetworkConfiguration(serverConfig); err != nil {
return err
}
if cfg.DefaultLocalStoragePath == "" {
dataDir, err := datadir.LocalHome(cfg.DataDir, false)
if err != nil {
return err
}
serverConfig.ControlConfig.DefaultLocalStoragePath = filepath.Join(dataDir, "/storage")
} else {
serverConfig.ControlConfig.DefaultLocalStoragePath = cfg.DefaultLocalStoragePath
}
serverConfig.ControlConfig.Skips = map[string]bool{}
for _, noDeploy := range app.StringSlice("no-deploy") {
for _, v := range strings.Split(noDeploy, ",") {
v = strings.TrimSpace(v)
serverConfig.ControlConfig.Skips[v] = true
}
}
serverConfig.ControlConfig.Disables = map[string]bool{}
for _, disable := range app.StringSlice("disable") {
for _, v := range strings.Split(disable, ",") {
v = strings.TrimSpace(v)
serverConfig.ControlConfig.Skips[v] = true
serverConfig.ControlConfig.Disables[v] = true
}
}
if serverConfig.ControlConfig.Skips["servicelb"] {
serverConfig.DisableServiceLB = true
}
if serverConfig.ControlConfig.DisableCCM {
serverConfig.ControlConfig.Skips["ccm"] = true
serverConfig.ControlConfig.Disables["ccm"] = true
}
tlsMinVersionArg := getArgValueFromList("tls-min-version", cfg.ExtraAPIArgs)
serverConfig.ControlConfig.TLSMinVersion, err = kubeapiserverflag.TLSVersion(tlsMinVersionArg)
if err != nil {
return errors.Wrap(err, "invalid tls-min-version")
}
serverConfig.StartupHooks = append(serverConfig.StartupHooks, cfg.StartupHooks...)
serverConfig.LeaderControllers = append(serverConfig.LeaderControllers, leaderControllers...)
serverConfig.Controllers = append(serverConfig.Controllers, controllers...)
// TLS config based on mozilla ssl-config generator
// https://ssl-config.mozilla.org/#server=golang&version=1.13.6&config=intermediate&guideline=5.4
// Need to disable the TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 Cipher for TLS1.2
tlsCipherSuitesArg := getArgValueFromList("tls-cipher-suites", cfg.ExtraAPIArgs)
tlsCipherSuites := strings.Split(tlsCipherSuitesArg, ",")
for i := range tlsCipherSuites {
tlsCipherSuites[i] = strings.TrimSpace(tlsCipherSuites[i])
}
if len(tlsCipherSuites) == 0 || tlsCipherSuites[0] == "" {
tlsCipherSuites = []string{
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
}
}
serverConfig.ControlConfig.TLSCipherSuites, err = kubeapiserverflag.TLSCipherSuites(tlsCipherSuites)
if err != nil {
return errors.Wrap(err, "invalid tls-cipher-suites")
}
logrus.Info("Starting " + version.Program + " " + app.App.Version)
ctx := signals.SetupSignalHandler(context.Background())
if err := server.StartServer(ctx, &serverConfig); err != nil {
return err
}
go func() {
if !serverConfig.ControlConfig.DisableAPIServer {
<-serverConfig.ControlConfig.Runtime.APIServerReady
logrus.Info("Kube API server is now running")
} else {
<-serverConfig.ControlConfig.Runtime.ETCDReady
logrus.Info("ETCD server is now running")
}
logrus.Info(version.Program + " is up and running")
if cfg.DisableAgent && os.Getenv("NOTIFY_SOCKET") != "" {
systemd.SdNotify(true, "READY=1\n")
}
}()
if cfg.DisableAgent {
<-ctx.Done()
return nil
}
ip := serverConfig.ControlConfig.BindAddress
if ip == "" {
ip = "127.0.0.1"
}
url := fmt.Sprintf("https://%s:%d", ip, serverConfig.ControlConfig.SupervisorPort)
token, err := clientaccess.FormatToken(serverConfig.ControlConfig.Runtime.AgentToken, serverConfig.ControlConfig.Runtime.ServerCA)
if err != nil {
return err
}
agentConfig := cmds.AgentConfig
agentConfig.Debug = app.GlobalBool("debug")
agentConfig.DataDir = filepath.Dir(serverConfig.ControlConfig.DataDir)
agentConfig.ServerURL = url
agentConfig.Token = token
agentConfig.DisableLoadBalancer = !serverConfig.ControlConfig.DisableAPIServer
agentConfig.ETCDAgent = serverConfig.ControlConfig.DisableAPIServer
agentConfig.ClusterReset = serverConfig.ControlConfig.ClusterReset
agentConfig.Rootless = cfg.Rootless
if agentConfig.Rootless {
// let agent specify Rootless kubelet flags, but not unshare twice
agentConfig.RootlessAlreadyUnshared = true
}
if serverConfig.ControlConfig.DisableAPIServer {
// initialize the apiAddress Channel for receiving the api address from etcd
agentConfig.APIAddressCh = make(chan string, 1)
setAPIAddressChannel(ctx, &serverConfig, &agentConfig)
defer close(agentConfig.APIAddressCh)
}
return agent.Run(ctx, agentConfig)
}
// validateNetworkConfiguration ensures that the network configuration values make sense.
func validateNetworkConfiguration(serverConfig server.Config) error {
// Dual-stack operation requires fairly extensive manual configuration at the moment - do some
// preflight checks to make sure that the user isn't trying to use flannel/npc, or trying to
// enable dual-stack DNS (which we don't currently support since it's not easy to template)
dualCluster, err := utilsnet.IsDualStackCIDRs(serverConfig.ControlConfig.ClusterIPRanges)
if err != nil {
return errors.Wrap(err, "failed to validate cluster-cidr")
}
dualService, err := utilsnet.IsDualStackCIDRs(serverConfig.ControlConfig.ServiceIPRanges)
if err != nil {
return errors.Wrap(err, "failed to validate service-cidr")
}
dualDNS, err := utilsnet.IsDualStackIPs(serverConfig.ControlConfig.ClusterDNSs)
if err != nil {
return errors.Wrap(err, "failed to validate cluster-dns")
}
	if (serverConfig.ControlConfig.FlannelBackend != "none" || !serverConfig.ControlConfig.DisableNPC) && (dualCluster || dualService) {
return errors.New("flannel CNI and network policy enforcement are not compatible with dual-stack operation; server must be restarted with --flannel-backend=none --disable-network-policy and an alternative CNI plugin deployed")
}
	if dualDNS {
return errors.New("dual-stack cluster-dns is not supported")
}
return nil
}
func knownIPs(ips []string) []string {
ips = append(ips, "127.0.0.1")
ip, err := utilnet.ChooseHostInterface()
if err == nil {
ips = append(ips, ip.String())
}
return ips
}
func getArgValueFromList(searchArg string, argList []string) string {
var value string
for _, arg := range argList {
splitArg := strings.SplitN(arg, "=", 2)
if splitArg[0] == searchArg {
value = splitArg[1]
// break if we found our value
break
}
}
return value
}
// setAPIAddressChannel will try to get the api address key from etcd and, when it succeeds,
// send the value on the APIAddressCh channel. It works for both k3s and rke2: in k3s we block
// the return to agent.Run until we get the api address, whereas in rke2 the lookup does not
// block and instead runs in a goroutine.
func setAPIAddressChannel(ctx context.Context, serverConfig *server.Config, agentConfig *cmds.Agent) {
// start a goroutine to check for the server ip if set from etcd in case of rke2
if serverConfig.ControlConfig.HTTPSPort != serverConfig.ControlConfig.SupervisorPort {
go getAPIAddressFromEtcd(ctx, serverConfig, agentConfig)
return
}
getAPIAddressFromEtcd(ctx, serverConfig, agentConfig)
}
func getAPIAddressFromEtcd(ctx context.Context, serverConfig *server.Config, agentConfig *cmds.Agent) {
t := time.NewTicker(5 * time.Second)
defer t.Stop()
for range t.C {
serverAddress, err := etcd.GetAPIServerURLFromETCD(ctx, &serverConfig.ControlConfig)
if err == nil {
agentConfig.ServerURL = "https://" + serverAddress
agentConfig.APIAddressCh <- agentConfig.ServerURL
break
}
logrus.Warn(err)
}
}
| [
"\"NOTIFY_SOCKET\""
]
| []
| [
"NOTIFY_SOCKET"
]
| [] | ["NOTIFY_SOCKET"] | go | 1 | 0 | |
pkg/globalconfig/global_config.go | package globalconfig
import (
"context"
"fmt"
"github.com/drud/ddev/pkg/nodeps"
"github.com/drud/ddev/pkg/output"
"github.com/mitchellh/go-homedir"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
)
// DdevGlobalConfigName is the name of the global config file.
const DdevGlobalConfigName = "global_config.yaml"
var (
// DdevGlobalConfig is the currently active global configuration struct
DdevGlobalConfig GlobalConfig
)
func init() {
DdevGlobalConfig.ProjectList = make(map[string]*ProjectInfo)
}
type ProjectInfo struct {
AppRoot string `yaml:"approot"`
UsedHostPorts []string `yaml:"used_host_ports,omitempty,flow"`
}
// GlobalConfig is the struct defining ddev's global config
type GlobalConfig struct {
OmitContainersGlobal []string `yaml:"omit_containers,flow"`
NFSMountEnabledGlobal bool `yaml:"nfs_mount_enabled"`
MutagenEnabledGlobal bool `yaml:"mutagen_enabled"`
InstrumentationOptIn bool `yaml:"instrumentation_opt_in"`
RouterBindAllInterfaces bool `yaml:"router_bind_all_interfaces"`
InternetDetectionTimeout int64 `yaml:"internet_detection_timeout_ms"`
DeveloperMode bool `yaml:"developer_mode,omitempty"`
InstrumentationUser string `yaml:"instrumentation_user,omitempty"`
LastStartedVersion string `yaml:"last_started_version"`
MkcertCARoot string `yaml:"mkcert_caroot"`
UseHardenedImages bool `yaml:"use_hardened_images"`
UseLetsEncrypt bool `yaml:"use_letsencrypt"`
LetsEncryptEmail string `yaml:"letsencrypt_email"`
AutoRestartContainers bool `yaml:"auto_restart_containers"`
FailOnHookFailGlobal bool `yaml:"fail_on_hook_fail"`
WebEnvironment []string `yaml:"web_environment"`
DisableHTTP2 bool `yaml:"disable_http2"`
ProjectList map[string]*ProjectInfo `yaml:"project_info"`
}
// GetGlobalConfigPath gets the path to global config file
func GetGlobalConfigPath() string {
return filepath.Join(GetGlobalDdevDir(), DdevGlobalConfigName)
}
// GetMutagenDir returns the directory of the mutagen config and binary
func GetMutagenDir() string {
return filepath.Join(GetGlobalDdevDir(), "bin")
}
// GetMutagenPath gets the full path to the mutagen binary
func GetMutagenPath() string {
mutagenBinary := "mutagen"
if runtime.GOOS == "windows" {
mutagenBinary = mutagenBinary + ".exe"
}
return filepath.Join(GetMutagenDir(), mutagenBinary)
}
// ValidateGlobalConfig validates global config
func ValidateGlobalConfig() error {
if !IsValidOmitContainers(DdevGlobalConfig.OmitContainersGlobal) {
return fmt.Errorf("Invalid omit_containers: %s, must contain only %s", strings.Join(DdevGlobalConfig.OmitContainersGlobal, ","), strings.Join(GetValidOmitContainers(), ",")).(InvalidOmitContainers)
}
return nil
}
// ReadGlobalConfig reads the global config file into DdevGlobalConfig
func ReadGlobalConfig() error {
globalConfigFile := GetGlobalConfigPath()
// Can't use fileutil.FileExists() here because of import cycle.
if _, err := os.Stat(globalConfigFile); err != nil {
		// The global config file doesn't exist yet.
		// If running as root (only ddev hostname should do this), don't create it;
		// otherwise write out a default global config below.
if os.Geteuid() == 0 {
logrus.Warning("not reading global config file because running with root privileges")
return nil
}
if os.IsNotExist(err) {
err := WriteGlobalConfig(DdevGlobalConfig)
if err != nil {
return err
}
} else {
return err
}
}
source, err := os.ReadFile(globalConfigFile)
if err != nil {
		return fmt.Errorf("Unable to read ddev global config file %s: %v", globalConfigFile, err)
}
// ReadConfig config values from file.
DdevGlobalConfig = GlobalConfig{InternetDetectionTimeout: nodeps.InternetDetectionTimeoutDefault}
err = yaml.Unmarshal(source, &DdevGlobalConfig)
if err != nil {
return err
}
if DdevGlobalConfig.ProjectList == nil {
DdevGlobalConfig.ProjectList = map[string]*ProjectInfo{}
}
// Set/read the CAROOT if it's unset or different from $CAROOT (perhaps $CAROOT changed)
caRootEnv := os.Getenv("CAROOT")
if DdevGlobalConfig.MkcertCARoot == "" || (caRootEnv != "" && caRootEnv != DdevGlobalConfig.MkcertCARoot) {
DdevGlobalConfig.MkcertCARoot = readCAROOT()
}
// This is added just so we can see it in global; not checked.
// Make sure that LastStartedVersion always has a valid value
if DdevGlobalConfig.LastStartedVersion == "" {
DdevGlobalConfig.LastStartedVersion = "v0.0"
}
// If they set the internetdetectiontimeout below default, just reset to default
// and ignore the setting.
if DdevGlobalConfig.InternetDetectionTimeout < nodeps.InternetDetectionTimeoutDefault {
DdevGlobalConfig.InternetDetectionTimeout = nodeps.InternetDetectionTimeoutDefault
}
err = ValidateGlobalConfig()
if err != nil {
return err
}
return nil
}
// WriteGlobalConfig writes the global config into ~/.ddev.
func WriteGlobalConfig(config GlobalConfig) error {
err := ValidateGlobalConfig()
if err != nil {
return err
}
cfgbytes, err := yaml.Marshal(config)
if err != nil {
return err
}
	// Append commented instructions and examples to the generated config
instructions := `
# You can turn off usage of the dba (phpmyadmin) container and/or
# ddev-ssh-agent containers with
# omit_containers["dba", "ddev-ssh-agent"]
# and you can opt in or out of sending instrumentation the ddev developers with
# instrumentation_opt_in: true # or false
#
# You can enable nfs mounting for all projects with
# nfs_mount_enabled: true
#
# You can inject environment variables into the web container with:
# web_environment:
# - SOMEENV=somevalue
# - SOMEOTHERENV=someothervalue
# In unusual cases the default value to wait to detect internet availability is too short.
# You can adjust this value higher to make it less likely that ddev will declare internet
# unavailable, but ddev may wait longer on some commands. This should not be set below the default 750
# ddev will ignore low values, as they're not useful
# internet_detection_timeout_ms: 750
# You can enable 'ddev start' to be interrupted by a failing hook with
# fail_on_hook_fail: true
# disable_http2: false
# Disable http2 on ddev-router if true
# instrumentation_user: <your_username> # can be used to give ddev specific info about who you are
# developer_mode: true # (defaults to false) is not used widely at this time.
# router_bind_all_interfaces: false # (defaults to false)
# If true, ddev-router will bind http/s, PHPMyAdmin, and MailHog ports on all
# network interfaces instead of just localhost, so others on your local network can
# access those ports. Note that this exposes the PHPMyAdmin and MailHog ports as well, which
# can be a major security issue, so choose wisely. Consider omit_containers[dba] to avoid
# exposing PHPMyAdmin.
# use_hardened_images: false
# With hardened images a container that is exposed to the internet is
# a harder target, although not as hard as a fully-secured host.
# sudo is removed, mailhog is removed, and since the web container
# is run only as the owning user, only project files might be changed
# if a CMS or PHP bug allowed creating or altering files, and
# permissions should not allow escalation.
# Let's Encrypt:
# This integration is entirely experimental; your mileage may vary.
# * Your host must be directly internet-connected.
# * DNS for the hostname must be set to point to the host in question
# * You must have router_bind_all_interfaces: true or else the Let's Encrypt certbot
# process will not be able to process the IP address of the host (and nobody will be able to access your site)
# * You will need to add a startup script to start your sites after a host reboot.
# * If using several sites at a single top-level domain, you'll probably want to set
# project_tld to that top-level domain. Otherwise, you can use additional-hostnames or
# additional_fqdns.
#
# use_letsencrypt: false
# (Experimental, only useful on an internet-based server)
# Set to true if certificates are to be obtained via certbot on https://letsencrypt.org/
# letsencrypt_email: <email>
# Email to be used for experimental letsencrypt certificates
# auto_restart_containers: false
# Experimental
# If true, attempt to automatically restart projects/containers after reboot or docker restart.
# fail_on_hook_fail: false
# Decide whether 'ddev start' should be interrupted by a failing hook
`
cfgbytes = append(cfgbytes, instructions...)
err = os.WriteFile(GetGlobalConfigPath(), cfgbytes, 0644)
if err != nil {
return err
}
return nil
}
// GetGlobalDdevDir returns ~/.ddev, the global caching directory
func GetGlobalDdevDir() string {
userHome, err := homedir.Dir()
if err != nil {
logrus.Fatal("could not get home directory for current user. is it set?")
}
ddevDir := filepath.Join(userHome, ".ddev")
// Create the directory if it is not already present.
if _, err := os.Stat(ddevDir); os.IsNotExist(err) {
// If they happen to be running as root/sudo, we won't create the directory
// but act like we did. This should only happen for ddev hostname, which
// doesn't need config or access to this dir anyway.
if os.Geteuid() == 0 {
return ddevDir
}
err = os.MkdirAll(ddevDir, 0755)
if err != nil {
logrus.Fatalf("Failed to create required directory %s, err: %v", ddevDir, err)
}
}
// config.yaml is not allowed in ~/.ddev, can only result in disaster
globalConfigYaml := filepath.Join(ddevDir, "config.yaml")
if _, err := os.Stat(globalConfigYaml); err == nil {
_ = os.Remove(filepath.Join(globalConfigYaml))
}
return ddevDir
}
// IsValidOmitContainers is a helper function to determine if the OmitContainers array is valid
func IsValidOmitContainers(containerList []string) bool {
for _, containerName := range containerList {
if _, ok := ValidOmitContainers[containerName]; !ok {
return false
}
}
return true
}
// GetValidOmitContainers is a helper function that returns a list of valid containers for OmitContainers.
func GetValidOmitContainers() []string {
s := make([]string, 0, len(ValidOmitContainers))
for p := range ValidOmitContainers {
s = append(s, p)
}
return s
}
// HostPostIsAllocated returns the project name that has allocated
// the port, or empty string.
func HostPostIsAllocated(port string) string {
for project, item := range DdevGlobalConfig.ProjectList {
if nodeps.ArrayContainsString(item.UsedHostPorts, port) {
return project
}
}
return ""
}
// CheckHostPortsAvailable checks GlobalDdev UsedHostPorts to see if requested ports are available.
func CheckHostPortsAvailable(projectName string, ports []string) error {
for _, port := range ports {
allocatedProject := HostPostIsAllocated(port)
if allocatedProject != projectName && allocatedProject != "" {
return fmt.Errorf("host port %s has already been allocated to project %s", port, allocatedProject)
}
}
return nil
}
// GetFreePort gets an ephemeral port currently available, but also not
// listed in DdevGlobalConfig.UsedHostPorts
func GetFreePort(localIPAddr string) (string, error) {
// Limit tries arbitrarily. It will normally succeed on first try.
for i := 1; i < 1000; i++ {
// From https://github.com/phayes/freeport/blob/master/freeport.go#L8
// Ignores that the actual listener may be on a docker toolbox interface,
// so this is just a heuristic.
addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
if err != nil {
return "", err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return "", err
}
port := strconv.Itoa(l.Addr().(*net.TCPAddr).Port)
// nolint: errcheck
l.Close()
// In the case of Docker Toolbox, the actual listening IP may be something else
// like 192.168.99.100, so check that to make sure it's not currently occupied.
conn, _ := net.Dial("tcp", localIPAddr+":"+port)
if conn != nil {
continue
}
if HostPostIsAllocated(port) != "" {
continue
}
return port, nil
}
return "-1", fmt.Errorf("GetFreePort() failed to find a free port")
}
// ReservePorts adds the ProjectInfo if necessary and assigns the reserved ports
func ReservePorts(projectName string, ports []string) error {
// If the project doesn't exist, add it.
_, ok := DdevGlobalConfig.ProjectList[projectName]
if !ok {
DdevGlobalConfig.ProjectList[projectName] = &ProjectInfo{}
}
DdevGlobalConfig.ProjectList[projectName].UsedHostPorts = ports
err := WriteGlobalConfig(DdevGlobalConfig)
return err
}
// SetProjectAppRoot sets the approot in the ProjectInfo of global config
func SetProjectAppRoot(projectName string, appRoot string) error {
// If the project doesn't exist, add it.
_, ok := DdevGlobalConfig.ProjectList[projectName]
if !ok {
DdevGlobalConfig.ProjectList[projectName] = &ProjectInfo{}
}
// Can't use fileutil.FileExists because of import cycle.
if _, err := os.Stat(appRoot); err != nil {
return fmt.Errorf("project %s project root %s does not exist", projectName, appRoot)
}
if DdevGlobalConfig.ProjectList[projectName].AppRoot != "" && DdevGlobalConfig.ProjectList[projectName].AppRoot != appRoot {
return fmt.Errorf("project %s project root is already set to %s, refusing to change it to %s; you can `ddev stop --unlist %s` and start again if the listed project root is in error", projectName, DdevGlobalConfig.ProjectList[projectName].AppRoot, appRoot, projectName)
}
DdevGlobalConfig.ProjectList[projectName].AppRoot = appRoot
err := WriteGlobalConfig(DdevGlobalConfig)
return err
}
// GetProject returns a project given name provided,
// or nil if not found.
func GetProject(projectName string) *ProjectInfo {
project, ok := DdevGlobalConfig.ProjectList[projectName]
if !ok {
return nil
}
return project
}
// RemoveProjectInfo removes the ProjectInfo line for a project
func RemoveProjectInfo(projectName string) error {
_, ok := DdevGlobalConfig.ProjectList[projectName]
if ok {
delete(DdevGlobalConfig.ProjectList, projectName)
err := WriteGlobalConfig(DdevGlobalConfig)
if err != nil {
return err
}
}
return nil
}
// GetGlobalProjectList returns the global project list map
func GetGlobalProjectList() map[string]*ProjectInfo {
return DdevGlobalConfig.ProjectList
}
// GetCAROOT is just a wrapper on global config
func GetCAROOT() string {
return DdevGlobalConfig.MkcertCARoot
}
// readCAROOT() verifies that the mkcert command is available and its CA keys readable.
// 1. Find out CAROOT
// 2. Look there to see if key/crt are readable
// 3. If not, see if mkcert is even available, return empty
func readCAROOT() string {
_, err := exec.LookPath("mkcert")
if err != nil {
return ""
}
out, err := exec.Command("mkcert", "-CAROOT").Output()
if err != nil {
return ""
}
root := strings.Trim(string(out), "\n")
if !fileIsReadable(filepath.Join(root, "rootCA-key.pem")) || !fileExists(filepath.Join(root, "rootCA.pem")) {
return ""
}
return root
}
// fileIsReadable checks to make sure a file exists and is readable
// Copied from fileutil because of import cycles
func fileIsReadable(name string) bool {
file, err := os.OpenFile(name, os.O_RDONLY, 0666)
if err != nil {
return false
}
file.Close()
return true
}
// fileExists checks a file's existence
// Copied from fileutil because of import cycles
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// IsInternetActiveAlreadyChecked just flags whether it's been checked
var IsInternetActiveAlreadyChecked = false
// IsInternetActiveResult is the result of the check
var IsInternetActiveResult = false
// IsInternetActiveNetResolver wraps the standard DNS resolver.
// In order to override net.DefaultResolver with a stub, we have to define an
// interface on our own since there is none from the standard library.
var IsInternetActiveNetResolver interface {
LookupHost(ctx context.Context, host string) (addrs []string, err error)
} = net.DefaultResolver
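// Illustrative sketch only: a test can stub DNS lookups by assigning any value with a
// matching LookupHost method to IsInternetActiveNetResolver, for example:
//
//	type fakeResolver struct{}
//
//	func (fakeResolver) LookupHost(ctx context.Context, host string) ([]string, error) {
//		return []string{"127.0.0.1"}, nil
//	}
//
//	// in a test: IsInternetActiveNetResolver = fakeResolver{}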
// IsInternetActive checks to see if we have a viable
// internet connection. It just tries a quick DNS query.
// This requires that the named record be query-able.
// This check will only be made once per command run.
func IsInternetActive() bool {
// if this was already checked, return the result
if IsInternetActiveAlreadyChecked {
return IsInternetActiveResult
}
timeout := time.Duration(DdevGlobalConfig.InternetDetectionTimeout) * time.Millisecond
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
randomURL := nodeps.RandomString(10) + ".ddev.site"
addrs, err := IsInternetActiveNetResolver.LookupHost(ctx, randomURL)
// Internet is active (active == true) if both err and ctx.Err() were nil
active := err == nil && ctx.Err() == nil
if os.Getenv("DDEV_DEBUG") != "" {
		if !active {
output.UserErr.Println("Internet connection not detected, DNS may not work, see https://ddev.readthedocs.io/en/stable/users/faq/ for info.")
}
output.UserErr.Printf("IsInternetActive DEBUG: err=%v ctx.Err()=%v addrs=%v IsInternetactive==%v, randomURL=%v internet_detection_timeout_ms=%dms\n", err, ctx.Err(), addrs, active, randomURL, DdevGlobalConfig.InternetDetectionTimeout)
}
// remember the result to not call this twice
IsInternetActiveAlreadyChecked = true
IsInternetActiveResult = active
return active
}
| [
"\"CAROOT\"",
"\"DDEV_DEBUG\""
]
| []
| [
"DDEV_DEBUG",
"CAROOT"
]
| [] | ["DDEV_DEBUG", "CAROOT"] | go | 2 | 0 | |
kolibri/utils/tests/test_cli.py | """
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import tempfile
import pytest
from django.db.utils import OperationalError
from mock import patch
import kolibri
from kolibri.plugins.utils import autoremove_unavailable_plugins
from kolibri.utils import cli
from kolibri.utils import options
logger = logging.getLogger(__name__)
LOG_LOGGER = []
def log_logger(logger_instance, LEVEL, msg, args, **kwargs):
"""
    Monkeypatching for logging.Logger._log to scoop up log messages so we can
    test that something specific was logged.
"""
LOG_LOGGER.append((LEVEL, msg))
# Call the original function
logger_instance.__log(LEVEL, msg, args, **kwargs)
def activate_log_logger(monkeypatch):
"""
Activates logging everything to ``LOG_LOGGER`` with the monkeypatch pattern
of py.test (test accepts a ``monkeypatch`` argument)
"""
monkeypatch.setattr(logging.Logger, "__log", logging.Logger._log, raising=False)
monkeypatch.setattr(logging.Logger, "_log", log_logger)
@pytest.fixture
def plugins():
from kolibri import plugins
_, config_file = tempfile.mkstemp(suffix="json")
old_config_file = plugins.conf_file
plugins.conf_file = config_file
plugins.config.set_defaults()
yield plugins
plugins.conf_file = old_config_file
def test_bogus_plugin_autoremove(plugins):
"""
Checks that a plugin is auto-removed when it cannot be imported
"""
plugin_name = "giraffe.horse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
autoremove_unavailable_plugins()
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
def test_bogus_plugin_autoremove_no_path(plugins):
"""
Checks that a plugin without a dotted path is also auto-removed
"""
plugin_name = "giraffehorse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
autoremove_unavailable_plugins()
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
def test_bogus_plugin_disable(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
disabled_apps_before = plugins.config["DISABLED_PLUGINS"].copy()
try:
cli.disable.callback(("i_do_not_exist",), False)
except Exception:
pass
assert installed_apps_before == plugins.config["INSTALLED_PLUGINS"]
assert disabled_apps_before == plugins.config["DISABLED_PLUGINS"]
def test_plugin_cannot_be_imported_disable(plugins):
"""
A plugin may be in plugins.config['INSTALLED_PLUGINS'] but broken or uninstalled
"""
plugin_name = "giraffe.horse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
try:
cli.disable.callback((plugin_name,), False)
except Exception:
pass
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
# We also don't want to endlessly add cruft to the disabled apps
assert plugin_name not in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
test_plugin = "kolibri.plugins.media_player"
assert test_plugin in installed_apps_before
# Because RIP example plugin
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable_twice(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
test_plugin = "kolibri.plugins.media_player"
assert test_plugin in installed_apps_before
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config.ACTIVE_PLUGINS
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config.ACTIVE_PLUGINS
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
def test_plugin_with_no_plugin_class(plugins):
"""
Expected behavior is that nothing blows up with exceptions, user just gets
a warning and nothing is enabled or changed in the configuration.
"""
# For fun, we pass in a system library
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
try:
cli.enable.callback(("os.path",), False)
except Exception:
pass
assert installed_apps_before == plugins.config["INSTALLED_PLUGINS"]
@pytest.mark.django_db
def test_kolibri_listen_port_env(monkeypatch):
"""
Starts and stops the server, mocking the actual server.start()
Checks that the correct fallback port is used from the environment.
"""
with patch("django.core.management.call_command"), patch(
"kolibri.utils.server.start"
) as start:
from kolibri.utils import server
def start_mock(port, *args, **kwargs):
assert port == test_port
try:
os.remove(server.STARTUP_LOCK)
except OSError:
pass
activate_log_logger(monkeypatch)
start.side_effect = start_mock
test_port = 1234
os.environ["KOLIBRI_HTTP_PORT"] = str(test_port)
        # force a reload of conf.OPTIONS so the environment variable will be read in
from kolibri.utils import conf
conf.OPTIONS.update(options.read_options_file(conf.KOLIBRI_HOME))
cli.start.callback(test_port, False)
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
assert excinfo.code == 0
# Stop the server AGAIN, asserting that we can call the stop command
# on an already stopped server and will be gracefully informed about
# it.
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
assert excinfo.code == 0
assert "Already stopped" in LOG_LOGGER[-1][1]
def status_starting_up():
raise server.NotRunning(server.STATUS_STARTING_UP)
# Ensure that if a server is reported to be 'starting up', it doesn't
# get killed while doing that.
monkeypatch.setattr(server, "get_status", status_starting_up)
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
assert excinfo.code == server.STATUS_STARTING_UP
assert "Not stopped" in LOG_LOGGER[-1][1]
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="")
@patch("kolibri.utils.cli.update")
@patch("kolibri.utils.cli.plugin.callback")
@patch("kolibri.core.deviceadmin.utils.dbbackup")
def test_first_run(dbbackup, plugin, update, get_version):
"""
Tests that the first_run() function performs as expected
"""
cli.initialize()
update.assert_called_once()
dbbackup.assert_not_called()
# Check that it got called for each default plugin
from kolibri import plugins
assert set(plugins.config["INSTALLED_PLUGINS"]) == set(plugins.DEFAULT_PLUGINS)
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="0.0.1")
@patch("kolibri.utils.cli.update")
def test_update(update, get_version):
"""
Tests that update() function performs as expected
"""
cli.initialize()
update.assert_called_once()
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="0.0.1")
def test_update_exits_if_running(get_version):
"""
Tests that update() function performs as expected
"""
with patch("kolibri.utils.cli.server.get_status"):
try:
cli.initialize()
pytest.fail("Update did not exit when Kolibri was already running")
except SystemExit:
pass
@pytest.mark.django_db
def test_version_updated():
"""
Tests our db backup logic: version_updated gets any change, backup gets only non-dev changes
"""
assert cli.version_updated("0.10.0", "0.10.1")
assert not cli.version_updated("0.10.0", "0.10.0")
assert not cli.should_back_up("0.10.0-dev0", "")
assert not cli.should_back_up("0.10.0-dev0", "0.10.0")
assert not cli.should_back_up("0.10.0", "0.10.0-dev0")
assert not cli.should_back_up("0.10.0-dev0", "0.10.0-dev0")
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value=kolibri.__version__)
@patch("kolibri.utils.cli.update")
@patch("kolibri.core.deviceadmin.utils.dbbackup")
def test_update_no_version_change(dbbackup, update, get_version):
"""
Tests that when the version doesn't change, we are not doing things we
shouldn't
"""
cli.initialize()
update.assert_not_called()
dbbackup.assert_not_called()
def test_cli_usage():
# Test the -h
with pytest.raises(SystemExit) as excinfo:
cli.main("-h")
assert excinfo.code == 0
with pytest.raises(SystemExit) as excinfo:
cli.main("--version")
assert excinfo.code == 0
@patch("kolibri.utils.cli.click.echo")
def test_list_plugins(echo_mock, plugins):
cli.list.callback()
test_plugin = "kolibri.plugins.media_player"
any(
map(
lambda x: test_plugin in x[0] and "ENABLED" in x[0],
echo_mock.call_args_list,
)
)
@patch("kolibri.utils.cli.click.echo")
def test_list_plugins_disabled(echo_mock, plugins):
cli.list.callback()
test_plugin = "kolibri.plugins.media_player"
cli.disable.callback((test_plugin,), False)
any(
map(
lambda x: test_plugin in x[0] and "DISABLED" in x[0],
echo_mock.call_args_list,
)
)
@patch("kolibri.utils.cli._migrate_databases")
@patch("kolibri.utils.cli.version_updated")
def test_migrate_if_unmigrated(version_updated, _migrate_databases):
# No matter what, ensure that version_updated returns False
version_updated.return_value = False
from morango.models import InstanceIDModel
with patch.object(
InstanceIDModel, "get_or_create_current_instance"
) as get_or_create_current_instance:
get_or_create_current_instance.side_effect = OperationalError("Test")
cli.initialize()
_migrate_databases.assert_called_once()
| []
| []
| [
"KOLIBRI_HTTP_PORT"
]
| [] | ["KOLIBRI_HTTP_PORT"] | python | 1 | 0 | |
ppmessage/api/handlers/setdeviceinfohandler.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, [email protected]
#
from .basehandler import BaseHandler
from ppmessage.db.models import DeviceInfo
from ppmessage.db.models import DeviceUser
from ppmessage.core.redis import redis_hash_to_dict
from ppmessage.core.constant import INVALID_IOS_TOKEN
from ppmessage.core.constant import API_LEVEL
from ppmessage.api.error import API_ERR
import logging
import json
class SetDeviceInfoHandler(BaseHandler):
"""
    request:
header with device uuid
body {fullname:, ostype:, osversion:, apilevel:, phone:, iostoken:, iosmodel:,}
ostype: IOS/ANDROID/WP/MAC/WIN/LINUX
response:
device id/ device uuid
"""
def _post(self, _request):
_device_uuid = _request.get("device_uuid")
if _device_uuid == None:
self.setErrorCode(API_ERR.ERR_PARAM)
return
_redis = self.application.redis
_device = redis_hash_to_dict(_redis, DeviceInfo, _device_uuid)
if _device is None:
            logging.error("Error NO DEVICE for key [%s]." % (_device_uuid))
self.setErrorCode(API_ERR.NO_DEVICE)
return
_values = {
"uuid": _device_uuid
}
if "fullname" in _request:
_values["device_fullname"]= _request["fullname"]
if "ostype" in _request:
_values["device_ostype"] = _request["ostype"].upper()
if "osversion" in _request:
_values["device_osversion"] = _request["osversion"]
# apilevel for android
if "apilevel" in _request:
_values["device_android_apilevel"] = _request["apilevel"]
# phone number for phone
if "phone" in _request:
_values["device_phonenumber"] = _request["phone"]
if "iosmodel" in _request:
_values["device_ios_model"] = _request["iosmodel"]
if "iostoken" in _request:
_values["device_ios_token"] = _request["iostoken"]
self.application.redis.srem(INVALID_IOS_TOKEN, _request["iostoken"])
if "device_android_gcmtoken" in _request:
_values["device_android_gcmtoken"] = _request["device_android_gcmtoken"]
if "device_android_gcmpush" in _request:
_values["device_android_gcmpush"] = _request["device_android_gcmpush"]
_row = DeviceInfo(**_values)
_row.update_redis_keys(_redis)
_row.async_update(_redis)
return
def initialize(self):
self.addPermission(api_level=API_LEVEL.PPKEFU)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_KEFU)
return
def _Task(self):
super(SetDeviceInfoHandler, self)._Task()
self._post(json.loads(self.request.body))
| []
| []
| []
| [] | [] | python | null | null | null |
pb_run_status.py | #!/usr/bin/env python3
import os.path
from glob import glob
import sys
import logging as L
import datetime
class RunStatus:
"""This Class provides information about a PacBio sequel run, given a run folder.
It will parse information from the following sources:
        */*.subreadset.xml (not actually parsed at present - see the quick_mode note in __init__)
Run directory content (including pbpipeline subdir) - to obtain status information
The status will correspond to a state in the state diagram - see the design doc.
"""
CELL_PENDING = 0 # waiting for data from the sequencer
CELL_READY = 1 # the pipeline should process this cell now
CELL_PROCESSING = 2 # the pipeline is working on this cell
CELL_PROCESSED = 3 # the pipeline has finished on this cell
CELL_FAILED = 4 # the pipeline failed to process this cell
CELL_ABORTED = 5 # cell aborted - disregard it
def __init__( self, pbrun_dir, opts = '', to_location=None, stall_time=None ):
# Now that all the touch files are living in the output location, we need to work
# out both the output location and the input location for this run. The former may
# not yet exist.
self.stall_time = int(stall_time) if stall_time is not None else None
# We need this so we can meaningfully inspect basename(pbrun_dir)
pbrun_dir = os.path.abspath(pbrun_dir)
self._assertion_error = False
if os.path.exists(os.path.join(pbrun_dir, 'pbpipeline', 'from')):
# ok, pbrun_dir was an existing output directory
self.to_path = pbrun_dir
self.from_path = os.path.join(pbrun_dir, 'pbpipeline', 'from')
elif to_location:
if os.path.isdir(os.path.join(to_location,
os.path.basename(pbrun_dir),
'pbpipeline', 'from')):
# The link we just found should be pointing back to us!
if not os.path.realpath(
os.path.join(to_location,
os.path.basename(pbrun_dir),
'pbpipeline', 'from') ) == os.path.realpath( pbrun_dir ):
self._assertion_error = True
# If the above check works I definitely found the output directory for this run
self.to_path = os.path.join(to_location, os.path.basename(pbrun_dir))
self.from_path = pbrun_dir
else:
# In that case there should be no directory at all
if os.path.exists(os.path.join(to_location, os.path.basename(pbrun_dir))):
self._assertion_error = True
# Or else conclude the run is new
self.to_path = os.path.join(to_location, os.path.basename(pbrun_dir))
self.from_path = pbrun_dir
else:
# We dunno
raise Exception("Location {} does not look like an output directory and no TO_LOCATION is set.".format(
pbrun_dir) )
# This is redundant as we never parse the XML anyway.
self.quick_mode = 'q' in opts
self._clear_cache()
def _clear_cache( self ):
self._exists_cache = dict()
self._cells_cache = None
def _exists_from( self, glob_pattern ):
""" Returns if a file exists in from_path and caches the result.
"""
return self._exists(glob_pattern, self.from_path)
def _exists_to( self, glob_pattern ):
""" Returns if a file exists in to_path and caches the result.
"""
return self._exists(glob_pattern, self.to_path)
def _exists( self, glob_pattern, root_path ):
""" Returns if a file exists in root_path and caches the result.
The check will be done with glob() so wildcards can be used, and
the result will be the number of matches.
"""
full_pattern = os.path.join(root_path, glob_pattern)
if full_pattern not in self._exists_cache:
self._exists_cache[full_pattern] = glob(full_pattern)
L.debug("_exists {} => {}".format(full_pattern, self._exists_cache[full_pattern]))
return len( self._exists_cache[full_pattern] )
def get_cells( self ):
""" Returns a dict of { cellname: status } where status is one of the constants
defined above
We assume that all of the directories appear right when the run starts, and
that a .transferdone file signals the cell is ready
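            e.g. (illustrative) the result might look like {'1_A01': CELL_PROCESSED, '2_B01': CELL_READY}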
"""
if self._cells_cache is not None:
return self._cells_cache
# OK, we need to work it out...
res = dict()
cells = glob( os.path.join(self.from_path, '[0-9]_???/') )
for cell in cells:
cellname = cell.rstrip('/').split('/')[-1]
if self._exists_to( 'pbpipeline/' + cellname + '.aborted' ):
res[cellname] = self.CELL_ABORTED
elif self._exists_to( 'pbpipeline/' + cellname + '.failed' ):
# Not sure if we need this?
res[cellname] = self.CELL_FAILED
elif self._exists_to( 'pbpipeline/' + cellname + '.done' ):
res[cellname] = self.CELL_PROCESSED
elif self._exists_to( 'pbpipeline/' + cellname + '.started' ):
res[cellname] = self.CELL_PROCESSING
elif self._exists_from( cellname + '/*.transferdone' ):
res[cellname] = self.CELL_READY
else:
res[cellname] = self.CELL_PENDING
self._cells_cache = res
return res
def _was_aborted(self):
if self._exists_to( 'pbpipeline/aborted' ):
return True
        # Or if all individual cells were aborted...
all_cell_statuses = self.get_cells().values()
if all_cell_statuses and all( v == self.CELL_ABORTED for v in all_cell_statuses ):
return True
return False
def _is_stalled(self):
if self.stall_time is None:
# Nothing is ever stalled then.
return False
# Now some datetime tinkering...
# If I find something dated later than stall_time then this run is not stalled.
# It's simpler to just get this as a Unix time that I can compare with stat() output.
stall_time = ( datetime.datetime.now(datetime.timezone.utc)
- datetime.timedelta(hours=self.stall_time)
).timestamp()
for cell in glob( os.path.join(self.from_path, '[0-9]_???') ):
if os.stat(cell).st_mtime > stall_time:
# I only need to see one thing
return False
# I found no evidence.
return True
def get_status( self ):
""" Work out the status of a run by checking the existence of various touchfiles
found in the run folder.
Behaviour with the touchfiles in invalid states is undefined, but we'll always
report a valid status and in general, if in doubt, we'll report a status that
does not trigger an action.
** This logic is convoluted. Before modifying anything, make a test that reflects
the change you want to see, then after making the change always run the tests.
Otherwise you will get bitten in the ass!
"""
# If one of the sanity checks failed the status must be unknown - any action would
# be dangerous.
if self._assertion_error:
return "unknown"
# Otherwise, 'new' takes precedence
if not self._exists_to( 'pbpipeline' ):
return "new"
# Run in aborted state should not be subject to any further processing
if self._was_aborted():
return "aborted"
# At this point we need to know which SMRT cells are ready/done. Disregard aborted cells.
# If everything was aborted we'll already have decided status='aborted'
# As with Illuminatus, this logic is a little contorted. The tests reassure me that all is
# well. If you see a problem add a test case before attempting a fix.
# No provision for 'redo' state just now, but if there was this would need to
# go in here to override the failed and complete statuses.
all_cell_statuses = [ v for v in self.get_cells().values() if v != self.CELL_ABORTED ]
if self._exists_to( 'pbpipeline/report.done' ):
if self._exists_to( 'pbpipeline/failed' ):
return "failed"
elif any( v == self.CELL_READY for v in all_cell_statuses ):
# Not right - see unit tests
return "unknown"
else:
return "complete"
if self._exists_to( 'pbpipeline/report.started' ):
# Even if reporting is very quick, we need a state for the run to be in while
# it is happening. Alternative would be that driver triggers report after processing
# the last SMRT cell, before marking the cell done, but this seems a bit flakey.
if self._exists_to( 'pbpipeline/failed' ):
return "failed"
else:
return "reporting"
# The 'failed' flag is going to be set if a report fails to generate or there is an
# RT error or summat like that.
# But until the final report is generated, the master 'failed' flag is ignored, so it's
# possible that an interim report fails but then a new cell gets processed and the report
# is re-triggered and this time it works and the flag can be cleared. Yeah.
# If any cell is ready we need to get it processed
if any( v == self.CELL_READY for v in all_cell_statuses ):
return "cell_ready"
# If all are processed we're in state processed, and ready to trigger the final report
if all_cell_statuses and all( v == self.CELL_PROCESSED for v in all_cell_statuses ):
return "processed"
# If all cells are processed or failed we're in state failed
# (otherwise delay failure until all cells are accounted for)
if all_cell_statuses and all( v in [self.CELL_FAILED, self.CELL_PROCESSED] for v in all_cell_statuses ):
return "failed"
# If none are processing we're in state 'idle_awaiting_cells'. This also applies if,
# for some reason, the list of cells is empty.
# At this point, we should also check if the run might be stalled.
if all( v not in [self.CELL_PROCESSING] for v in all_cell_statuses ):
if self._is_stalled():
return "stalled"
else:
return "idle_awaiting_cells"
# If any are pending we're in state 'processing_awaiting_cells'
if any( v == self.CELL_PENDING for v in all_cell_statuses ):
return "processing_awaiting_cells"
# Otherwise we're processing but not expecting any more data
return "processing"
def get_cells_ready(self):
""" Get a list of the cells which are ready to be processed, if any.
"""
return [c for c, v in self.get_cells().items() if v == self.CELL_READY]
def get_cells_aborted(self):
""" Get a list of the cells that were aborted, if any.
"""
return [c for c, v in self.get_cells().items() if v == self.CELL_ABORTED]
def get_run_id(self):
""" We can read this from RunDetails in any of the subreadset.xml files, but it's
easier to just assume the directory name is the run name. Allow a .xxx extension
since there are no '.'s is PacBio run names.
"""
realdir = os.path.basename(os.path.realpath(self.from_path))
return realdir.split('.')[0]
def get_instrument(self):
""" We have only one and the serial number is in the run ID
"""
foo = self.get_run_id().split('_')[0]
if foo.startswith('r') and len(foo) > 1:
return "Sequel_" + foo[1:]
else:
return 'unknown'
def get_start_time(self):
""" Look for the oldest *.txt or *.xml file in any subdirectory.
"""
txtfiles = [ f for extn in ['txt', 'xml'] for f in
glob(os.path.join(self.from_path, '[0-9]_???/*.'+extn)) ]
try:
oldest_time = min( os.stat(t).st_mtime for t in txtfiles )
return datetime.datetime.fromtimestamp(oldest_time).ctime()
except Exception:
return 'unknown'
def get_yaml(self, debug=True):
try:
return '\n'.join([ 'RunID: ' + self.get_run_id(),
'Instrument: ' + self.get_instrument(),
'Cells: ' + ' '.join(sorted(self.get_cells())),
'CellsReady: ' + ' '.join(sorted(self.get_cells_ready())),
'CellsAborted: ' + ' '.join(sorted(self.get_cells_aborted())),
'StartTime: ' + self.get_start_time(),
'PipelineStatus: ' + self.get_status() ])
except Exception: # if we can't read something just produce a blank reply.
if debug: raise
pstatus = 'aborted' if self._was_aborted() else 'unknown'
return '\n'.join([ 'RunID: unknown',
'Instrument: unknown',
'Cells: ',
'CellsReady: ',
'CellsAborted: ',
'StartTime: unknown',
'PipelineStatus: ' + pstatus ])
if __name__ == '__main__':
#Very cursory option parsing
optind = 1 ; opts = ''
if sys.argv[optind:] and sys.argv[optind].startswith('-'):
opts = sys.argv[optind][1:]
optind += 1
L.basicConfig(level=L.WARNING, stream=sys.stderr)
#If no run specified, examine the CWD.
runs = sys.argv[optind:] or ['.']
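    # Example invocation (hypothetical values):
    #   TO_LOCATION=/processed_runs STALL_TIME=24 python <this_script> /data/r54321_20180101_123456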
for run in runs:
run_info = RunStatus(run, opts,
to_location = os.environ.get('TO_LOCATION'),
stall_time = os.environ.get('STALL_TIME') or None)
print ( run_info.get_yaml( debug=os.environ.get('DEBUG', '0') != '0' ) )
| []
| []
| [
"TO_LOCATION",
"STALL_TIME",
"DEBUG"
]
| [] | ["TO_LOCATION", "STALL_TIME", "DEBUG"] | python | 3 | 0 | |
Gallery/wsgi.py | """
WSGI config for Gallery project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Gallery.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneAccumuloCluster.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.cluster.standalone;
import static com.google.common.base.Preconditions.checkArgument;
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.accumulo.cluster.AccumuloCluster;
import org.apache.accumulo.cluster.ClusterUser;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.clientImpl.ClientConfConverter;
import org.apache.accumulo.core.clientImpl.ClientInfo;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.SiteConfiguration;
import org.apache.accumulo.core.manager.thrift.ManagerGoalState;
import org.apache.accumulo.minicluster.ServerType;
import org.apache.accumulo.server.ServerContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
* AccumuloCluster implementation to connect to an existing deployment of Accumulo
*/
public class StandaloneAccumuloCluster implements AccumuloCluster {
static final List<ServerType> ALL_SERVER_TYPES =
Collections.unmodifiableList(Arrays.asList(ServerType.MANAGER, ServerType.TABLET_SERVER,
ServerType.TRACER, ServerType.GARBAGE_COLLECTOR, ServerType.MONITOR));
private ClientInfo info;
private String accumuloHome, clientAccumuloConfDir, serverAccumuloConfDir, hadoopConfDir;
private Path tmp;
private List<ClusterUser> users;
private String clientCmdPrefix;
private String serverCmdPrefix;
private SiteConfiguration siteConfig;
private ServerContext context;
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
justification = "code runs in same security context as user who provided input file name")
public StandaloneAccumuloCluster(ClientInfo info, Path tmp, List<ClusterUser> users,
String serverAccumuloConfDir) {
this.info = info;
this.tmp = tmp;
this.users = users;
this.serverAccumuloConfDir = serverAccumuloConfDir;
siteConfig =
SiteConfiguration.fromFile(new File(serverAccumuloConfDir, "accumulo.properties")).build();
}
public String getAccumuloHome() {
return accumuloHome;
}
public void setAccumuloHome(String accumuloHome) {
this.accumuloHome = accumuloHome;
}
public String getClientAccumuloConfDir() {
return clientAccumuloConfDir;
}
public void setClientAccumuloConfDir(String accumuloConfDir) {
this.clientAccumuloConfDir = accumuloConfDir;
}
public String getServerAccumuloConfDir() {
return serverAccumuloConfDir;
}
public void setServerCmdPrefix(String serverCmdPrefix) {
this.serverCmdPrefix = serverCmdPrefix;
}
public void setClientCmdPrefix(String clientCmdPrefix) {
this.clientCmdPrefix = clientCmdPrefix;
}
public String getHadoopConfDir() {
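    // Example (hypothetical path): export HADOOP_CONF_DIR=/etc/hadoop/conf before running
    // so that this lookup can locate the Hadoop client configuration.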
if (hadoopConfDir == null) {
hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
}
if (hadoopConfDir == null) {
throw new IllegalArgumentException("Cannot determine HADOOP_CONF_DIR for standalone cluster");
}
return hadoopConfDir;
}
public void setHadoopConfDir(String hadoopConfDir) {
this.hadoopConfDir = hadoopConfDir;
}
@Override
public String getInstanceName() {
return info.getInstanceName();
}
@Override
public String getZooKeepers() {
return info.getZooKeepers();
}
@Override
public synchronized ServerContext getServerContext() {
if (context == null) {
context = ServerContext.override(siteConfig, info.getInstanceName(), info.getZooKeepers(),
info.getZooKeepersSessionTimeOut());
}
return context;
}
@Override
public AccumuloClient createAccumuloClient(String user, AuthenticationToken token) {
return Accumulo.newClient().to(getInstanceName(), getZooKeepers()).as(user, token).build();
}
@Override
@Deprecated(since = "2.0.0")
public org.apache.accumulo.core.client.ClientConfiguration getClientConfig() {
return ClientConfConverter.toClientConf(info.getProperties());
}
@Override
public Properties getClientProperties() {
return info.getProperties();
}
@Override
public StandaloneClusterControl getClusterControl() {
return new StandaloneClusterControl(accumuloHome, clientAccumuloConfDir, serverAccumuloConfDir,
clientCmdPrefix, serverCmdPrefix);
}
@Override
public void start() throws IOException {
StandaloneClusterControl control = getClusterControl();
// TODO We can check the hosts files, but that requires us to be on a host with the
// installation. Limitation at the moment.
control.setGoalState(ManagerGoalState.NORMAL.toString());
for (ServerType type : ALL_SERVER_TYPES) {
control.startAllServers(type);
}
}
@Override
public void stop() throws IOException {
StandaloneClusterControl control = getClusterControl();
// TODO We can check the hosts files, but that requires us to be on a host with the
// installation. Limitation at the moment.
for (ServerType type : ALL_SERVER_TYPES) {
control.stopAllServers(type);
}
}
public Configuration getHadoopConfiguration() {
String confDir = getHadoopConfDir();
// Using CachedConfiguration will make repeatedly calling this method much faster
final Configuration conf = getServerContext().getHadoopConf();
conf.addResource(new Path(confDir, "core-site.xml"));
// Need hdfs-site.xml for NN HA
conf.addResource(new Path(confDir, "hdfs-site.xml"));
return conf;
}
@Override
public FileSystem getFileSystem() {
Configuration conf = getHadoopConfiguration();
try {
return FileSystem.get(conf);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public Path getTemporaryPath() {
return getFileSystem().makeQualified(tmp);
}
public ClusterUser getUser(int offset) {
checkArgument(offset >= 0 && offset < users.size(),
"Invalid offset, should be non-negative and less than " + users.size());
return users.get(offset);
}
@Override
public AccumuloConfiguration getSiteConfiguration() {
return new ConfigurationCopy(siteConfig);
}
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
justification = "code runs in same security context as user who provided input file name")
@Override
public String getAccumuloPropertiesPath() {
return new File(serverAccumuloConfDir, "accumulo.properties").toString();
}
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
justification = "code runs in same security context as user who provided input file name")
@Override
public String getClientPropsPath() {
return new File(clientAccumuloConfDir, "accumulo-client.properties").toString();
}
}
| [
"\"HADOOP_CONF_DIR\""
]
| []
| [
"HADOOP_CONF_DIR"
]
| [] | ["HADOOP_CONF_DIR"] | java | 1 | 0 | |
qa/L0_e2e/test_model.py | # Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from collections import defaultdict, namedtuple
from functools import lru_cache
try:
import cuml
except Exception:
cuml = None
import numpy as np
import pytest
import treelite
from hypothesis import given, settings, assume, HealthCheck
from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays as st_arrays
from rapids_triton import Client
from rapids_triton.testing import get_random_seed, arrays_close
import xgboost as xgb
TOTAL_SAMPLES = 20
MODELS = (
'xgboost',
'xgboost_shap',
'xgboost_json',
'lightgbm',
'regression',
'sklearn',
'cuml'
)
ModelData = namedtuple('ModelData', (
'name',
'input_shapes',
'output_sizes',
'max_batch_size',
'ground_truth_model',
'config'
))
# TODO(wphicks): Replace with cache in 3.9
@lru_cache()
def valid_shm_modes():
"""Return a tuple of allowed shared memory modes"""
modes = [None]
if os.environ.get('CPU_ONLY', 0) == 0:
modes.append('cuda')
return tuple(modes)
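# Example (hypothetical invocation): set CPU_ONLY so that only host memory is
# exercised, e.g. `CPU_ONLY=1 pytest qa/L0_e2e/test_model.py ...` (remaining
# pytest options omitted).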
@pytest.fixture(scope='session')
def client():
"""A RAPIDS-Triton client for submitting inference requests"""
client = Client()
client.wait_for_server(120)
return client
@pytest.fixture(scope='session')
def model_repo(pytestconfig):
"""The path to the model repository directory"""
return pytestconfig.getoption('repo')
@pytest.fixture(scope='session')
def skip_shap(pytestconfig):
return pytestconfig.getoption('no_shap')
def get_model_parameter(config, param, default=None):
"""Retrieve custom model parameters from config"""
param_str = config.parameters[param].string_value
if param_str:
return param_str
else:
return default
class GTILModel:
"""A compatibility wrapper for executing models with GTIL"""
def __init__(self, model_path, model_format, output_class):
if model_format == 'treelite_checkpoint':
self.tl_model = treelite.Model.deserialize(model_path)
else:
self.tl_model = treelite.Model.load(model_path, model_format)
self.output_class = output_class
def _predict(self, arr):
return treelite.gtil.predict(self.tl_model, arr)
def predict_proba(self, arr):
result = self._predict(arr)
if len(result.shape) > 1:
return result
else:
return np.transpose(np.vstack((1 - result, result)))
def predict(self, arr):
if self.output_class:
return np.argmax(self.predict_proba(arr), axis=1)
else:
return self._predict(arr)
class GroundTruthModel:
"""A reference model used for comparison against results returned from
Triton"""
def __init__(
self,
name,
model_repo,
model_format,
predict_proba,
output_class,
use_cpu,
*,
model_version=1):
model_dir = os.path.join(model_repo, name, f'{model_version}')
self.predict_proba = predict_proba
self._run_treeshap = False
if model_format == 'xgboost':
model_path = os.path.join(model_dir, 'xgboost.model')
elif model_format == 'xgboost_json':
model_path = os.path.join(model_dir, 'xgboost.json')
elif model_format == 'lightgbm':
model_path = os.path.join(model_dir, 'model.txt')
elif model_format == 'treelite_checkpoint':
if use_cpu:
model_path = os.path.join(model_dir, 'checkpoint.tl')
else:
model_path = os.path.join(model_dir, 'model.pkl')
else:
raise RuntimeError('Model format not recognized')
if use_cpu:
self._base_model = GTILModel(
model_path, model_format, output_class
)
else:
if model_format == 'treelite_checkpoint':
with open(model_path, 'rb') as pkl_file:
self._base_model = pickle.load(pkl_file)
else:
self._base_model = cuml.ForestInference.load(
model_path, output_class=output_class, model_type=model_format
)
if name == 'xgboost_shap':
self._xgb_model = xgb.Booster()
self._xgb_model.load_model(model_path)
self._run_treeshap = True
def predict(self, inputs):
if self.predict_proba:
result = self._base_model.predict_proba(inputs['input__0'])
else:
result = self._base_model.predict(inputs['input__0'])
output = {'output__0' : result}
if self._run_treeshap:
treeshap_result = \
self._xgb_model.predict(xgb.DMatrix(inputs['input__0']),
pred_contribs=True)
output['treeshap_output'] = treeshap_result
return output
@pytest.fixture(scope='session', params=MODELS)
def model_data(request, client, model_repo, skip_shap):
"""All data associated with a model required for generating examples and
comparing with ground truth results"""
name = request.param
if skip_shap and name == 'xgboost_shap':
pytest.skip("GPU Treeshap tests not enabled")
config = client.get_model_config(name)
input_shapes = {
input_.name: list(input_.dims) for input_ in config.input
}
output_sizes = {
output.name: np.product(output.dims) * np.dtype('float32').itemsize
for output in config.output
}
max_batch_size = config.max_batch_size
model_format = get_model_parameter(
config, 'model_type', default='xgboost'
)
predict_proba = get_model_parameter(
config, 'predict_proba', default='false'
)
predict_proba = (predict_proba == 'true')
output_class = get_model_parameter(
config, 'output_class', default='true'
)
output_class = (output_class == 'true')
use_cpu = (config.instance_group[0].kind != 1)
ground_truth_model = GroundTruthModel(
name, model_repo, model_format, predict_proba, output_class, use_cpu,
model_version=1
)
return ModelData(
name,
input_shapes,
output_sizes,
max_batch_size,
ground_truth_model,
config
)
@given(hypothesis_data=st.data())
@settings(
deadline=None,
suppress_health_check=(HealthCheck.too_slow, HealthCheck.filter_too_much)
)
def test_small(client, model_data, hypothesis_data):
"""Test Triton-served model on many small Hypothesis-generated examples"""
all_model_inputs = defaultdict(list)
total_output_sizes = {}
all_triton_outputs = defaultdict(list)
default_arrays = {
name: np.random.rand(TOTAL_SAMPLES, *shape).astype('float32')
for name, shape in model_data.input_shapes.items()
}
for i in range(TOTAL_SAMPLES):
model_inputs = {
name: hypothesis_data.draw(
st.one_of(
st.just(default_arrays[name][i:i+1, :]),
st_arrays('float32', [1] + shape)
)
) for name, shape in model_data.input_shapes.items()
}
if model_data.name == 'sklearn' or model_data.name == 'xgboost_shap':
for array in model_inputs.values():
assume(not np.any(np.isnan(array)))
model_output_sizes = {
name: size
for name, size in model_data.output_sizes.items()
}
shared_mem = hypothesis_data.draw(st.one_of(
st.just(mode) for mode in valid_shm_modes()
))
result = client.predict(
model_data.name, model_inputs, model_data.output_sizes,
shared_mem=shared_mem
)
for name, input_ in model_inputs.items():
all_model_inputs[name].append(input_)
for name, size in model_output_sizes.items():
total_output_sizes[name] = total_output_sizes.get(name, 0) + size
for name, output in result.items():
all_triton_outputs[name].append(output)
all_model_inputs = {
name: np.concatenate(arrays)
for name, arrays in all_model_inputs.items()
}
all_triton_outputs = {
name: np.concatenate(arrays)
for name, arrays in all_triton_outputs.items()
}
try:
ground_truth = model_data.ground_truth_model.predict(all_model_inputs)
except Exception:
assume(False)
for output_name in sorted(ground_truth.keys()):
if model_data.ground_truth_model.predict_proba:
arrays_close(
all_triton_outputs[output_name],
ground_truth[output_name],
rtol=1e-3,
atol=1e-2,
assert_close=True
)
else:
arrays_close(
all_triton_outputs[output_name],
ground_truth[output_name],
atol=0.1,
total_atol=3,
assert_close=True
)
# Test entire batch of Hypothesis-generated inputs at once
shared_mem = hypothesis_data.draw(st.one_of(
st.just(mode) for mode in valid_shm_modes()
))
all_triton_outputs = client.predict(
model_data.name, all_model_inputs, total_output_sizes,
shared_mem=shared_mem
)
for output_name in sorted(ground_truth.keys()):
if model_data.ground_truth_model.predict_proba:
arrays_close(
all_triton_outputs[output_name],
ground_truth[output_name],
rtol=1e-3,
atol=1e-2,
assert_close=True
)
else:
arrays_close(
all_triton_outputs[output_name],
ground_truth[output_name],
atol=0.1,
total_atol=3,
assert_close=True
)
@pytest.mark.parametrize("shared_mem", valid_shm_modes())
def test_max_batch(client, model_data, shared_mem):
"""Test processing of a single maximum-sized batch"""
max_inputs = {
name: np.random.rand(model_data.max_batch_size, *shape).astype('float32')
for name, shape in model_data.input_shapes.items()
}
model_output_sizes = {
name: size * model_data.max_batch_size
for name, size in model_data.output_sizes.items()
}
shared_mem = valid_shm_modes()[0]
result = client.predict(
model_data.name, max_inputs, model_output_sizes, shared_mem=shared_mem
)
ground_truth = model_data.ground_truth_model.predict(max_inputs)
for output_name in sorted(ground_truth.keys()):
if model_data.ground_truth_model.predict_proba:
arrays_close(
result[output_name],
ground_truth[output_name],
rtol=1e-3,
atol=1e-2,
assert_close=True
)
else:
arrays_close(
result[output_name],
ground_truth[output_name],
atol=0.1,
total_rtol=3,
assert_close=True
)
| []
| []
| [
"CPU_ONLY"
]
| [] | ["CPU_ONLY"] | python | 1 | 0 | |
setup_db.py | import os, sys
import json
if __name__ == '__main__':
# Setup environ
sys.path.append(os.getcwd())
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zapier.settings")
# Setup django
import django
django.setup()
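    # Example (hypothetical): run this seeding script from the project root after
    # applying migrations, e.g. `python setup_db.py`.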
from core_engine.models import *
#### Apps database objects ####
extra_data = {"project": "dropbox_app", "class": "DropBoxObject"}
dropbox_app, _ = Apps.objects.get_or_create(name="DropboxApp", defaults={"description":"Dropbox is a file hosting service operated by the American company Dropbox, Inc., headquartered in San Francisco, California, that offers cloud storage, file synchronization, personal cloud, and client software."
, "website":"https://dropbox.com", "extra_data":json.dumps(extra_data)})
extra_data = {"project": "google_apps", "class": "GoogleMail"}
gmail_app, _ = Apps.objects.get_or_create(name="GMailApp", defaults={"description":"Gmail is a free email service developed by Google."
, "website":"https://gmail.com", "extra_data":json.dumps(extra_data)})
extra_data = {"project": "google_apps", "class": "GoogleDrive"}
gdrive_app, _ = Apps.objects.get_or_create(name="GoogleDriveApp", defaults={"description":"Google Drive is a file storage and synchronization service developed by Google. Google Drive allows users to store files on their servers, synchronize files across devices, and share files."
, "website":"https://drive.google.com", "extra_data":json.dumps(extra_data)})
extra_data = {"project": "google_apps", "class": "GoogleSheets"}
gsheets_app, _ = Apps.objects.get_or_create(name="GoogleSheetsApp", defaults={"description":"Google Sheets is a spreadsheet program included as part of a free, web-based software office suite offered by Google within its Google Drive service."
, "website":"https://docs.google.com/spreadsheets/", "extra_data":json.dumps(extra_data)})
#### Triggers database apps ####
setup_data = {"text_input": [{"name":"filename", "not_empty": True, "type":"str"}, {"name":"path", "type":"str"}, {"name":"check_update_duration_seconds", "not_empty": True, "type":"int"}]}
dropbox_file_update_trigger, _ = Triggers.objects.get_or_create(app=dropbox_app, name='FileUpdateTrigger', defaults={"description":"Checks when a file is updated in last certain seconds in dropbox", "setup_data":setup_data})
setup_data = {"text_input": [{"name":"file_id", "not_empty": True, "type":"str"}, {"name":"check_update_duration_seconds", "not_empty": True, "type":"int"}]}
gdrive_file_modified_trigger, _ = Triggers.objects.get_or_create(app=gdrive_app, name='FileModifiedTrigger', defaults={"description":"Checks when a file is modified in last certain seconds in google drive", "setup_data":setup_data})
setup_data = {"text_input": [{"name":"headers", "type":"str", "options": ["From", "To", "Date", "Subject"], "input_string":"Select the headers you wanna add to your trigger data, if any, in a comma separated way.", "example":"From,To or Subject,To"},
{"name":"mailBodyBool", "type":"bool", "not_empty": True, "input_string":"Should we include the mail body text in trigger data?\nPlease answer in yes or no."}, {"name":"search_query", "type":"str", "example": "from:[email protected] rfc822msgid:<[email protected]> is:unread"}]}
gmail_email_trigger, _ = Triggers.objects.get_or_create(app=gmail_app, name='EMailTrigger', defaults={"description":"Checks every few minutes and then processes the emails matching a certain query with the collected data sent to an action", "setup_data":setup_data})
#### Actions database apps ####
setup_data = {"text_input": [{"name":"filename", "not_empty": True, "type":"str", "input_string":"Enter the file's name on dropbox in which the data will be uploaded."}, {"name":"path", "type":"str"}]}
dropbox_file_upload_action, _ = Actions.objects.get_or_create(app=dropbox_app, name='UploadAction', defaults={"description":"Uploads certain data to a file in dropbox(makes an update)", "setup_data":setup_data})
setup_data = {"text_input": [{"name":"filename", "type":"str", "not_empty": True, "input_string":"Enter the file's name on dropbox."}, {"name":"path", "type":"str", "input_string":"Enter the file's path on dropbox."}]}
dropbox_file_download_action, _ = Actions.objects.get_or_create(app=dropbox_app, name='DownloadAction', defaults={"description":"Downloads a file from dropbox", "setup_data":setup_data})
setup_data = {"text_input": [{"name":"file_id", "type":"str", "not_empty": True}]}
gdrive_file_download_action, _ = Actions.objects.get_or_create(app=gdrive_app, name='DownloadFileAction', defaults={"description":"Downloads a file from google drive and stores in a location on server", "setup_data":setup_data})
setup_data = {"text_input": [{"name":"sheet_id", "not_empty": True, "type":"str"}, {"name":"sheet_name", "type":"str", "input_string": "Please enter sheet name such as sheet1, sheet2 etc. Do NOT enter the entire spreadsheet name."},
{"name":"sheet_range", "type":"str", "not_empty": True, "input_string":"Enter sheet range in A1 notation, the rows will be appended after the range.", "example": "A1:E1"}]}
gsheets_appendrow_action, _ = Actions.objects.get_or_create(app=gsheets_app, name='AppendRowAction', defaults={"description":"Takes the received data and adds a row to the google sheets", "setup_data":setup_data}) | []
| []
| []
| [] | [] | python | 0 | 0 | |
Godeps/_workspace/src/github.com/appc/goaci/proj2aci/go.go | package proj2aci
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/schema"
"github.com/coreos/rkt/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)
type GoConfiguration struct {
CommonConfiguration
GoBinary string
GoPath string
}
type GoPaths struct {
CommonPaths
project string
realGo string
fakeGo string
goRoot string
goBin string
}
type GoCustomizations struct {
Configuration GoConfiguration
paths GoPaths
app string
}
func (custom *GoCustomizations) Name() string {
return "go"
}
func (custom *GoCustomizations) GetCommonConfiguration() *CommonConfiguration {
return &custom.Configuration.CommonConfiguration
}
func (custom *GoCustomizations) GetCommonPaths() *CommonPaths {
return &custom.paths.CommonPaths
}
func (custom *GoCustomizations) ValidateConfiguration() error {
if custom.Configuration.GoBinary == "" {
return fmt.Errorf("Go binary not found")
}
return nil
}
func (custom *GoCustomizations) SetupPaths() error {
custom.paths.realGo, custom.paths.fakeGo = custom.getGoPath()
if os.Getenv("GOPATH") != "" {
Warn("GOPATH env var is ignored, use --go-path=\"$GOPATH\" option instead")
}
custom.paths.goRoot = os.Getenv("GOROOT")
if custom.paths.goRoot != "" {
Warn("Overriding GOROOT env var to ", custom.paths.goRoot)
}
projectName := getProjectName(custom.Configuration.Project)
// Project name is path-like string with slashes, but slash is
// not a file separator on every OS.
custom.paths.project = filepath.Join(custom.paths.realGo, "src", filepath.Join(strings.Split(projectName, "/")...))
custom.paths.goBin = filepath.Join(custom.paths.fakeGo, "bin")
return nil
}
// getGoPath returns go path and fake go path. The former is either in
// /tmp (which is a default) or some other path as specified by
// --go-path parameter. The latter is always in /tmp.
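// For example (hypothetical values): with --go-path=/home/user/go the returned
// paths are /home/user/go and <tmpdir>/gopath; with no --go-path both are
// <tmpdir>/gopath.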
func (custom *GoCustomizations) getGoPath() (string, string) {
fakeGoPath := filepath.Join(custom.paths.TmpDir, "gopath")
if custom.Configuration.GoPath == "" {
return fakeGoPath, fakeGoPath
}
return custom.Configuration.GoPath, fakeGoPath
}
func getProjectName(project string) string {
if filepath.Base(project) != "..." {
return project
}
return filepath.Dir(project)
}
func (custom *GoCustomizations) GetDirectoriesToMake() []string {
return []string{
custom.paths.fakeGo,
custom.paths.goBin,
}
}
func (custom *GoCustomizations) PrepareProject() error {
Info("Running go get")
// Construct args for a go get that does a static build
args := []string{
"go",
"get",
"-a",
custom.Configuration.Project,
}
env := []string{
"GOPATH=" + custom.paths.realGo,
"GOBIN=" + custom.paths.goBin,
"PATH=" + os.Getenv("PATH"),
}
if custom.paths.goRoot != "" {
env = append(env, "GOROOT="+custom.paths.goRoot)
}
cmd := exec.Cmd{
Env: env,
Path: custom.Configuration.GoBinary,
Args: args,
Stderr: os.Stderr,
Stdout: os.Stdout,
}
Debug("env: ", cmd.Env)
Debug("running command: ", strings.Join(cmd.Args, " "))
if err := cmd.Run(); err != nil {
return err
}
return nil
}
func (custom *GoCustomizations) GetPlaceholderMapping() map[string]string {
return map[string]string{
"<PROJPATH>": custom.paths.project,
"<GOPATH>": custom.paths.realGo,
}
}
func (custom *GoCustomizations) GetAssets(aciBinDir string) ([]string, error) {
name, err := custom.GetBinaryName()
if err != nil {
return nil, err
}
aciAsset := filepath.Join(aciBinDir, name)
localAsset := filepath.Join(custom.paths.goBin, name)
return []string{GetAssetString(aciAsset, localAsset)}, nil
}
func (custom *GoCustomizations) GetImageACName() (*types.ACName, error) {
imageACName := custom.Configuration.Project
if filepath.Base(imageACName) == "..." {
imageACName = filepath.Dir(imageACName)
if custom.Configuration.UseBinary != "" {
imageACName += "-" + custom.Configuration.UseBinary
}
}
return types.NewACName(strings.ToLower(imageACName))
}
func (custom *GoCustomizations) GetBinaryName() (string, error) {
if err := custom.findBinaryName(); err != nil {
return "", err
}
return custom.app, nil
}
func (custom *GoCustomizations) findBinaryName() error {
if custom.app != "" {
return nil
}
binaryName, err := GetBinaryName(custom.paths.goBin, custom.Configuration.UseBinary)
if err != nil {
return err
}
custom.app = binaryName
return nil
}
func (custom *GoCustomizations) GetRepoPath() (string, error) {
return custom.paths.project, nil
}
func (custom *GoCustomizations) GetImageFileName() (string, error) {
base := filepath.Base(custom.Configuration.Project)
if base == "..." {
base = filepath.Base(filepath.Dir(custom.Configuration.Project))
if custom.Configuration.UseBinary != "" {
base += "-" + custom.Configuration.UseBinary
}
}
return base + schema.ACIExtension, nil
}
| [
"\"GOPATH\"",
"\"GOROOT\"",
"\"PATH\""
]
| []
| [
"GOPATH",
"GOROOT",
"PATH"
]
| [] | ["GOPATH", "GOROOT", "PATH"] | go | 3 | 0 | |
utils/test/test_tutorials.py | # -*- coding: utf-8 -*-
# Copyright 2018 IBM and its contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Authors: Diego M. Rodriguez <[email protected]>
"""Helper for running the notebooks as unit tests.
Convenience script for running the notebooks as individual `unittest` tests
using the standard Python facilities. By default, only the notebooks under
`reference/` are automatically discovered (can be modified via the
`NOTEBOOK_PATH` variable).
The test can be run by using the regular unittest facilities from the root
folder of the repository:
python -m unittest --verbose
python -m unittest utils.test.test_tutorials.TutorialsTestCase.\
test_reference_algorithms_bernstein_vazirani_ipynb
Tested under the following Jupyter versions:
ipython==6.3.1
nbconvert==5.3.1
nbformat==4.4.0
"""
import glob
import os
import re
import unittest
import warnings
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
# Configurable parameters.
# List of manual exclusion (for example, ["reference/foo/problematic.ipynb"]).
EXCLUDED_NOTEBOOKS = []
# Timeout (in seconds) for a single notebook.
TIMEOUT = int(os.getenv('TIMEOUT', 6000))
# Jupyter kernel to execute the notebook in.
JUPYTER_KERNEL = os.getenv('JUPYTER_KERNEL', 'python3')
# Glob expression for discovering the notebooks.
NOTEBOOK_PATH = os.getenv('NOTEBOOK_PATH', 'qiskit/**/*.ipynb')
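# Example (hypothetical values): override the discovery glob and timeout when
# running the tests, e.g.
#   NOTEBOOK_PATH='qiskit/advanced/*.ipynb' TIMEOUT=1200 python -m unittest --verbose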
# Retrieve the notebooks recursively.
NOTEBOOK_FILENAMES = [f for f in sorted(glob.glob(NOTEBOOK_PATH,
recursive=True))
if not os.path.basename(f) in EXCLUDED_NOTEBOOKS]
class TutorialsTestCaseMeta(type):
"""
Metaclass that dynamically appends a "test_TUTORIAL_NAME" method to the
class.
"""
def __new__(mcs, name, bases, dict_):
def _str_to_identifier(string):
"""Convert a string to a valid Python identifier."""
return re.sub(r'\W|^(?=\d)', '_', string)
def create_test(filename):
"""Return a new test function."""
def test_function(self):
self._run_notebook(filename)
return test_function
for filename in NOTEBOOK_FILENAMES:
# Add a new "test_file_name_ipynb()" function to the test case.
test_name = "test_%s" % _str_to_identifier(filename)
dict_[test_name] = create_test(filename)
dict_[test_name].__doc__ = 'Test tutorial "%s"' % filename
return type.__new__(mcs, name, bases, dict_)
class TutorialsTestCase(unittest.TestCase,
metaclass=TutorialsTestCaseMeta):
"""
TestCase for running the tutorials.
"""
@staticmethod
def _run_notebook(filename):
# Create the preprocessor.
execute_preprocessor = ExecutePreprocessor(timeout=TIMEOUT,
kernel_name=JUPYTER_KERNEL)
# Open the notebook.
file_path = os.path.dirname(os.path.abspath(filename))
with open(filename) as file_:
notebook = nbformat.read(file_, as_version=4)
with warnings.catch_warnings():
# Silence some spurious warnings.
warnings.filterwarnings('ignore', category=DeprecationWarning)
# Finally, run the notebook.
execute_preprocessor.preprocess(notebook,
{'metadata': {'path': file_path}})
| []
| []
| [
"JUPYTER_KERNEL",
"NOTEBOOK_PATH",
"TIMEOUT"
]
| [] | ["JUPYTER_KERNEL", "NOTEBOOK_PATH", "TIMEOUT"] | python | 3 | 0 | |
frameworklauncher/src/test/java/com/microsoft/frameworklauncher/testutils/FeatureTestUtils.java | // Copyright (c) Microsoft Corporation
// All rights reserved.
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
// to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package com.microsoft.frameworklauncher.testutils;
import com.microsoft.frameworklauncher.common.GlobalConstants;
import com.microsoft.frameworklauncher.common.model.*;
import com.microsoft.frameworklauncher.common.utils.CommonUtils;
import com.microsoft.frameworklauncher.common.web.WebCommon;
import com.microsoft.frameworklauncher.zookeeperstore.MockZooKeeperClient;
import com.microsoft.frameworklauncher.zookeeperstore.ZooKeeperClient;
import com.microsoft.frameworklauncher.zookeeperstore.ZookeeperStore;
import com.microsoft.frameworklauncher.zookeeperstore.ZookeeperStoreStructure;
import org.apache.commons.io.FilenameUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.*;
import org.apache.hadoop.yarn.util.ConverterUtils;
import java.io.File;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
public class FeatureTestUtils {
public static final String ZK_BASE_DIR =
TestUtils.RESOURCE_ROOT + File.separator + "zkDir";
public static final String HDFS_BASE_DIR =
TestUtils.RESOURCE_ROOT + File.separator + "hdfsDir";
static {
new File(ZK_BASE_DIR).mkdir();
new File(HDFS_BASE_DIR).mkdir();
}
public static void setEnvsVariables(
String frameworkName, FrameworkStatus frameworkStatus)
throws Exception {
LauncherConfiguration config = new LauncherConfiguration();
Integer frameworkVersion = frameworkStatus.getFrameworkVersion();
// SetupLocalEnvironment
Map<String, String> localEnvs = new HashMap<>();
localEnvs.put(GlobalConstants.ENV_VAR_FRAMEWORK_NAME, frameworkName);
localEnvs.put(GlobalConstants.ENV_VAR_FRAMEWORK_VERSION, frameworkVersion.toString());
localEnvs.put(GlobalConstants.ENV_VAR_ZK_CONNECT_STRING, config.getZkConnectString());
localEnvs.put(GlobalConstants.ENV_VAR_ZK_ROOT_DIR, config.getZkRootDir());
localEnvs.put(GlobalConstants.ENV_VAR_ZK_COMPRESSION_ENABLE, config.getZkCompressionEnable().toString());
localEnvs.put(GlobalConstants.ENV_VAR_AM_VERSION, config.getAmVersion().toString());
localEnvs.put(GlobalConstants.ENV_VAR_AM_RM_HEARTBEAT_INTERVAL_SEC, config.getAmRmHeartbeatIntervalSec().toString());
localEnvs.put(GlobalConstants.ENV_VAR_CONTAINER_ID, "container_" + System.currentTimeMillis() + "_0001_000001_1");
// For now setting all required classpaths including
// the classpath to "." for the application jar
StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$$())
.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
localEnvs.put("CLASSPATH", classPathEnv.toString());
Map<String, String> envMap = System.getenv();
Field f;
try {
f = Class.forName("java.lang.ProcessEnvironment").
getDeclaredField("theCaseInsensitiveEnvironment");
} catch (NoSuchFieldException e) {
f = envMap.getClass().getDeclaredField("m");
}
f.setAccessible(true);
Map<String, String> map = (Map<String, String>) f.get(envMap);
map.putAll(localEnvs);
}
public static void initZK(ZookeeperStore zkStore, FrameworkRequest frameworkRequest, FrameworkStatus frameworkStatus)
throws Exception {
String frameworkName = frameworkRequest.getFrameworkName();
LauncherConfiguration launcherConfiguration = new LauncherConfiguration();
launcherConfiguration.setHdfsRootDir(HDFS_BASE_DIR);
launcherConfiguration.setAmStatusPushIntervalSec(10);
LauncherStatus launcherStatus = new LauncherStatus();
launcherStatus.setLauncherConfiguration(launcherConfiguration);
zkStore.setLauncherStatus(launcherStatus);
zkStore.setFrameworkStatus(frameworkName, frameworkStatus);
zkStore.setLauncherRequest(new LauncherRequest());
zkStore.setFrameworkRequest(frameworkName, frameworkRequest);
}
public static void initContainerList(List<Container> containerList, int length, Resource resource) {
for (int i = 0; i < length; i++) {
String containerIdStr = "container_" + System.currentTimeMillis() + "_0001_000001_" + (i + 2);
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
NodeId nodeId = NodeId.newInstance(GlobalConstants.LOCAL_HOST_NAME, 3215);
Container container = Container.newInstance(containerId,
nodeId, GlobalConstants.LOCAL_HOST_NAME, resource, Priority.newInstance(1), null);
containerList.add(container);
}
}
public static void waitForTaskStatusesPathCreate(String frameworkName, String taskRoleName)
throws Exception {
ZooKeeperClient zkClient = new MockZooKeeperClient();
ZookeeperStoreStructure zkStruct = new ZookeeperStoreStructure(FeatureTestUtils.ZK_BASE_DIR);
while (!zkClient.exists(zkStruct.getTaskStatusesPath(frameworkName, taskRoleName))) {
Thread.sleep(2000);
}
}
public static FrameworkStatus getFrameworkStatusFromRequest(FrameworkRequest frameworkRequest) {
return FrameworkStatus.newInstance(frameworkRequest);
}
public static FrameworkRequest getFrameworkRequestFromJson(
String frameworkName, String descriptionFile, String hostName, String user)
throws Exception {
String descriptionContent = CommonUtils.readFile(descriptionFile);
String descriptionFileExtension = FilenameUtils.getExtension(descriptionFile).toLowerCase();
if (!descriptionFileExtension.equals("json")) {
throw new Exception("Unsupported FrameworkDescriptionFile Type: " + descriptionFileExtension);
}
FrameworkRequest frameworkRequest = new FrameworkRequest();
frameworkRequest.setFrameworkName(frameworkName);
frameworkRequest.setFrameworkDescriptor(WebCommon.toObject(descriptionContent, FrameworkDescriptor.class));
frameworkRequest.setLaunchClientHostName(hostName);
frameworkRequest.setLaunchClientUserName(user);
return frameworkRequest;
}
public static ApplicationAttemptId newApplicationAttemptId() {
Random r = new Random();
ApplicationId appId = ApplicationId.newInstance(
System.currentTimeMillis(), r.nextInt(10000));
return ApplicationAttemptId.newInstance(appId, r.nextInt(10));
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
slack-bot-versions/03.slackbot.py | #!/usr/bin/env python3
"""
Slack bot using the Slack API and python SlackClient
"""
import random
import os
import time
import sys
from slackclient import SlackClient
ACCESS_TOKEN = os.environ.get("SLACK_API_TOKEN")
RTM_READ_DELAY = 1 # 1 second delay between reading from RTM
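# The bot token is read from the environment, e.g. (hypothetical value):
#   export SLACK_API_TOKEN="xoxb-000000000000-placeholder"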
def main():
"""
main is the main program that will run when the script is executed.
"""
client = SlackClient(ACCESS_TOKEN)
if not client.rtm_connect(with_team_state=False):
print("could not connect to slack")
sys.exit(1)
print("connected to network!")
bot_id = client.api_call("auth.test")["user_id"]
for data in tagged_messages(client, bot_id):
if "lunch poll" in data.get("text"):
create_message(client, data)
continue
if any(x in data.get("text") for x in ["lunch", "eat", "hungry"]):
whats_for_lunch(client, data)
continue
client.rtm_send_message(
data.get("channel"), "don't know what to say about that..."
)
def tagged_messages(client: SlackClient, user_id: str):
"""
Check the connection and parse all events. If the event is of desired type
or content yield the message to the consumer of the iterator.
"""
while True:
for data in client.rtm_read():
if "text" not in data:
continue
if data.get("type") == "message" and user_id in data.get("text"):
yield data
time.sleep(RTM_READ_DELAY)
def whats_for_lunch(client: SlackClient, data: dict):
"""
Fetch a random value of what's for lunch!
"""
restaurants = ["Textas Longhorn", "Sushi!", "I think pizza!"]
client.rtm_send_message(data.get("channel"), random.choice(restaurants))
def create_message(client, data):
"""
Create a blocked message with available restaurants to vote for.
"""
restaurants = {
":hamburger: Texas Longhorn": {
"description": "Some nice burgers here!"
},
":sushi: Sushi Sun": {"description": "Here we can enjoy sushi!"},
":seedling: Re-orient": {"description": "Meze for us!"},
}
blocks = [
{
"type": "section",
"text": {"type": "mrkdwn", "text": "*Where should we eat lunch?*"},
},
{"type": "divider"},
]
for restaurant, info in restaurants.items():
blocks.extend(
[
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "{}\n{}".format(
restaurant, info.get("description")
),
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"emoji": True,
"text": "Vote",
},
"value": "click_me_123",
},
},
{
"type": "context",
"elements": [{"type": "mrkdwn", "text": "No votes"}],
},
]
)
client.api_call(
"chat.postMessage", channel=data.get("channel"), blocks=blocks
)
if __name__ == "__main__":
main()
| []
| []
| [
"SLACK_API_TOKEN"
]
| [] | ["SLACK_API_TOKEN"] | python | 1 | 0 | |
api/client.go | package api
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"math"
"net/http"
"os"
"runtime"
"strings"
"time"
"github.com/chuckpreslar/emission"
"github.com/sourcegraph/jsonrpc2"
"github.com/uscott/go-api-deribit/inout"
syncgrp "github.com/uscott/go-syncgrp"
"github.com/uscott/go-tools/errs"
"github.com/uscott/go-tools/tm"
"github.com/uscott/go-tools/tmath"
"nhooyr.io/websocket"
)
// Test and Production URLs
const (
ProdBaseURL = "wss://www.deribit.com/ws/api/v2/"
TestBaseURL = "wss://test.deribit.com/ws/api/v2/"
)
// MaxTries is the max number of reconnect attempts
const MaxTries = 10
const prvt string = "private"
var (
// ErrAuthRequired is an error value corresponding to authorization
// being required for a request
ErrAuthRequired = errors.New("AUTHENTICATION IS REQUIRED")
matchEngineRequest = []string{
"buy", "sell", "edit", "cancel", "close_position",
"verify_block_trade", "execute_block_trade",
}
)
var (
ceil = math.Ceil
clamp = tmath.Clamp
imin = tmath.Imin
max = math.Max
min = math.Min
trunc = math.Trunc
)
// Event is wrapper of received event
type Event struct {
Channel string `json:"channel"`
Data json.RawMessage `json:"data"`
}
// Configuration contains data for creating
// a client
type Configuration struct {
Ctx context.Context
Address string `json:"address"`
AutoReconnect bool `json:"autoReconnect"`
AutoRefillMatch float64 `json:"auto_refill_match"`
AutoRefillNonmatch float64 `json:"auto_refill_nonmatch"`
Currency string `json:"currency"`
DebugMode bool `json:"debugMode"`
Key string `json:"api_key"`
Production bool `json:"production"`
Secret string `json:"secret_key"`
UseLogFile bool `json:"use_log_file"`
}
// DfltCnfg returns a default Configuration
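// Usage sketch (illustrative only; the subscription channel below is hypothetical):
//   cfg := DfltCnfg()
//   client, err := New(cfg)
//   if err != nil { log.Fatal(err) }
//   _ = client.SubscribeToChannels([]string{"book.BTC-PERPETUAL.raw"})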
func DfltCnfg() *Configuration {
return &Configuration{
Address: TestBaseURL,
AutoReconnect: true,
AutoRefillMatch: 0.8,
AutoRefillNonmatch: 0.8,
Currency: BTC,
DebugMode: true,
Key: os.Getenv("DERIBIT_TEST_MAIN_KEY"),
Production: false,
Secret: os.Getenv("DERIBIT_TEST_MAIN_SECRET"),
UseLogFile: false,
}
}
// Client is the base client for connecting to the exchange
type Client struct {
auth struct {
token string
refresh string
}
autoRefill rqstCntData
conn *websocket.Conn
emitter *emission.Emitter
heartCancel chan struct{}
isConnected bool
rpcConn *jsonrpc2.Conn
rqstCnt rqstCntData
rqstTmr rqstTmrData
subscriptions []string
subscriptionsMap map[string]byte
Acct inout.AcctSummaryOut
Config *Configuration
Logger *log.Logger
SG *syncgrp.SyncGrp
StartTime time.Time
Sub *Subordinate
}
func (c *Client) NewMinimal(cfg *Configuration) (err error) {
if cfg == nil {
return errs.ErrNilPtr
}
if cfg.Ctx == nil {
cfg.Ctx = context.Background()
}
c.Config = cfg
if err = c.CreateLogger(); err != nil {
log.Println(err.Error())
return err
}
if c.emitter == nil {
c.emitter = emission.NewEmitter()
}
if c.SG == nil {
c.SG = syncgrp.New()
}
if c.Sub == nil {
c.Sub = NewSubordinate()
}
if c.subscriptionsMap == nil {
c.subscriptionsMap = make(map[string]byte)
}
if c.heartCancel == nil {
c.heartCancel = make(chan struct{})
}
c.StartTime = tm.UTC()
return nil
}
func NewMinimal(cfg *Configuration) (*Client, error) {
if cfg == nil {
return nil, errs.ErrNilPtr
}
var c *Client = new(Client)
if err := c.NewMinimal(cfg); err != nil {
return c, err
}
return c, nil
}
// New returns pointer to new Client
func New(cfg *Configuration) (*Client, error) {
if cfg == nil {
return nil, errs.ErrNilPtr
}
var (
c *Client = new(Client)
err error
)
if err = c.New(cfg); err != nil {
return c, err
}
return c, nil
}
func (c *Client) New(cfg *Configuration) (err error) {
if err = c.NewMinimal(cfg); err != nil {
return err
}
if err = c.Start(); err != nil {
c.Logger.Println(err.Error())
return err
}
c.rqstTmr = rqstTmrData{t0: c.StartTime, t1: c.StartTime, dt: 0}
if err = c.GetAccountSummary(c.Config.Currency, true, &c.Acct); err != nil {
go c.Logger.Println(err.Error())
return err
}
var ub float64
lmts := &c.Acct.Limits
ub = clamp(
float64(lmts.MatchingEngine.Rate)*cfg.AutoRefillMatch,
0,
float64(lmts.MatchingEngine.Rate))
c.autoRefill.mch = int(math.Floor(ub))
ub = clamp(
float64(lmts.NonMatchingEngine.Rate)*cfg.AutoRefillNonmatch,
0,
float64(lmts.NonMatchingEngine.Rate))
c.autoRefill.non = int(math.Floor(ub))
c.resetRqstTmr()
return nil
}
func (c *Client) Connect() (*websocket.Conn, *http.Response, error) {
ctx, cncl := context.WithTimeout(context.Background(), 10*time.Second)
defer cncl()
conn, resp, err := websocket.Dial(ctx, c.Config.Address, &websocket.DialOptions{})
if err == nil {
conn.SetReadLimit(32768 * 64)
}
return conn, resp, err
}
func (c *Client) CreateLogger() error {
var (
dir, logFilePath, testprod string
err error
logFile *os.File
)
if c.Config.Production {
dir = "log-prod/"
testprod = "prod"
} else {
dir = "log-test/"
testprod = "test"
}
_ = os.Mkdir(dir, os.ModeDir)
_ = os.Chmod(dir, 0754)
if c.Config.UseLogFile {
stamp := tm.Format2(tm.UTC())
s := fmt.Sprintf("%v%v-%v-%v.%v", dir, "api-log", testprod, stamp, "log")
logFilePath = strings.ReplaceAll(s, " ", "-")
logFilePath = strings.ReplaceAll(logFilePath, ":", "")
logFile, err = os.OpenFile(logFilePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return err
}
} else {
logFile = os.Stdout
}
lflags := log.LstdFlags | log.LUTC | log.Lmsgprefix | log.Lshortfile
c.Logger = log.New(logFile, "", lflags)
return nil
}
func (c *Client) DebugPrintf(format string, a ...interface{}) {
if c.Config.DebugMode {
c.SG.Lock()
defer c.SG.Unlock()
var prefix string
_, f, l, ok := runtime.Caller(1)
if ok {
prefix = c.Logger.Prefix()
for i := len(f) - 1; i > 0; i-- {
if f[i] == '/' {
f = f[i+1:]
break
}
}
c.Logger.SetPrefix(fmt.Sprintf("%v\n%v: %d: ", prefix, f, l))
}
c.Logger.Printf(format, a...)
if ok {
c.Logger.SetPrefix(prefix)
}
}
}
func (c *Client) DebugPrintln(a ...interface{}) {
if c.Config.DebugMode {
c.SG.Lock()
defer c.SG.Unlock()
var prefix string
_, f, l, ok := runtime.Caller(1)
if ok {
prefix = c.Logger.Prefix()
for i := len(f) - 1; i > 0; i-- {
if f[i] == '/' {
f = f[i+1:]
break
}
}
c.Logger.SetPrefix(fmt.Sprintf("%v\n%v: %d: ", prefix, f, l))
}
c.Logger.Println(a...)
if ok {
c.Logger.SetPrefix(prefix)
}
}
}
func (c *Client) decrementRqstCnt(nsecs int) {
if nsecs > 0 {
c.SG.Lock()
lmts := &c.Acct.Limits
c.rqstCnt.mch = imax(0, c.rqstCnt.mch-nsecs*lmts.MatchingEngine.Rate)
c.rqstCnt.non = imax(0, c.rqstCnt.non-nsecs*lmts.NonMatchingEngine.Rate)
c.SG.Unlock()
}
}
func (c *Client) heartbeat() {
t := time.NewTicker(3 * time.Second)
for {
select {
case <-t.C:
_, _ = c.Test()
case <-c.heartCancel:
return
}
}
}
func (c *Client) Reconnect() {
notify := c.rpcConn.DisconnectNotify()
<-notify
c.setIsConnected(false)
c.Logger.Println("disconnect, reconnect...")
close(c.heartCancel)
time.Sleep(4 * time.Second)
if err := c.Start(); err != nil {
go c.Logger.Println(err.Error())
}
}
func (c *Client) resetRqstTmr() {
c.SG.Lock()
t0 := c.rqstTmr.t1
t1 := tm.UTC()
c.rqstTmr.t0, c.rqstTmr.t1, c.rqstTmr.dt = t0, t1, t1.Sub(t0)
c.SG.Unlock()
}
// setIsConnected sets state for isConnected
func (c *Client) setIsConnected(state bool) {
c.SG.RWLock()
c.isConnected = state
c.SG.RWUnlock()
}
func (c *Client) Start() (err error) {
c.setIsConnected(false)
c.subscriptionsMap = make(map[string]byte)
c.conn, c.rpcConn = nil, nil
c.heartCancel = make(chan struct{})
for i := 0; i < MaxTries; i++ {
conn, _, err := c.Connect()
if err != nil {
c.Logger.Println(err.Error())
tm := time.Duration(i+1) * 5 * time.Second
c.Logger.Printf("Sleeping %v\n", tm)
time.Sleep(tm)
continue
}
c.conn = conn
break
}
if c.conn == nil {
return errs.ErrNotConnected
}
c.rpcConn = jsonrpc2.NewConn(
context.Background(), NewObjectStream(c.conn), c)
c.setIsConnected(true)
// auth
if c.Config.Key != "" && c.Config.Secret != "" {
if err = c.Auth(c.Config.Key, c.Config.Secret); err != nil {
return err
}
}
// subscribe
if err = c.subscribe(c.subscriptions); err != nil {
return err
}
_, err = c.SetHeartbeat(&inout.Heartbeat{Interval: 30})
if err != nil {
return err
}
if c.Config.AutoReconnect {
go c.Reconnect()
}
go c.heartbeat()
return nil
}
func (c *Client) subscribe(channels []string) (e error) {
var (
pblcChannels []string
prvtChannels []string
)
c.SG.Lock()
for _, v := range c.subscriptions {
if _, ok := c.subscriptionsMap[v]; ok {
continue
}
if strings.HasPrefix(v, "user.") {
prvtChannels = append(prvtChannels, v)
} else {
pblcChannels = append(pblcChannels, v)
}
}
c.SG.Unlock()
if len(pblcChannels) > 0 {
_, e = c.SubPblc(pblcChannels)
if e != nil {
return e
}
c.SG.Lock()
for _, v := range pblcChannels {
c.subscriptionsMap[v] = 0
}
c.SG.Unlock()
}
if len(prvtChannels) > 0 {
_, e = c.SubPrvt(prvtChannels)
if e != nil {
return e
}
c.SG.Lock()
for _, v := range prvtChannels {
c.subscriptionsMap[v] = 0
}
c.SG.Unlock()
}
return nil
}
func (c *Client) updtRqstTmr() {
c.SG.Lock()
c.rqstTmr.t1 = tm.UTC()
c.rqstTmr.dt = c.rqstTmr.t1.Sub(c.rqstTmr.t0)
c.SG.Unlock()
}
// AutoRefillRqsts automatically refills rate limit if request counts
// are above certain threshold
func (c *Client) AutoRefillRqsts() {
c.RefillRqstsCndtnl(c.autoRefill.mch, c.autoRefill.non)
}
// Call issues JSONRPC v2 calls
func (c *Client) Call(method string, params interface{}, result interface{}) (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("%v", r)
c.Logger.Println(err.Error())
}
}()
if !c.IsConnected() {
return errs.ErrNotConnected
}
if params == nil {
params = emptyParams
}
if token, ok := params.(privateParams); ok {
if c.auth.token == "" {
return ErrAuthRequired
}
token.setToken(c.auth.token)
}
c.SG.Lock()
ml, pl, engine := len(method), len(prvt), false
if ml >= pl && method[:pl] == prvt {
rmdr := method[pl+1:]
rl := len(rmdr)
for _, s := range matchEngineRequest {
if sl := len(s); rl >= sl && rmdr[:sl] == s {
engine = true
break
}
}
}
if engine {
c.rqstCnt.mch++
} else {
c.rqstCnt.non++
}
c.SG.Unlock()
return c.rpcConn.Call(c.Config.Ctx, method, params, result)
}
// Handle implements jsonrpc2.Handler
func (c *Client) Handle(
ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) {
if req.Method == "subscription" { // update events
if req.Params != nil && len(*req.Params) > 0 {
var event Event
if err := json.Unmarshal(*req.Params, &event); err != nil {
go c.Logger.Println(err.Error())
return
}
_, err := c.subscriptionsProcess(&event)
if err != nil {
go c.Logger.Println(err.Error())
}
}
}
}
// IsConnected returns the WebSocket connection state
func (c *Client) IsConnected() bool {
c.SG.RLock()
defer c.SG.RUnlock()
return c.isConnected
}
// IsProduction returns whether the client is connected
// to the production server
func (c *Client) IsProduction() bool {
return c.Config.Production
}
// RefillRqsts will sleep long enough to refill all rate limits
func (c *Client) RefillRqsts() {
c.SG.Lock()
lmts := &c.Acct.Limits
m, n := lmts.MatchingEngine.Rate, lmts.NonMatchingEngine.Rate
mb, nb := lmts.MatchingEngine.Burst, lmts.NonMatchingEngine.Burst
if m <= 0 || n <= 0 || mb <= 0 || nb <= 0 {
c.SG.Unlock()
return
}
const (
fnanosecs float64 = float64(time.Second) / float64(time.Nanosecond)
minSleepTm time.Duration = 250 * time.Millisecond
)
tmch := float64(c.rqstCnt.mch) / float64(m) * fnanosecs
tnon := float64(c.rqstCnt.non) / float64(n) * fnanosecs
c.SG.Unlock()
c.updtRqstTmr()
c.SG.Lock()
ub := imin(mb/m, nb/n) // seconds
tacm := trunc(min(float64(ub), c.rqstTmr.dt.Seconds())) * fnanosecs
tnet := time.Duration(max(tmch, tnon) - tacm) // Nanoseconds
c.SG.Unlock()
if tnet > minSleepTm {
time.Sleep(tnet)
c.resetRqstTmr()
c.SG.Lock()
c.rqstCnt.mch, c.rqstCnt.non = 0, 0
c.SG.Unlock()
}
}
// RefillRqstsCndtnl refills requests if request count
// are above given amounts
func (c *Client) RefillRqstsCndtnl(match int, nonmatch int) {
if c.rqstCnt.mch > match || c.rqstCnt.non > nonmatch {
c.RefillRqsts()
}
}
// RqstCnts returns the number of requsts accumulated
func (c *Client) RqstCnts() (cntMch, cntNon int) {
cntMch, cntNon = c.rqstCnt.mch, c.rqstCnt.non
return
}
// SubscribeToChannels subscribes to channels
func (c *Client) SubscribeToChannels(channels []string) (e error) {
c.SG.Lock()
c.subscriptions = append(c.subscriptions, channels...)
c.SG.Unlock()
if e = c.subscribe(channels); e != nil {
return e
}
// Remove any dupes in c.subscriptions
c.SG.Lock()
l := len(c.subscriptionsMap)
if cap(c.subscriptions) < l {
c.subscriptions = make([]string, l)
} else {
c.subscriptions = c.subscriptions[:l]
}
i := 0
for s := range c.subscriptionsMap {
c.subscriptions[i] = s
i++
}
c.SG.Unlock()
return nil
}
// UnsubscribeFromChannels unsubscribes from channels
func (c *Client) UnsubscribeFromChannels(channels []string) {
var (
pblcChannels []string
prvtChannels []string
)
for _, v := range c.subscriptions {
if _, ok := c.subscriptionsMap[v]; ok {
if strings.HasPrefix(v, "user.") {
prvtChannels = append(prvtChannels, v)
} else {
pblcChannels = append(pblcChannels, v)
}
}
}
if len(pblcChannels) > 0 {
_, err := c.UnsubPblc(pblcChannels)
if err != nil {
go c.Logger.Println(err.Error())
}
}
if len(prvtChannels) > 0 {
_, err := c.UnsubPrvt(prvtChannels)
if err != nil {
go c.Logger.Println(err.Error())
}
}
}
| [
"\"DERIBIT_TEST_MAIN_KEY\"",
"\"DERIBIT_TEST_MAIN_SECRET\""
]
| []
| [
"DERIBIT_TEST_MAIN_SECRET",
"DERIBIT_TEST_MAIN_KEY"
]
| [] | ["DERIBIT_TEST_MAIN_SECRET", "DERIBIT_TEST_MAIN_KEY"] | go | 2 | 0 | |
core/polyaxon/connections/azure/base.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Optional, Union
from polyaxon.connections.base import BaseService
from polyaxon.connections.reader import read_keys
def get_account_name(
keys: Optional[Union[str, List[str]]] = None, context_path: Optional[str] = None
):
keys = keys or ["AZURE_ACCOUNT_NAME"]
return read_keys(context_path=context_path, keys=keys)
def get_account_key(
keys: Optional[Union[str, List[str]]] = None, context_path: Optional[str] = None
):
keys = keys or ["AZURE_ACCOUNT_KEY"]
return read_keys(context_path=context_path, keys=keys)
def get_connection_string(
keys: Optional[Union[str, List[str]]] = None, context_path: Optional[str] = None
):
keys = keys or ["AZURE_CONNECTION_STRING"]
return read_keys(context_path=context_path, keys=keys)
class AzureService(BaseService):
def __init__(self, connection=None, **kwargs):
super().__init__(connection=connection, **kwargs)
self._account_name = kwargs.get("account_name") or kwargs.get(
"AZURE_ACCOUNT_NAME"
)
self._account_key = kwargs.get("account_key") or kwargs.get("AZURE_ACCOUNT_KEY")
self._connection_string = kwargs.get("connection_string") or kwargs.get(
"AZURE_CONNECTION_STRING"
)
def set_connection(
self,
connection=None,
connection_name=None,
account_name=None,
account_key=None,
connection_string=None,
):
raise NotImplementedError
def set_env_vars(self):
if self._account_name:
os.environ["AZURE_ACCOUNT_NAME"] = self._account_name
if self._account_key:
os.environ["AZURE_ACCOUNT_KEY"] = self._account_key
if self._connection_string:
os.environ["AZURE_CONNECTION_STRING"] = self._connection_string
| []
| []
| [
"AZURE_ACCOUNT_KEY",
"AZURE_ACCOUNT_NAME",
"AZURE_CONNECTION_STRING"
]
| [] | ["AZURE_ACCOUNT_KEY", "AZURE_ACCOUNT_NAME", "AZURE_CONNECTION_STRING"] | python | 3 | 0 | |
lib/install.go | package lib
import (
"fmt"
"log"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"github.com/hashicorp/go-version"
)
const (
installFile = "terraform"
installVersion = "terraform_"
installPath = ".terraform.versions"
recentFile = "RECENT"
defaultBin = "/usr/local/bin/terraform" //default bin installation dir
tfDarwinArm64StartVersion = "1.0.2"
)
var (
installLocation = "/tmp"
)
// initialize : removes any existing symlink to the terraform binary (possibly no longer needed)
func initialize() {
/* Step 1 */
	/* initialize default binary path for terraform */
	/* assumes that terraform is installed here */
	/* we will find the installed terraform path later and replace this variable with the correct bin path */
installedBinPath := "/usr/local/bin/terraform"
/* find terraform binary location if terraform is already installed*/
cmd := NewCommand("terraform")
next := cmd.Find()
	/* override the default installation binary path if terraform is already installed */
/* find the last bin path */
for path := next(); len(path) > 0; path = next() {
installedBinPath = path
}
	/* check if a symlink to the terraform binary already exists */
	symlinkExist := CheckSymlink(installedBinPath)
	/* remove the current symlink if it exists */
if symlinkExist {
RemoveSymlink(installedBinPath)
}
}
// getInstallLocation : gets the location where the terraform binary will be installed;
// creates a directory in the home location if it does not exist
func getInstallLocation() string {
/* get current user */
usr, errCurr := user.Current()
if errCurr != nil {
log.Fatal(errCurr)
}
userCommon := usr.HomeDir
/* For snapcraft users, SNAP_USER_COMMON environment variable is set by default.
* tfswitch does not have permission to save to $HOME/.terraform.versions for snapcraft users
* tfswitch will save binaries into $SNAP_USER_COMMON/.terraform.versions */
if os.Getenv("SNAP_USER_COMMON") != "" {
userCommon = os.Getenv("SNAP_USER_COMMON")
}
/* set installation location */
installLocation = filepath.Join(userCommon, installPath)
/* Create local installation directory if it does not exist */
CreateDirIfNotExist(installLocation)
return installLocation
}
// Install : installs the version provided in the argument
func Install(tfversion string, binPath string, mirrorURL string) {
if !ValidVersionFormat(tfversion) {
fmt.Printf("The provided terraform version format does not exist - %s. Try `tfswitch -l` to see all available versions.\n", tfversion)
os.Exit(1)
}
pathDir := Path(binPath) //get path directory from binary path
//binDirExist := CheckDirExist(pathDir) //check bin path exist
/* Check to see if user has permission to the default bin location which is "/usr/local/bin/terraform"
* If user does not have permission to default bin location, proceed to create $HOME/bin and install the tfswitch there
* Inform user that they dont have permission to default location, therefore tfswitch was installed in $HOME/bin
* Tell users to add $HOME/bin to their path
*/
binPath = InstallableBinLocation(pathDir)
initialize() //initialize path
installLocation = getInstallLocation() //get installation location - this is where we will put our terraform binary file
goarch := runtime.GOARCH
goos := runtime.GOOS
// Terraform darwin arm64 comes with 1.0.2 and next version
tfver, _ := version.NewVersion(tfversion)
tf102, _ := version.NewVersion(tfDarwinArm64StartVersion)
if goos == "darwin" && goarch == "arm64" && tfver.LessThan(tf102) {
goarch = "amd64"
}
/* check if selected version already downloaded */
installFileVersionPath := ConvertExecutableExt(filepath.Join(installLocation, installVersion+tfversion))
fileExist := CheckFileExist(installFileVersionPath)
	/* if the selected version already exists, */
	if fileExist {
		/* remove the current symlink if it exists */
symlinkExist := CheckSymlink(binPath)
if symlinkExist {
RemoveSymlink(binPath)
}
/* set symlink to desired version */
CreateSymlink(installFileVersionPath, binPath)
fmt.Printf("Switched terraform to version %q \n", tfversion)
AddRecent(tfversion) //add to recent file for faster lookup
os.Exit(0)
}
	// if the mirror URL does not end with a slash, append one
hasSlash := strings.HasSuffix(mirrorURL, "/")
if !hasSlash {
mirrorURL = fmt.Sprintf("%s/", mirrorURL)
}
	/* the selected version does not exist locally, */
	/* so proceed to download it from the hashicorp release page */
url := mirrorURL + tfversion + "/" + installVersion + tfversion + "_" + goos + "_" + goarch + ".zip"
zipFile, errDownload := DownloadFromURL(installLocation, url)
/* If unable to download file from url, exit(1) immediately */
if errDownload != nil {
fmt.Println(errDownload)
os.Exit(1)
}
/* unzip the downloaded zipfile */
_, errUnzip := Unzip(zipFile, installLocation)
if errUnzip != nil {
fmt.Println("[Error] : Unable to unzip downloaded zip file")
log.Fatal(errUnzip)
os.Exit(1)
}
/* rename unzipped file to terraform version name - terraform_x.x.x */
installFilePath := ConvertExecutableExt(filepath.Join(installLocation, installFile))
RenameFile(installFilePath, installFileVersionPath)
/* remove zipped file to clear clutter */
RemoveFiles(zipFile)
	/* remove the current symlink if it exists */
symlinkExist := CheckSymlink(binPath)
if symlinkExist {
RemoveSymlink(binPath)
}
/* set symlink to desired version */
CreateSymlink(installFileVersionPath, binPath)
fmt.Printf("Switched terraform to version %q \n", tfversion)
AddRecent(tfversion) //add to recent file for faster lookup
os.Exit(0)
}
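// Usage sketch (illustrative only; the version, bin path and mirror URL below
// are placeholders, not values taken from this file):
//	lib.Install("1.0.2", "/usr/local/bin/terraform", "https://releases.hashicorp.com/terraform")
// Note that Install exits the process when it finishes, so it is intended as
// the final step of a CLI invocation rather than a reusable library call.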
// AddRecent : add to recent file
func AddRecent(requestedVersion string) {
installLocation = getInstallLocation() //get installation location - this is where we will put our terraform binary file
versionFile := filepath.Join(installLocation, recentFile)
fileExist := CheckFileExist(versionFile)
if fileExist {
lines, errRead := ReadLines(versionFile)
if errRead != nil {
fmt.Printf("[Error] : %s\n", errRead)
return
}
for _, line := range lines {
if !ValidVersionFormat(line) {
fmt.Println("File dirty. Recreating cache file.")
RemoveFiles(versionFile)
CreateRecentFile(requestedVersion)
return
}
}
versionExist := VersionExist(requestedVersion, lines)
if !versionExist {
if len(lines) >= 3 {
_, lines = lines[len(lines)-1], lines[:len(lines)-1]
lines = append([]string{requestedVersion}, lines...)
WriteLines(lines, versionFile)
} else {
lines = append([]string{requestedVersion}, lines...)
WriteLines(lines, versionFile)
}
}
} else {
CreateRecentFile(requestedVersion)
}
}
// GetRecentVersions : get recent version from file
func GetRecentVersions() ([]string, error) {
installLocation = getInstallLocation() //get installation location - this is where we will put our terraform binary file
versionFile := filepath.Join(installLocation, recentFile)
fileExist := CheckFileExist(versionFile)
if fileExist {
lines, errRead := ReadLines(versionFile)
outputRecent := []string{}
if errRead != nil {
fmt.Printf("Error: %s\n", errRead)
return nil, errRead
}
for _, line := range lines {
/* checks if versions in the recent file are valid.
			   If any version is invalid, it will be considered dirty
and the recent file will be removed
*/
if !ValidVersionFormat(line) {
RemoveFiles(versionFile)
return nil, errRead
}
			/* output can be confusing since it displays the 3 most recently used terraform versions,
			so append the string *recent to the output to make it more user friendly
*/
outputRecent = append(outputRecent, fmt.Sprintf("%s *recent", line))
}
return outputRecent, nil
}
return nil, nil
}
//CreateRecentFile : create a recent file
func CreateRecentFile(requestedVersion string) {
installLocation = getInstallLocation() //get installation location - this is where we will put our terraform binary file
WriteLines([]string{requestedVersion}, filepath.Join(installLocation, recentFile))
}
// ConvertExecutableExt : converts an executable path to use the local OS extension (.exe on Windows)
func ConvertExecutableExt(fpath string) string {
switch runtime.GOOS {
case "windows":
if filepath.Ext(fpath) == ".exe" {
return fpath
}
return fpath + ".exe"
default:
return fpath
}
}
// InstallableBinLocation : checks if terraform is installable in the location provided by the user.
// If not, it creates $HOME/bin, asks the user to add $HOME/bin to $PATH,
// and returns $HOME/bin as the install location.
func InstallableBinLocation(userBin string) string {
usr, errCurr := user.Current()
if errCurr != nil {
log.Fatal(errCurr)
}
/* Setup for SNAPCRAFT Users */
SNAP := os.Getenv("SNAP_REAL_HOME")
if SNAP != "" { //if SNAP_USER_COMMON env is set, install
snapHomePath := filepath.Join(SNAP, "bin")
fmt.Println(snapHomePath)
CreateDirIfNotExist(snapHomePath)
fmt.Printf("Installing terraform at %s\n", snapHomePath)
fmt.Printf("RUN `export PATH=$PATH:%s` to append bin to $PATH\n", snapHomePath)
return filepath.Join(snapHomePath, "terraform")
}
existDefaultBin := CheckDirExist(userBin) //the default is /usr/local/bin but users can provide custom bin locations
	if existDefaultBin { // if it exists, check whether we can write to it
writableToDefault := false
if runtime.GOOS != "windows" {
			writableToDefault = CheckDirWritable(userBin) // check if it is writable (only works on Linux)
}
if !writableToDefault {
exisHomeBin := CheckDirExist(filepath.Join(usr.HomeDir, "bin"))
if exisHomeBin {
fmt.Printf("Installing terraform at %s\n", filepath.Join(usr.HomeDir, "bin"))
return filepath.Join(usr.HomeDir, "bin", "terraform")
}
PrintCreateDirStmt(userBin, filepath.Join(usr.HomeDir, "bin"))
CreateDirIfNotExist(filepath.Join(usr.HomeDir, "bin"))
return filepath.Join(usr.HomeDir, "bin", "terraform")
}
return filepath.Join(userBin, "terraform")
}
fmt.Printf("[Error] : Binary path does not exist: %s\n", userBin)
fmt.Printf("[Error] : Manually create bin directory at: %s and try again.\n", userBin)
os.Exit(1)
return ""
}
func PrintCreateDirStmt(unableDir string, writable string) {
fmt.Printf("Unable to write to: %s\n", unableDir)
fmt.Printf("Creating bin directory at: %s\n", writable)
fmt.Printf("RUN `export PATH=$PATH:%s` to append bin to $PATH\n", writable)
}
| [
"\"SNAP_USER_COMMON\"",
"\"SNAP_USER_COMMON\"",
"\"SNAP_REAL_HOME\""
]
| []
| [
"SNAP_USER_COMMON",
"SNAP_REAL_HOME"
]
| [] | ["SNAP_USER_COMMON", "SNAP_REAL_HOME"] | go | 2 | 0 | |
client_test.go | package iot
import (
"context"
"flag"
"log"
"net/url"
"os"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
cc "golang.org/x/oauth2/clientcredentials"
)
var (
ctx context.Context
client *APIClient
clientID = flag.String("client-id", "", "Client ID obtained using client credentials")
clientSecret = flag.String("client-secret", "", "Client Secreted obtained using client credentials")
)
func testCreateDevice(t *testing.T) ArduinoDevicev2 {
devicePayload := CreateDevicesV2Payload{
Name: "TestDevice",
Type: "mkr1000",
}
device, _, err := client.DevicesV2Api.DevicesV2Create(ctx, devicePayload)
assert.NoError(t, err, "No errors creating device")
assert.Equal(t, devicePayload.Name, device.Name, "Device name was correctly set")
assert.Equal(t, devicePayload.Type, device.Type, "Device type was correctly set")
assert.NotNil(t, device.Id, "Device ID was correctly generated")
return device
}
func testCreateThing(t *testing.T, name string) ArduinoThing {
thingPayload := Thing{
Name: name,
}
thing, _, err := client.ThingsV2Api.ThingsV2Create(ctx, thingPayload, nil)
assert.NoError(t, err, "No errors creating thing")
assert.Equal(t, thingPayload.Name, thing.Name, "Thing name was correctly set")
return thing
}
func testAttachDeviceThing(t *testing.T, thingID, deviceID string) ArduinoThing {
thing, _, err := client.ThingsV2Api.ThingsV2Update(ctx, thingID, Thing{
DeviceId: deviceID,
}, nil)
assert.NoError(t, err, "No errors updating thing")
assert.Equal(t, deviceID, thing.DeviceId, "Device was correctly attached")
return thing
}
func TestMain(m *testing.M) {
// Check credentials
flag.Parse()
*clientID = strings.TrimSpace(*clientID)
if *clientID == "" {
*clientID = os.Getenv("CLIENT_ID")
}
*clientSecret = strings.TrimSpace(*clientSecret)
if *clientSecret == "" {
*clientSecret = os.Getenv("CLIENT_SECRET")
}
if *clientID == "" || *clientSecret == "" {
log.Fatalf("Invalid credentials, use -client-id -client-secret")
}
// We need to pass the additional "audience" var to request an access token
additionalValues := url.Values{}
additionalValues.Add("audience", "https://api2.arduino.cc/iot")
// Set up OAuth2 configuration
config := cc.Config{
ClientID: *clientID,
ClientSecret: *clientSecret,
TokenURL: "https://api2.arduino.cc/iot/v1/clients/token",
EndpointParams: additionalValues,
}
// Get the access token in exchange of client_id and client_secret
tok, err := config.Token(context.Background())
if err != nil {
log.Fatalf("Error retrieving access token, %v", err)
}
// Confirm we got the token and print expiration time
log.Printf("Got an access token, will expire on %s", tok.Expiry)
// We use the token to create a context that will be passed to any API call
ctx = context.WithValue(context.Background(), ContextAccessToken, tok.AccessToken)
// Create an instance of the iot-api Go client, we pass an empty config
// because defaults are ok
client = NewAPIClient(NewConfiguration())
cleanup()
code := m.Run()
cleanup()
// call flag.Parse() here if TestMain uses flags
os.Exit(code)
}
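// Running these tests (illustrative; the credential values are placeholders).
// Credentials can be passed either as flags or via environment variables:
//	go test -v -client-id XXX -client-secret YYY
//	CLIENT_ID=XXX CLIENT_SECRET=YYY go test -v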
func cleanup() {
log.Printf("Cleaning devices...")
// Delete devices
devices, _, err := client.DevicesV2Api.DevicesV2List(ctx, nil)
if err != nil {
panic(err)
}
for _, d := range devices {
_, err = client.DevicesV2Api.DevicesV2Delete(ctx, d.Id)
if err != nil {
panic(err)
}
}
// Delete things
log.Printf("Cleaning things...")
things, _, err := client.ThingsV2Api.ThingsV2List(ctx, nil)
if err != nil {
panic(err)
}
for _, t := range things {
_, err = client.ThingsV2Api.ThingsV2Delete(ctx, t.Id, nil)
if err != nil {
panic(err)
}
}
}
func TestDevicesAPI(t *testing.T) {
// Get the list of devices for the current user
devices, _, err := client.DevicesV2Api.DevicesV2List(ctx, nil)
assert.NoError(t, err, "No errors listing devices")
// Ensure is empty
assert.Equal(t, 0, len(devices), "Device list is empty")
// Add a new device
device := testCreateDevice(t)
// Show device
newDevice, _, err := client.DevicesV2Api.DevicesV2Show(ctx, device.Id)
assert.NoError(t, err, "No errors showing device")
assert.Equal(t, device.Name, newDevice.Name, "Device Name is correct")
assert.Equal(t, device.Type, newDevice.Type, "Device ID is correct")
assert.Equal(t, device.Id, newDevice.Id, "Device ID is correct")
// Check if there's only 1 device
devices, _, err = client.DevicesV2Api.DevicesV2List(ctx, nil)
assert.NoError(t, err, "No errors listing devices")
assert.Equal(t, 1, len(devices), "Device list should contain only 1 device")
// Update device name
newName := "TestDevice2"
device, _, err = client.DevicesV2Api.DevicesV2Update(ctx, device.Id, Devicev2{
Name: newName,
})
assert.NoError(t, err, "No error updating device")
assert.Equal(t, newName, device.Name, "Name was updated correctly")
// Delete device
_, err = client.DevicesV2Api.DevicesV2Delete(ctx, device.Id)
assert.NoError(t, err, "No errors deleting device")
// Ensure device list is empty
devices, _, err = client.DevicesV2Api.DevicesV2List(ctx, nil)
assert.NoError(t, err, "No errors listing devices")
assert.Equal(t, 0, len(devices), "Device list is empty")
// Try to get the no more existing device
device, _, err = client.DevicesV2Api.DevicesV2Show(ctx, device.Id)
assert.EqualError(t, err, "401 Unauthorized", "Error should be unauthorized")
assert.Equal(t, ArduinoDevicev2{}, device, "Device should be empty")
}
func TestThingsAPI(t *testing.T) {
// Add a new device
device := testCreateDevice(t)
// Create a thing without a device
thingName := "TestThing"
thing := testCreateThing(t, thingName)
// Attach a device to the thing
thing = testAttachDeviceThing(t, thing.Id, device.Id)
// Show thing
thing, _, err := client.ThingsV2Api.ThingsV2Show(ctx, thing.Id, nil)
assert.NoError(t, err, "No errors showing thing")
assert.Equal(t, thingName, thing.Name, "Name is correct")
assert.Equal(t, device.Id, thing.DeviceId, "Device is correct")
// Delete thing
_, err = client.ThingsV2Api.ThingsV2Delete(ctx, thing.Id, nil)
assert.NoError(t, err, "No errors deleting thing")
// Try to get the no more existing thing
thing, _, err = client.ThingsV2Api.ThingsV2Show(ctx, thing.Id, nil)
assert.EqualError(t, err, "404 Not Found", "Error should be not found")
assert.Equal(t, ArduinoThing{}, thing, "Thing should be empty")
// Delete device
_, err = client.DevicesV2Api.DevicesV2Delete(ctx, device.Id)
assert.NoError(t, err, "No errors deleting device")
}
func TestProperties(t *testing.T) {
// Create a device
device := testCreateDevice(t)
// Create a thing
thing := testCreateThing(t, "ThingName")
// Attach the device to the thing
thing = testAttachDeviceThing(t, thing.Id, device.Id)
// Create a property
propertyPayload := Property{
Name: "testInt",
Type: "INT",
Permission: "READ_WRITE",
UpdateStrategy: "ON_CHANGE",
}
property, _, err := client.PropertiesV2Api.PropertiesV2Create(ctx, thing.Id, propertyPayload)
assert.NoError(t, err, "No errors creating properties")
assert.Equal(t, propertyPayload.Name, property.Name, "Property name was set correctly")
assert.Equal(t, propertyPayload.Type, property.Type, "Property type was set correctly")
assert.Equal(t, propertyPayload.Permission, property.Permission, "Property permission was set correctly")
assert.Equal(t, propertyPayload.UpdateStrategy, property.UpdateStrategy, "Property update strategy was set correctly")
// Generate a sketch
thing, _, err = client.ThingsV2Api.ThingsV2CreateSketch(ctx, thing.Id, ThingSketch{})
assert.NoError(t, err, "No errors creating sketch")
assert.NotNil(t, thing.SketchId, "Sketch ID is not null")
// Create another property
propertyPayload = Property{
Name: "testInt2",
Type: "INT",
Permission: "READ_WRITE",
UpdateStrategy: "ON_CHANGE",
Persist: true,
}
property, _, err = client.PropertiesV2Api.PropertiesV2Create(ctx, thing.Id, propertyPayload)
assert.NoError(t, err, "No errors creating properties")
assert.Equal(t, propertyPayload.Name, property.Name, "Property name was set correctly")
assert.Equal(t, propertyPayload.Type, property.Type, "Property type was set correctly")
assert.Equal(t, propertyPayload.Permission, property.Permission, "Property permission was set correctly")
assert.Equal(t, propertyPayload.UpdateStrategy, property.UpdateStrategy, "Property update strategy was set correctly")
// Update sketch
thing, _, err = client.ThingsV2Api.ThingsV2UpdateSketch(ctx, thing.Id, thing.SketchId, nil)
assert.NoError(t, err, "No errors updating sketch")
assert.NotNil(t, thing.SketchId, "Sketch ID is not null")
// Publish property
propertyValue := float64(100)
_, err = client.PropertiesV2Api.PropertiesV2Publish(ctx, thing.Id, property.Id, PropertyValue{
Value: propertyValue,
})
assert.NoError(t, err, "No errors publishing property")
// Wait for data pipeline ingest the last value
time.Sleep(10 * time.Second)
// Get Last value
property, _, err = client.PropertiesV2Api.PropertiesV2Show(ctx, thing.Id, property.Id, nil)
assert.NoError(t, err, "No errors showing propery")
assert.Equal(t, propertyValue, property.LastValue, "Last value is correct")
// Get value from series batch query
request := BatchQueryRequestMediaV1{
From: time.Now().Add(-60 * time.Second),
To: time.Now(),
Interval: 1,
SeriesLimit: 1000,
Q: "property." + property.Id,
}
batch, _, err := client.SeriesV2Api.SeriesV2BatchQuery(ctx, BatchQueryRequestsMediaV1{
Requests: []BatchQueryRequestMediaV1{
request,
},
})
assert.NoError(t, err, "No errors in batch query")
assert.Equal(t, int64(1), batch.Responses[0].CountValues, "Only 1 value should be present")
assert.Equal(t, propertyValue, batch.Responses[0].Values[0], "Value should be correct")
// Get value from series batch query raw
batchRaw, _, err := client.SeriesV2Api.SeriesV2BatchQueryRaw(ctx, BatchQueryRawRequestsMediaV1{
Requests: []BatchQueryRawRequestMediaV1{
BatchQueryRawRequestMediaV1{
From: time.Now().Add(-60 * time.Second),
To: time.Now(),
Q: "property." + property.Id,
SeriesLimit: 1000,
Sort: "ASC",
},
},
})
assert.NoError(t, err, "No errors getting raw series")
assert.Equal(t, int64(1), batchRaw.Responses[0].CountValues, "Only 1 value should be present")
assert.Equal(t, propertyValue, batchRaw.Responses[0].Values[0], "Value should be correct")
batchLastValue, _, err := client.SeriesV2Api.SeriesV2BatchQueryRawLastValue(ctx, BatchLastValueRequestsMediaV1{
Requests: []BatchQueryRawLastValueRequestMediaV1{
BatchQueryRawLastValueRequestMediaV1{
PropertyId: property.Id,
ThingId: thing.Id,
},
},
})
assert.Equal(t, int64(1), batchLastValue.Responses[0].CountValues, "Only 1 value should be present")
assert.Equal(t, propertyValue, batchLastValue.Responses[0].Values[0], "Value should be correct")
assert.NoError(t, err, "No errors getting raw series last value")
// Delete sketch
thing, _, err = client.ThingsV2Api.ThingsV2DeleteSketch(ctx, thing.Id)
assert.NoError(t, err, "No errors updating sketch")
assert.Equal(t, "", thing.SketchId, "Sketch ID is empty")
// Delete property
_, err = client.PropertiesV2Api.PropertiesV2Delete(ctx, thing.Id, property.Id, nil)
assert.NoError(t, err, "No errors deleting property")
// Delete device and thing
_, err = client.DevicesV2Api.DevicesV2Delete(ctx, device.Id)
assert.NoError(t, err, "No errors deleting device")
_, err = client.ThingsV2Api.ThingsV2Delete(ctx, thing.Id, nil)
assert.NoError(t, err, "No errors deleting thing")
}
| [
"\"CLIENT_ID\"",
"\"CLIENT_SECRET\""
]
| []
| [
"CLIENT_SECRET",
"CLIENT_ID"
]
| [] | ["CLIENT_SECRET", "CLIENT_ID"] | go | 2 | 0 | |
testing/tests/utils_test.py | import os
import mock
import pytest
from django_concurrent_tests.errors import WrappedError
from django_concurrent_tests.utils import (
override_environment,
run_in_subprocess,
ProcessManager,
)
from .funcs_to_test import simple
def test_override_environment():
os.environ['TEST_VALUE1'] = 'val1'
os.environ['TEST_VALUE2'] = 'val2'
assert os.getenv('TEST_VALUE1') == 'val1'
assert os.getenv('TEST_VALUE2') == 'val2'
assert os.getenv('TEST_VALUE3') is None
with override_environment(TEST_VALUE2='updated', TEST_VALUE3='new'):
assert os.getenv('TEST_VALUE1') == 'val1' # no change
assert os.getenv('TEST_VALUE2') == 'updated'
assert os.getenv('TEST_VALUE3') == 'new'
# restored to original state
assert os.getenv('TEST_VALUE1') == 'val1'
assert os.getenv('TEST_VALUE2') == 'val2'
assert os.getenv('TEST_VALUE3') is None
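# Note (illustrative, not part of the original file): changes made to
# os.environ inside the override_environment block are inherited by any
# subprocess spawned within it, e.g.:
#   with override_environment(MY_FLAG='1'):   # MY_FLAG is a placeholder name
#       subprocess.run(['env'])               # the child process sees MY_FLAG=1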
def test_deserializer_exception():
"""
Exceptions raised when deserializing result from subprocess are wrapped
with WrappedError, providing access to the original error and traceback.
"""
with mock.patch(
'django_concurrent_tests.b64pickle.loads', side_effect=ValueError('WTF')
) as mock_loads:
run = run_in_subprocess(simple)
assert isinstance(run.manager, ProcessManager)
assert isinstance(run.result, WrappedError)
assert isinstance(run.result.error, ValueError)
assert run.result.error.args == ('WTF',)
def test_process_manager_parent_pid():
parent_pid = os.getpid()
cmd = [
'python',
'-c',
'import os; print(os.getenv("DJANGO_CONCURRENT_TESTS_PARENT_PID", "NOT FOUND"))',
]
manager = ProcessManager(cmd)
output = manager.run(30)
assert manager.process.pid != parent_pid # validate assumption
assert output.decode("utf-8").strip('\n') == str(parent_pid)
| []
| []
| [
"TEST_VALUE2",
"DJANGO_CONCURRENT_TESTS_PARENT_PID",
"TEST_VALUE3",
"TEST_VALUE1"
]
| [] | ["TEST_VALUE2", "DJANGO_CONCURRENT_TESTS_PARENT_PID", "TEST_VALUE3", "TEST_VALUE1"] | python | 4 | 0 | |
python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import unittest
import numpy as np
from enum import Enum
import paddle
import paddle.static
map_np_dtype_to_fluid_dtype = {
'bool': "bool",
'int8': "int8",
'uint8': "uint8",
"int32": "int32",
"int64": "int64",
"float16": "float16",
"float32": "float32",
"float64": "float64",
}
class ExecutionMode(Enum):
CPU_FP32 = 1
IPU_FP32 = 2
# enable_fp16 through ipu_strategy.enable_fp16
IPU_POPART_FP16 = 3
def __lt__(self, other):
return self.value < other.value
def __gt__(self, other):
return self.value > other.value
def np_dtype_to_fluid_str(dtype: np.dtype) -> str:
return map_np_dtype_to_fluid_dtype[dtype.name]
class IPUOpTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Get random seeds
cls._np_rand_state = np.random.get_state()
cls._py_rand_state = random.getstate()
cls.SEED = 2021
np.random.seed(cls.SEED)
random.seed(cls.SEED)
# Enable paddle static graph mode
paddle.enable_static()
@classmethod
def tearDownClass(cls):
"""Restore random seeds"""
np.random.set_state(cls._np_rand_state)
random.setstate(cls._py_rand_state)
@classmethod
def use_ipumodel(cls):
if 'POPLAR_IPUMODEL' not in os.environ:
return False
        flag = os.environ['POPLAR_IPUMODEL']
        return flag.upper() in ['1', "TRUE"]
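    # Usage sketch (illustrative): the IPU model simulator is selected purely
    # through the environment, e.g.
    #   POPLAR_IPUMODEL=1 python -m pytest <some_ipu_test>.py
    # where the test file name is a placeholder.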
def set_atol(self):
self.atol = 1e-10
self.rtol = 1e-6
self.atol_fp16 = 1e-3
self.rtol_fp16 = 1e-3
def set_training(self):
self.is_training = False
self.epoch = 1
def check(self, outputs, check_shape=False):
cpu_fp32 = outputs[ExecutionMode.CPU_FP32]
ipu_fp32 = outputs[ExecutionMode.IPU_FP32]
max_diff = np.abs(cpu_fp32 - ipu_fp32).max()
fp32_flag = np.allclose(
cpu_fp32, ipu_fp32, rtol=self.rtol, atol=self.atol)
self.assertTrue(fp32_flag, "max diff is %f" % (max_diff))
if check_shape:
self.assertTrue(cpu_fp32.shape == ipu_fp32.shape)
ipu_popart_fp16 = None
if ExecutionMode.IPU_POPART_FP16 in outputs.keys():
ipu_popart_fp16 = outputs[ExecutionMode.IPU_POPART_FP16]
max_diff = np.abs(ipu_popart_fp16.astype(np.float32) -
cpu_fp32).max()
fp16_flag = np.allclose(
ipu_popart_fp16.astype(np.float32),
cpu_fp32,
rtol=self.rtol_fp16,
atol=self.atol_fp16)
self.assertTrue(fp16_flag, "max diff is %f" % (max_diff))
if check_shape:
self.assertTrue(ipu_popart_fp16.shape == cpu_fp32.shape)
| []
| []
| [
"POPLAR_IPUMODEL"
]
| [] | ["POPLAR_IPUMODEL"] | python | 1 | 0 | |
pkg/provider/kubernetes/ingress/kubernetes.go | package ingress
import (
"context"
"errors"
"fmt"
"math"
"os"
"sort"
"strconv"
"strings"
"time"
"github.com/cenkalti/backoff/v3"
"github.com/containous/traefik/v2/pkg/config/dynamic"
"github.com/containous/traefik/v2/pkg/job"
"github.com/containous/traefik/v2/pkg/log"
"github.com/containous/traefik/v2/pkg/provider"
"github.com/containous/traefik/v2/pkg/safe"
"github.com/containous/traefik/v2/pkg/tls"
"github.com/containous/traefik/v2/pkg/types"
"github.com/mitchellh/hashstructure"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/networking/v1beta1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
annotationKubernetesIngressClass = "kubernetes.io/ingress.class"
traefikDefaultIngressClass = "traefik"
defaultPathMatcher = "PathPrefix"
)
// Provider holds configurations of the provider.
type Provider struct {
Endpoint string `description:"Kubernetes server endpoint (required for external cluster client)." json:"endpoint,omitempty" toml:"endpoint,omitempty" yaml:"endpoint,omitempty"`
Token string `description:"Kubernetes bearer token (not needed for in-cluster client)." json:"token,omitempty" toml:"token,omitempty" yaml:"token,omitempty"`
CertAuthFilePath string `description:"Kubernetes certificate authority file path (not needed for in-cluster client)." json:"certAuthFilePath,omitempty" toml:"certAuthFilePath,omitempty" yaml:"certAuthFilePath,omitempty"`
DisablePassHostHeaders bool `description:"Kubernetes disable PassHost Headers." json:"disablePassHostHeaders,omitempty" toml:"disablePassHostHeaders,omitempty" yaml:"disablePassHostHeaders,omitempty" export:"true"`
Namespaces []string `description:"Kubernetes namespaces." json:"namespaces,omitempty" toml:"namespaces,omitempty" yaml:"namespaces,omitempty" export:"true"`
LabelSelector string `description:"Kubernetes Ingress label selector to use." json:"labelSelector,omitempty" toml:"labelSelector,omitempty" yaml:"labelSelector,omitempty" export:"true"`
IngressClass string `description:"Value of kubernetes.io/ingress.class annotation to watch for." json:"ingressClass,omitempty" toml:"ingressClass,omitempty" yaml:"ingressClass,omitempty" export:"true"`
IngressEndpoint *EndpointIngress `description:"Kubernetes Ingress Endpoint." json:"ingressEndpoint,omitempty" toml:"ingressEndpoint,omitempty" yaml:"ingressEndpoint,omitempty"`
ThrottleDuration types.Duration `description:"Ingress refresh throttle duration" json:"throttleDuration,omitempty" toml:"throttleDuration,omitempty" yaml:"throttleDuration,omitempty"`
lastConfiguration safe.Safe
}
// EndpointIngress holds the endpoint information for the Kubernetes provider
type EndpointIngress struct {
IP string `description:"IP used for Kubernetes Ingress endpoints." json:"ip,omitempty" toml:"ip,omitempty" yaml:"ip,omitempty"`
Hostname string `description:"Hostname used for Kubernetes Ingress endpoints." json:"hostname,omitempty" toml:"hostname,omitempty" yaml:"hostname,omitempty"`
PublishedService string `description:"Published Kubernetes Service to copy status from." json:"publishedService,omitempty" toml:"publishedService,omitempty" yaml:"publishedService,omitempty"`
}
func (p *Provider) newK8sClient(ctx context.Context, ingressLabelSelector string) (*clientWrapper, error) {
ingLabelSel, err := labels.Parse(ingressLabelSelector)
if err != nil {
return nil, fmt.Errorf("invalid ingress label selector: %q", ingressLabelSelector)
}
logger := log.FromContext(ctx)
logger.Infof("ingress label selector is: %q", ingLabelSel)
withEndpoint := ""
if p.Endpoint != "" {
withEndpoint = fmt.Sprintf(" with endpoint %v", p.Endpoint)
}
var cl *clientWrapper
switch {
case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "":
logger.Infof("Creating in-cluster Provider client%s", withEndpoint)
cl, err = newInClusterClient(p.Endpoint)
case os.Getenv("KUBECONFIG") != "":
logger.Infof("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
cl, err = newExternalClusterClientFromFile(os.Getenv("KUBECONFIG"))
default:
logger.Infof("Creating cluster-external Provider client%s", withEndpoint)
cl, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
}
if err == nil {
cl.ingressLabelSelector = ingLabelSel
}
return cl, err
}
// Init the provider.
func (p *Provider) Init() error {
return nil
}
// Provide allows the k8s provider to provide configurations to traefik
// using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
ctxLog := log.With(context.Background(), log.Str(log.ProviderName, "kubernetes"))
logger := log.FromContext(ctxLog)
logger.Debugf("Using Ingress label selector: %q", p.LabelSelector)
k8sClient, err := p.newK8sClient(ctxLog, p.LabelSelector)
if err != nil {
return err
}
pool.Go(func(stop chan bool) {
operation := func() error {
stopWatch := make(chan struct{}, 1)
defer close(stopWatch)
eventsChan, err := k8sClient.WatchAll(p.Namespaces, stopWatch)
if err != nil {
logger.Errorf("Error watching kubernetes events: %v", err)
timer := time.NewTimer(1 * time.Second)
select {
case <-timer.C:
return err
case <-stop:
return nil
}
}
throttleDuration := time.Duration(p.ThrottleDuration)
throttledChan := throttleEvents(ctxLog, throttleDuration, stop, eventsChan)
if throttledChan != nil {
eventsChan = throttledChan
}
for {
select {
case <-stop:
return nil
case event := <-eventsChan:
// Note that event is the *first* event that came in during this
// throttling interval -- if we're hitting our throttle, we may have
// dropped events. This is fine, because we don't treat different
// event types differently. But if we do in the future, we'll need to
// track more information about the dropped events.
conf := p.loadConfigurationFromIngresses(ctxLog, k8sClient)
confHash, err := hashstructure.Hash(conf, nil)
switch {
case err != nil:
logger.Error("Unable to hash the configuration")
case p.lastConfiguration.Get() == confHash:
logger.Debugf("Skipping Kubernetes event kind %T", event)
default:
p.lastConfiguration.Set(confHash)
configurationChan <- dynamic.Message{
ProviderName: "kubernetes",
Configuration: conf,
}
}
// If we're throttling, we sleep here for the throttle duration to
// enforce that we don't refresh faster than our throttle. time.Sleep
// returns immediately if p.ThrottleDuration is 0 (no throttle).
time.Sleep(throttleDuration)
}
}
}
notify := func(err error, time time.Duration) {
logger.Errorf("Provider connection error: %s; retrying in %s", err, time)
}
err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
if err != nil {
logger.Errorf("Cannot connect to Provider: %s", err)
}
})
return nil
}
func (p *Provider) loadConfigurationFromIngresses(ctx context.Context, client Client) *dynamic.Configuration {
conf := &dynamic.Configuration{
HTTP: &dynamic.HTTPConfiguration{
Routers: map[string]*dynamic.Router{},
Middlewares: map[string]*dynamic.Middleware{},
Services: map[string]*dynamic.Service{},
},
TCP: &dynamic.TCPConfiguration{},
}
ingresses := client.GetIngresses()
certConfigs := make(map[string]*tls.CertAndStores)
for _, ingress := range ingresses {
ctx = log.With(ctx, log.Str("ingress", ingress.Name), log.Str("namespace", ingress.Namespace))
if !shouldProcessIngress(p.IngressClass, ingress.Annotations[annotationKubernetesIngressClass]) {
continue
}
rtConfig, err := parseRouterConfig(ingress.Annotations)
if err != nil {
log.FromContext(ctx).Errorf("Failed to parse annotations: %v", err)
continue
}
err = getCertificates(ctx, ingress, client, certConfigs)
if err != nil {
log.FromContext(ctx).Errorf("Error configuring TLS: %v", err)
}
if len(ingress.Spec.Rules) == 0 && ingress.Spec.Backend != nil {
if _, ok := conf.HTTP.Services["default-backend"]; ok {
log.FromContext(ctx).Error("The default backend already exists.")
continue
}
service, err := loadService(client, ingress.Namespace, *ingress.Spec.Backend)
if err != nil {
log.FromContext(ctx).
WithField("serviceName", ingress.Spec.Backend.ServiceName).
WithField("servicePort", ingress.Spec.Backend.ServicePort.String()).
Errorf("Cannot create service: %v", err)
continue
}
rt := &dynamic.Router{
Rule: "PathPrefix(`/`)",
Priority: math.MinInt32,
Service: "default-backend",
}
if rtConfig != nil && rtConfig.Router != nil {
rt.EntryPoints = rtConfig.Router.EntryPoints
rt.Middlewares = rtConfig.Router.Middlewares
rt.TLS = rtConfig.Router.TLS
}
conf.HTTP.Routers["default-router"] = rt
conf.HTTP.Services["default-backend"] = service
}
for _, rule := range ingress.Spec.Rules {
if err := checkStringQuoteValidity(rule.Host); err != nil {
log.FromContext(ctx).Errorf("Invalid syntax for host: %s", rule.Host)
continue
}
if err := p.updateIngressStatus(ingress, client); err != nil {
log.FromContext(ctx).Errorf("Error while updating ingress status: %v", err)
}
if rule.HTTP == nil {
continue
}
for _, pa := range rule.HTTP.Paths {
if err = checkStringQuoteValidity(pa.Path); err != nil {
log.FromContext(ctx).Errorf("Invalid syntax for path: %s", pa.Path)
continue
}
service, err := loadService(client, ingress.Namespace, pa.Backend)
if err != nil {
log.FromContext(ctx).
WithField("serviceName", pa.Backend.ServiceName).
WithField("servicePort", pa.Backend.ServicePort.String()).
Errorf("Cannot create service: %v", err)
continue
}
serviceName := provider.Normalize(ingress.Namespace + "-" + pa.Backend.ServiceName + "-" + pa.Backend.ServicePort.String())
conf.HTTP.Services[serviceName] = service
routerKey := strings.TrimPrefix(provider.Normalize(rule.Host+pa.Path), "-")
conf.HTTP.Routers[routerKey] = loadRouter(ingress, rule, pa, rtConfig, serviceName)
}
}
}
certs := getTLSConfig(certConfigs)
if len(certs) > 0 {
conf.TLS = &dynamic.TLSConfiguration{
Certificates: certs,
}
}
return conf
}
func (p *Provider) updateIngressStatus(ing *v1beta1.Ingress, k8sClient Client) error {
// Only process if an EndpointIngress has been configured
if p.IngressEndpoint == nil {
return nil
}
if len(p.IngressEndpoint.PublishedService) == 0 {
if len(p.IngressEndpoint.IP) == 0 && len(p.IngressEndpoint.Hostname) == 0 {
return errors.New("publishedService or ip or hostname must be defined")
}
return k8sClient.UpdateIngressStatus(ing, p.IngressEndpoint.IP, p.IngressEndpoint.Hostname)
}
serviceInfo := strings.Split(p.IngressEndpoint.PublishedService, "/")
if len(serviceInfo) != 2 {
return fmt.Errorf("invalid publishedService format (expected 'namespace/service' format): %s", p.IngressEndpoint.PublishedService)
}
serviceNamespace, serviceName := serviceInfo[0], serviceInfo[1]
service, exists, err := k8sClient.GetService(serviceNamespace, serviceName)
if err != nil {
return fmt.Errorf("cannot get service %s, received error: %s", p.IngressEndpoint.PublishedService, err)
}
if exists && service.Status.LoadBalancer.Ingress == nil {
// service exists, but has no Load Balancer status
log.Debugf("Skipping updating Ingress %s/%s due to service %s having no status set", ing.Namespace, ing.Name, p.IngressEndpoint.PublishedService)
return nil
}
if !exists {
return fmt.Errorf("missing service: %s", p.IngressEndpoint.PublishedService)
}
return k8sClient.UpdateIngressStatus(ing, service.Status.LoadBalancer.Ingress[0].IP, service.Status.LoadBalancer.Ingress[0].Hostname)
}
func shouldProcessIngress(ingressClass string, ingressClassAnnotation string) bool {
return ingressClass == ingressClassAnnotation ||
(len(ingressClass) == 0 && ingressClassAnnotation == traefikDefaultIngressClass)
}
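// Example (illustrative): with the provider's IngressClass option left empty,
// an Ingress annotated with "kubernetes.io/ingress.class: traefik" is
// processed while one annotated with "kubernetes.io/ingress.class: nginx" is
// skipped; setting IngressClass to "nginx" inverts that.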
func getCertificates(ctx context.Context, ingress *v1beta1.Ingress, k8sClient Client, tlsConfigs map[string]*tls.CertAndStores) error {
for _, t := range ingress.Spec.TLS {
if t.SecretName == "" {
log.FromContext(ctx).Debugf("Skipping TLS sub-section: No secret name provided")
continue
}
configKey := ingress.Namespace + "-" + t.SecretName
if _, tlsExists := tlsConfigs[configKey]; !tlsExists {
secret, exists, err := k8sClient.GetSecret(ingress.Namespace, t.SecretName)
if err != nil {
return fmt.Errorf("failed to fetch secret %s/%s: %v", ingress.Namespace, t.SecretName, err)
}
if !exists {
return fmt.Errorf("secret %s/%s does not exist", ingress.Namespace, t.SecretName)
}
cert, key, err := getCertificateBlocks(secret, ingress.Namespace, t.SecretName)
if err != nil {
return err
}
tlsConfigs[configKey] = &tls.CertAndStores{
Certificate: tls.Certificate{
CertFile: tls.FileOrContent(cert),
KeyFile: tls.FileOrContent(key),
},
}
}
}
return nil
}
func getCertificateBlocks(secret *corev1.Secret, namespace, secretName string) (string, string, error) {
var missingEntries []string
tlsCrtData, tlsCrtExists := secret.Data["tls.crt"]
if !tlsCrtExists {
missingEntries = append(missingEntries, "tls.crt")
}
tlsKeyData, tlsKeyExists := secret.Data["tls.key"]
if !tlsKeyExists {
missingEntries = append(missingEntries, "tls.key")
}
if len(missingEntries) > 0 {
return "", "", fmt.Errorf("secret %s/%s is missing the following TLS data entries: %s",
namespace, secretName, strings.Join(missingEntries, ", "))
}
cert := string(tlsCrtData)
if cert == "" {
missingEntries = append(missingEntries, "tls.crt")
}
key := string(tlsKeyData)
if key == "" {
missingEntries = append(missingEntries, "tls.key")
}
if len(missingEntries) > 0 {
return "", "", fmt.Errorf("secret %s/%s contains the following empty TLS data entries: %s",
namespace, secretName, strings.Join(missingEntries, ", "))
}
return cert, key, nil
}
func getTLSConfig(tlsConfigs map[string]*tls.CertAndStores) []*tls.CertAndStores {
var secretNames []string
for secretName := range tlsConfigs {
secretNames = append(secretNames, secretName)
}
sort.Strings(secretNames)
var configs []*tls.CertAndStores
for _, secretName := range secretNames {
configs = append(configs, tlsConfigs[secretName])
}
return configs
}
func loadService(client Client, namespace string, backend v1beta1.IngressBackend) (*dynamic.Service, error) {
service, exists, err := client.GetService(namespace, backend.ServiceName)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.New("service not found")
}
var portName string
var portSpec corev1.ServicePort
var match bool
for _, p := range service.Spec.Ports {
if (backend.ServicePort.Type == intstr.Int && backend.ServicePort.IntVal == p.Port) ||
(backend.ServicePort.Type == intstr.String && backend.ServicePort.StrVal == p.Name) {
portName = p.Name
portSpec = p
match = true
break
}
}
if !match {
return nil, errors.New("service port not found")
}
svc := &dynamic.Service{
LoadBalancer: &dynamic.ServersLoadBalancer{
PassHostHeader: func(v bool) *bool { return &v }(true),
},
}
svcConfig, err := parseServiceConfig(service.Annotations)
if err != nil {
return nil, err
}
if svcConfig != nil && svcConfig.Service != nil {
svc.LoadBalancer.Sticky = svcConfig.Service.Sticky
if svcConfig.Service.PassHostHeader != nil {
svc.LoadBalancer.PassHostHeader = svcConfig.Service.PassHostHeader
}
}
if service.Spec.Type == corev1.ServiceTypeExternalName {
protocol := getProtocol(portSpec, portSpec.Name, svcConfig)
svc.LoadBalancer.Servers = []dynamic.Server{
{URL: fmt.Sprintf("%s://%s:%d", protocol, service.Spec.ExternalName, portSpec.Port)},
}
return svc, nil
}
endpoints, endpointsExists, endpointsErr := client.GetEndpoints(namespace, backend.ServiceName)
if endpointsErr != nil {
return nil, endpointsErr
}
if !endpointsExists {
return nil, errors.New("endpoints not found")
}
if len(endpoints.Subsets) == 0 {
return nil, errors.New("subset not found")
}
var port int32
for _, subset := range endpoints.Subsets {
for _, p := range subset.Ports {
if portName == p.Name {
port = p.Port
break
}
}
if port == 0 {
return nil, errors.New("cannot define a port")
}
protocol := getProtocol(portSpec, portName, svcConfig)
for _, addr := range subset.Addresses {
svc.LoadBalancer.Servers = append(svc.LoadBalancer.Servers, dynamic.Server{
URL: fmt.Sprintf("%s://%s:%d", protocol, addr.IP, port),
})
}
}
return svc, nil
}
func getProtocol(portSpec corev1.ServicePort, portName string, svcConfig *ServiceConfig) string {
if svcConfig != nil && svcConfig.Service != nil && svcConfig.Service.ServersScheme != "" {
return svcConfig.Service.ServersScheme
}
protocol := "http"
if portSpec.Port == 443 || strings.HasPrefix(portName, "https") {
protocol = "https"
}
return protocol
}
func loadRouter(ingress *v1beta1.Ingress, rule v1beta1.IngressRule, pa v1beta1.HTTPIngressPath, rtConfig *RouterConfig, serviceName string) *dynamic.Router {
var rules []string
if len(rule.Host) > 0 {
rules = []string{"Host(`" + rule.Host + "`)"}
}
if len(pa.Path) > 0 {
matcher := defaultPathMatcher
if rtConfig != nil && rtConfig.Router != nil && rtConfig.Router.PathMatcher != "" {
matcher = rtConfig.Router.PathMatcher
}
rules = append(rules, fmt.Sprintf("%s(`%s`)", matcher, pa.Path))
}
rt := &dynamic.Router{
Rule: strings.Join(rules, " && "),
Service: serviceName,
}
if len(ingress.Spec.TLS) > 0 {
// TLS enabled for this ingress, add TLS router
rt.TLS = &dynamic.RouterTLSConfig{}
}
if rtConfig != nil && rtConfig.Router != nil {
rt.Priority = rtConfig.Router.Priority
rt.EntryPoints = rtConfig.Router.EntryPoints
rt.Middlewares = rtConfig.Router.Middlewares
if rtConfig.Router.TLS != nil {
rt.TLS = rtConfig.Router.TLS
}
}
return rt
}
func checkStringQuoteValidity(value string) error {
_, err := strconv.Unquote(`"` + value + `"`)
return err
}
func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop chan bool, eventsChan <-chan interface{}) chan interface{} {
if throttleDuration == 0 {
return nil
}
// Create a buffered channel to hold the pending event (if we're delaying processing the event due to throttling)
eventsChanBuffered := make(chan interface{}, 1)
// Run a goroutine that reads events from eventChan and does a
// non-blocking write to pendingEvent. This guarantees that writing to
// eventChan will never block, and that pendingEvent will have
// something in it if there's been an event since we read from that channel.
go func() {
for {
select {
case <-stop:
return
case nextEvent := <-eventsChan:
select {
case eventsChanBuffered <- nextEvent:
default:
// We already have an event in eventsChanBuffered, so we'll
// do a refresh as soon as our throttle allows us to. It's fine
// to drop the event and keep whatever's in the buffer -- we
// don't do different things for different events
log.FromContext(ctx).Debugf("Dropping event kind %T due to throttling", nextEvent)
}
}
}
}()
return eventsChanBuffered
}
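// Behavior sketch (illustrative): with ThrottleDuration set to, say, 10s, a
// burst of Kubernetes events yields at most one configuration refresh per 10s
// window; the first event of each window is processed and later ones in the
// same window may be dropped, which is safe because every event kind triggers
// the same full reload.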
| [
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\"",
"\"KUBECONFIG\"",
"\"KUBECONFIG\"",
"\"KUBECONFIG\""
]
| []
| [
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT",
"KUBECONFIG"
]
| [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT", "KUBECONFIG"] | go | 3 | 0 | |
src/pkgcore/ebuild/ebd.py | """
EBuild Daemon (ebd), main high level interface to ebuild execution env.
Wraps :obj:`pkgcore.ebuild.processor` functionality into a higher level
api, for example per phase methods.
"""
__all__ = (
"ebd", "setup_mixin", "install_op", "uninstall_op", "replace_op",
"buildable", "binpkg_localize")
import errno
import os
import re
import shutil
import sys
import time
from collections import defaultdict
from functools import partial
from itertools import chain
from tempfile import TemporaryFile
from snakeoil import data_source, klass
from snakeoil.compatibility import IGNORED_EXCEPTIONS
from snakeoil.contexts import chdir
from snakeoil.currying import post_curry, pretty_docs
from snakeoil.fileutils import touch
from snakeoil.osutils import ensure_dirs, listdir_files, normpath, pjoin
from snakeoil.process.spawn import is_sandbox_capable, is_userpriv_capable, spawn, spawn_bash
from snakeoil.sequences import iflatten_instance, iter_stable_unique
from .. import const
from ..log import logger
from ..operations import format, observer
from ..os_data import portage_gid, portage_uid, xargs
from ..package.mutated import MutatedPkg
from . import ebd_ipc, ebuild_built, errors
from .processor import (ProcessorError, chuck_UnhandledCommand, expected_ebuild_env,
inherit_handler, release_ebuild_processor, request_ebuild_processor)
class ebd:
def __init__(self, pkg, initial_env=None, env_data_source=None,
observer=None, clean=True, tmp_offset=None):
"""
:param pkg:
:class:`pkgcore.ebuild.ebuild_src.package`
instance this env is being setup for
:param initial_env: initial environment to use for this ebuild
:param env_data_source: a :obj:`snakeoil.data_source.base` instance
to restore the environment from- used for restoring the
state of an ebuild processing, whether for unmerging, or
walking phases during building
"""
self.pkg = pkg
self.eapi = pkg.eapi
if not hasattr(self, "observer"):
self.observer = observer
if not self.eapi.is_supported:
raise TypeError(f"package {pkg} uses unsupported EAPI: {str(self.eapi)!r}")
if initial_env is not None:
# copy.
self.env = dict(initial_env)
for x in ("USE", "ACCEPT_LICENSE"):
self.env.pop(x, None)
else:
self.env = {}
# Drop all USE_EXPAND variables from the exported environment.
for u in self.domain.profile.use_expand:
self.env.pop(u, None)
# Only export USE_EXPAND variables for the package's enabled USE flags.
d = defaultdict(list)
for u in pkg.use:
m = self.domain.use_expand_re.match(u)
if m:
use_expand, value = m.groups()
d[use_expand.upper()].append(value)
for k, v in d.items():
self.env[k] = ' '.join(sorted(v))
self.bashrc = self.env.pop("bashrc", ())
self.features = set(x.lower() for x in self.domain.features)
self.env["FEATURES"] = ' '.join(sorted(self.features))
self.set_path_vars(self.env, self.pkg, self.domain)
# internally implemented EAPI specific functions to skip when exporting env
self.env["PKGCORE_EAPI_FUNCS"] = ' '.join(self.eapi.bash_funcs)
self.env_data_source = env_data_source
if (env_data_source is not None and
not isinstance(env_data_source, data_source.base)):
raise TypeError(
"env_data_source must be None, or a pkgcore.data_source.base "
f"derivative: {env_data_source.__class__}: {env_data_source}")
iuse_effective_regex = f"^({'|'.join(re.escape(x) for x in pkg.iuse_effective)})$"
self.env["PKGCORE_IUSE_EFFECTIVE"] = iuse_effective_regex.replace("\\.\\*", ".*")
expected_ebuild_env(pkg, self.env, env_source_override=self.env_data_source)
self.env["PKGCORE_FINALIZED_RESTRICT"] = ' '.join(str(x) for x in pkg.restrict)
self.restrict = pkg.restrict
for x in ("sandbox", "userpriv"):
setattr(self, x, self.feat_or_bool(x) and not (x in self.restrict))
if self.userpriv and os.getuid() != 0:
self.userpriv = False
if "PORT_LOGDIR" in self.env:
self.logging = pjoin(
self.env["PORT_LOGDIR"],
"%s:%s:%s.log" % (
pkg.cpvstr, self.__class__.__name__,
time.strftime("%Y%m%d-%H%M%S", time.localtime())))
del self.env["PORT_LOGDIR"]
else:
self.logging = False
self.env["PKGCORE_PKG_REPO"] = pkg.source_repository
self.env["XARGS"] = xargs
# wipe variables listed in ENV_UNSET for supporting EAPIs
if self.eapi.options.has_env_unset:
for x in self.env.pop('ENV_UNSET', ()):
self.env.pop(x, None)
# wipe any remaining internal settings from the exported env
wipes = [k for k, v in self.env.items()
if not isinstance(v, str)]
for k in wipes:
del self.env[k]
self._set_op_vars(tmp_offset)
self.clean_at_start = clean
self.clean_needed = False
# various IPC command support
self._ipc_helpers = {
# bash helpers
'doins': ebd_ipc.Doins(self),
'dodoc': ebd_ipc.Dodoc(self),
'dohtml': ebd_ipc.Dohtml(self),
'doinfo': ebd_ipc.Doinfo(self),
'dodir': ebd_ipc.Dodir(self),
'doexe': ebd_ipc.Doexe(self),
'dobin': ebd_ipc.Dobin(self),
'dosbin': ebd_ipc.Dosbin(self),
'dolib': ebd_ipc.Dolib(self),
'dolib.so': ebd_ipc.Dolib_so(self),
'dolib.a': ebd_ipc.Dolib_a(self),
'doman': ebd_ipc.Doman(self),
'domo': ebd_ipc.Domo(self),
'dosym': ebd_ipc.Dosym(self),
'dohard': ebd_ipc.Dohard(self),
'keepdir': ebd_ipc.Keepdir(self),
# bash functions
'has_version': ebd_ipc.Has_Version(self),
'best_version': ebd_ipc.Best_Version(self),
'unpack': ebd_ipc.Unpack(self),
'eapply': ebd_ipc.Eapply(self),
'eapply_user': ebd_ipc.Eapply_User(self),
'docompress': ebd_ipc.Docompress(self),
'dostrip': ebd_ipc.Dostrip(self),
# internals
'filter_env': ebd_ipc.FilterEnv(self),
}
def start(self):
if self.clean_at_start:
self.clean_needed = True
if not self.cleanup():
return False
self.setup_workdir()
self._setup_env_data_source()
self.clean_needed = True
return True
@staticmethod
def set_path_vars(env, pkg, domain):
# XXX: note this is just EAPI 3 and EAPI 7 compatibility; not full prefix, soon..
trailing_slash = pkg.eapi.options.trailing_slash
env['ROOT'] = domain.root.rstrip(os.sep) + trailing_slash
env['PKGCORE_PREFIX_SUPPORT'] = 'false'
if pkg.eapi.options.prefix_capable:
env['EPREFIX'] = domain.prefix.rstrip(os.sep)
env['EROOT'] = (
pjoin(env['ROOT'].rstrip(trailing_slash), env['EPREFIX'])
+ trailing_slash)
env['PKGCORE_PREFIX_SUPPORT'] = 'true'
if pkg.eapi.options.has_sysroot:
env['SYSROOT'] = env['ROOT']
env['ESYSROOT'] = pjoin(env['SYSROOT'], env['EPREFIX'])
env['BROOT'] = env['EPREFIX']
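    # Worked example (illustrative values only): with domain.root = "/target/"
    # and an EAPI whose options.trailing_slash is "/", ROOT is normalized to
    # "/target/"; for prefix-capable EAPIs, EROOT is then ROOT joined with
    # EPREFIX with the trailing slash re-appended.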
def _set_op_vars(self, tmp_offset):
# don't fool with this, without fooling with setup.
self.tmpdir = self.domain.pm_tmpdir
if tmp_offset:
self.tmpdir = pjoin(self.tmpdir, tmp_offset.strip(os.sep))
self.builddir = pjoin(self.tmpdir, self.env["CATEGORY"], self.env["PF"])
for x, y in (("T", "temp"),
("WORKDIR", "work"),
("D", "image"),
("HOME", "homedir"),
("PKGCORE_EMPTYDIR", "empty")):
self.env[x] = normpath(pjoin(self.builddir, y))
self.env["D"] += self.eapi.options.trailing_slash
self.env["PORTAGE_LOGFILE"] = normpath(pjoin(self.env["T"], "build.log"))
# XXX: Note that this is just EAPI 3 support, not yet prefix
# full awareness.
if self.pkg.eapi.options.prefix_capable:
self.env["ED"] = normpath(
pjoin(self.env["D"].rstrip(os.sep), self.env["EPREFIX"])) \
+ self.eapi.options.trailing_slash
# temporary install dir correct for all EAPIs
self.ED = self.env.get('ED', self.env['D'])
def get_env_source(self):
with open(pjoin(self.env["T"], "environment"), "rb") as f:
return data_source.bytes_data_source(f.read())
def _setup_env_data_source(self):
if not ensure_dirs(self.env["T"], mode=0o770, gid=portage_gid, minimal=True):
raise format.FailedDirectory(
self.env['T'],
"%s doesn't fulfill minimum mode %o and gid %i" % (
self.env['T'], 0o770, portage_gid))
if self.env_data_source is not None:
fp = pjoin(self.env["T"], "environment")
# load data first (might be a local_source), *then* write
# if it's a src_ebuild being installed, trying to do two steps
# stomps the local_sources data.
data = self.env_data_source.bytes_fileobj().read()
with open(fp, "wb") as f:
f.write(data)
del data
def _set_per_phase_env(self, phase, env):
self._setup_merge_type(phase, env)
# add phase specific helper paths to PATH if they exist
ebuild_phase = self.eapi.phases.get(phase, '')
if ebuild_phase in self.eapi.helpers:
path = chain.from_iterable((
const.PATH_FORCED_PREPEND,
self.pkg.eapi.helpers.get('global', ()),
self.eapi.helpers[ebuild_phase],
os.environ.get('PATH', '').split(os.pathsep),
))
env['PATH'] = os.pathsep.join(path)
def _setup_merge_type(self, phase, env):
# only allowed in pkg_ phases.
if (not self.eapi.phases.get(phase, "").startswith("pkg_") and
not phase == 'setup-binpkg'):
return
# note all pkgs have this attribute
is_source = getattr(self.pkg, '_is_from_source', True)
if self.eapi.options.has_merge_type:
env["MERGE_TYPE"] = (is_source and "source") or "binary"
else:
# we still must export this, just via the portage var name w/
# different values. if we didn't, spec or not, kernel binpkg
            # merging would be broken.
env["EMERGE_FROM"] = (is_source and "ebuild") or "binary"
def setup_logging(self):
if self.logging and not ensure_dirs(os.path.dirname(self.logging),
mode=0o2770, gid=portage_gid):
raise format.FailedDirectory(
os.path.dirname(self.logging),
"PORT_LOGDIR, desired mode 02770 and gid %i" % portage_gid)
def setup_workdir(self):
# ensure dirs.
for k in ("HOME", "T", "WORKDIR", "D"):
if not ensure_dirs(self.env[k], mode=0o4770, gid=portage_gid, minimal=True):
raise format.FailedDirectory(
self.env[k],
"%s doesn't fulfill minimum mode %o and gid %i" % (k, 0o770, portage_gid))
# XXX hack, just 'til pkgcore controls these directories
if (os.stat(self.env[k]).st_mode & 0o2000):
logger.warning(f"{self.env[k]} ( {k} ) is setgid")
def _generic_phase(self, phase, userpriv, sandbox, extra_handlers={},
failure_allowed=False, suppress_bashrc=False):
"""
:param phase: phase to execute
:param userpriv: will we drop to
:obj:`pkgcore.os_data.portage_uid` and
:obj:`pkgcore.os_data.portage_gid` access for this phase?
:param sandbox: should this phase be sandboxed?
"""
if phase not in self.pkg.mandatory_phases:
# TODO(ferringb): Note the preinst hack; this will be removed once dyn_pkg_preinst
# is dead in full (currently it has a selinux labelling and suidctl ran from there)
if phase != 'preinst':
return True
if 'selinux' not in self.features and 'suidctl' not in self.features:
return True
shutil.rmtree(self.env["PKGCORE_EMPTYDIR"], ignore_errors=True)
os.mkdir(self.env["PKGCORE_EMPTYDIR"])
userpriv = self.userpriv and userpriv
sandbox = self.sandbox and sandbox
self._set_per_phase_env(phase, self.env)
extra_handlers = extra_handlers.copy()
extra_handlers.update(self._ipc_helpers)
if not suppress_bashrc:
extra_handlers.setdefault("request_bashrcs", self._request_bashrcs)
return run_generic_phase(
self.pkg, phase, self.env, userpriv, sandbox,
extra_handlers=extra_handlers, failure_allowed=failure_allowed,
logging=self.logging)
def _request_bashrcs(self, ebd):
for source in self.domain.get_package_bashrcs(self.pkg):
if source.path is not None:
ebd.write(f"path\n{source.path}")
elif source.get_data is not None:
raise NotImplementedError
else:
chuck_UnhandledCommand(
ebd, "bashrc request: unable to process bashrc "
f"due to source '{source}' due to lacking usable get_*")
if not ebd.expect("next"):
chuck_UnhandledCommand(
ebd, "bashrc transfer, didn't receive 'next' response. "
"failure?")
ebd.write("end_request")
def set_is_replacing(self, *pkgs):
if self.eapi.options.exports_replacing:
self.env['REPLACING_VERSIONS'] = " ".join(pkg.PVR for pkg in pkgs)
def set_is_being_replaced_by(self, pkg=None):
if self.eapi.options.exports_replacing and pkg is not None:
self.env['REPLACED_BY_VERSION'] = pkg.PVR
def cleanup(self, disable_observer=False, force=False):
if not force:
if not self.clean_needed:
return True
if not os.path.exists(self.builddir):
return True
if disable_observer:
return self.do_cleanup(disable_observer=disable_observer)
return self.do_cleanup()
@observer.decorate_build_method("cleanup")
def do_cleanup(self):
try:
shutil.rmtree(self.builddir)
# try to wipe the cat dir; if not empty, ignore it
try:
os.rmdir(os.path.dirname(self.builddir))
except EnvironmentError as e:
# POSIX specifies either ENOTEMPTY or EEXIST for non-empty dir
# in particular, Solaris uses EEXIST in that case.
# https://github.com/pkgcore/pkgcore/pull/181
if e.errno not in (errno.ENOTEMPTY, errno.EEXIST):
raise
except EnvironmentError as e:
raise format.GenericBuildError(
f"clean: Caught exception while cleansing: {e}") from e
return True
def feat_or_bool(self, name, extra_env=None):
if name in self.env:
v = bool(self.env[name])
del self.env[name]
name = name.lower()
if v:
self.features.add(name)
else:
if name in self.features:
self.features.remove(name)
elif extra_env is not None and name in extra_env:
v = bool(extra_env[name])
if v:
self.features.add(name.lower())
else:
self.features.remove(name.lower())
else:
v = name.lower() in self.features
return v
def __stage_step_callback__(self, stage):
try:
touch(pjoin(self.builddir, f'.{stage}'))
except EnvironmentError:
# we really don't care...
pass
def _reload_state(self):
try:
self.__set_stage_state__(
[x[1:] for x in listdir_files(self.builddir) if x.startswith(".")])
except EnvironmentError as e:
if e.errno not in (errno.ENOTDIR, errno.ENOENT):
raise
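# Editor's note: the sketch below is illustrative only and is not pkgcore
# code. It restates the resolution order implemented by feat_or_bool()
# above -- an explicit env entry wins (and is consumed), then extra_env,
# then membership in the FEATURES-style set -- using plain builtins so the
# precedence can be exercised in isolation. All names are hypothetical.
def _demo_feat_or_bool(name, env, features, extra_env=None):
    """Toy stand-in for ebd.feat_or_bool() using a plain dict and set."""
    if name in env:
        # env entry wins and is consumed; it also updates the feature set
        value = bool(env.pop(name))
        if value:
            features.add(name.lower())
        else:
            features.discard(name.lower())
    elif extra_env is not None and name in extra_env:
        value = bool(extra_env[name])
        if value:
            features.add(name.lower())
        else:
            features.discard(name.lower())
    else:
        # fall back to whether the feature is already enabled
        value = name.lower() in features
    return value
# Example of the precedence: env beats extra_env, which beats the set.
assert _demo_feat_or_bool("TEST", {"TEST": "1"}, set()) is True
assert _demo_feat_or_bool("TEST", {}, {"test"}) is True
assert _demo_feat_or_bool("TEST", {}, set(), extra_env={"TEST": ""}) is False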
class setup_mixin:
setup_is_for_src = True
def setup(self, setup_phase_override=None):
self.setup_logging()
additional_commands = {}
phase_name = "setup-binpkg"
if self.setup_is_for_src:
phase_name = "setup"
if setup_phase_override is not None:
phase_name = setup_phase_override
if self.setup_is_for_src:
additional_commands["request_inherit"] = partial(inherit_handler, self.eclass_cache)
return self._generic_phase(
phase_name, False, True, extra_handlers=additional_commands)
def run_generic_phase(pkg, phase, env, userpriv, sandbox, fd_pipes=None,
extra_handlers=None, failure_allowed=False, logging=None, **kwargs):
"""
:param phase: phase to execute
:param env: environment mapping for the phase
:param userpriv: will we drop to
:obj:`pkgcore.os_data.portage_uid` and
:obj:`pkgcore.os_data.portage_gid` access for this phase?
:param sandbox: should this phase be sandboxed?
:param fd_pipes: use custom file descriptors for ebd instance
:type fd_pipes: mapping between file descriptors
:param extra_handlers: extra command handlers
:type extra_handlers: mapping from string to callable
:param failure_allowed: allow failure without raising error
:type failure_allowed: boolean
:param logging: None or a filepath to log output to
:return: True when the phase has finished execution
"""
userpriv = userpriv and is_userpriv_capable()
sandbox = sandbox and is_sandbox_capable()
    # resolve a default env before it gets consulted for the tmpdir below
    if env is None:
        env = expected_ebuild_env(pkg)
    tmpdir = kwargs.get('tmpdir', env.get('T', None))
ebd = request_ebuild_processor(userpriv=userpriv, sandbox=sandbox, fd_pipes=fd_pipes)
# this is a bit of a hack; used until ebd accepts observers that handle
# the output redirection on its own. Primary relevance is when
# stdout/stderr are pointed at a file; we leave buffering on, just
# force the flush for synchronization.
sys.stdout.flush()
sys.stderr.flush()
try:
if not ebd.run_phase(phase, env, tmpdir=tmpdir, sandbox=sandbox,
logging=logging, additional_commands=extra_handlers):
if not failure_allowed:
raise format.GenericBuildError(
phase + ": Failed building (False/0 return from handler)")
logger.warning(f"executing phase {phase}: execution failed, ignoring")
except Exception as e:
if isinstance(e, ebd_ipc.IpcError):
# notify bash side of IPC error
ebd.write(e.ret)
if isinstance(e, ebd_ipc.IpcInternalError):
# show main exception cause for internal IPC errors
ebd.shutdown_processor(force=True)
raise e.__cause__
try:
ebd.shutdown_processor()
except ProcessorError as pe:
# catch die errors during shutdown
e = pe
release_ebuild_processor(ebd)
if isinstance(e, ProcessorError):
# force verbose die output
e._verbosity = 1
raise e
elif isinstance(e, IGNORED_EXCEPTIONS + (format.GenericBuildError,)):
raise
raise format.GenericBuildError(
f"Executing phase {phase}: Caught exception: {e}") from e
release_ebuild_processor(ebd)
return True
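# Editor's note: illustrative sketch only, not pkgcore code. run_generic_phase()
# above follows an acquire -> run -> release pattern: the ebuild processor is
# always released, and unexpected exceptions are re-raised as a domain error
# with the original cause chained via "raise ... from e". The toy below shows
# the same shape with hypothetical stand-in callables.
def _demo_run_with_release(acquire, run, release, error_cls=RuntimeError):
    resource = acquire()
    try:
        result = run(resource)
    except Exception as e:
        # release on failure, then translate into the domain error type
        release(resource)
        raise error_cls(f"phase failed: {e}") from e
    release(resource)
    return result
# e.g. _demo_run_with_release(list, len, lambda r: None) returns 0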
class install_op(ebd, format.install):
"""Phase operations and steps for install execution."""
def __init__(self, domain, pkg, observer):
format.install.__init__(self, domain, pkg, observer)
ebd.__init__(
self, pkg, observer=observer, initial_env=self.domain.settings,
env_data_source=pkg.environment, clean=False)
preinst = pretty_docs(
observer.decorate_build_method("preinst")(
post_curry(ebd._generic_phase, "preinst", False, False)),
"run the postinst phase")
postinst = pretty_docs(
observer.decorate_build_method("postinst")(
post_curry(ebd._generic_phase, "postinst", False, False)),
"run the postinst phase")
def add_triggers(self, domain_op, engine):
self.new_pkg.add_format_triggers(domain_op, self, engine)
class uninstall_op(ebd, format.uninstall):
"""Phase operations and steps for uninstall execution."""
def __init__(self, domain, pkg, observer):
format.uninstall.__init__(self, domain, pkg, observer)
ebd.__init__(
self, pkg, observer=observer, initial_env=self.domain.settings,
env_data_source=pkg.environment, clean=False,
tmp_offset="unmerge")
prerm = pretty_docs(
observer.decorate_build_method("prerm")(
post_curry(ebd._generic_phase, "prerm", False, False)),
"run the prerm phase")
postrm = pretty_docs(
observer.decorate_build_method("postrm")(
post_curry(
ebd._generic_phase, "postrm", False, False,
failure_allowed=True)),
"run the postrm phase")
def add_triggers(self, domain_op, engine):
self.old_pkg.add_format_triggers(domain_op, self, engine)
def finish(self):
self.cleanup()
return format.uninstall.finish(self)
class replace_op(format.replace):
"""Phase operations and steps for replace execution."""
install_kls = staticmethod(install_op)
uninstall_kls = staticmethod(uninstall_op)
def __init__(self, domain, old_pkg, new_pkg, observer):
super().__init__(domain, old_pkg, new_pkg, observer)
self.install_op = install_op(domain, new_pkg, observer)
self.install_op.set_is_replacing(old_pkg)
self.uninstall_op = uninstall_op(domain, old_pkg, observer)
self.uninstall_op.set_is_being_replaced_by(new_pkg)
def start(self):
self.install_op.start()
self.uninstall_op.start()
return True
prerm = klass.alias_method("uninstall_op.prerm")
postrm = klass.alias_method("uninstall_op.postrm")
preinst = klass.alias_method("install_op.preinst")
postinst = klass.alias_method("install_op.postinst")
def finalize(self):
ret = self.uninstall_op.finish()
ret2 = self.install_op.finish()
return (ret and ret2)
def add_triggers(self, domain_op, engine):
self.uninstall_op.add_triggers(domain_op, engine)
self.install_op.add_triggers(domain_op, engine)
class buildable(ebd, setup_mixin, format.build):
"""Generic build operation."""
    # XXX this is unclean - should be handing in strictly what is the build
    # env, rather than dumping domain settings as env.
def __init__(self, domain, pkg, verified_files, eclass_cache,
observer=None, force_test=False, **kwargs):
"""
:param pkg: :obj:`pkgcore.ebuild.ebuild_src.package` instance we'll be
building
:param eclass_cache: the :class:`pkgcore.ebuild.eclass_cache`
we'll be using
:param verified_files: mapping of fetchables mapped to their disk location
"""
self._built_class = ebuild_built.fresh_built_package
format.build.__init__(self, domain, pkg, verified_files, observer)
domain_settings = self.domain.settings
ebd.__init__(self, pkg, initial_env=domain_settings, **kwargs)
self.env["FILESDIR"] = pjoin(os.path.dirname(pkg.ebuild.path), "files")
self.eclass_cache = eclass_cache
self.run_test = force_test or self.feat_or_bool("test", domain_settings)
self.allow_failed_test = self.feat_or_bool("test-fail-continue", domain_settings)
if "test" in self.restrict:
self.run_test = False
elif not force_test and "test" not in pkg.use:
if self.run_test:
logger.warning(f"disabling test for {pkg} due to test use flag being disabled")
self.run_test = False
# XXX minor hack
path = self.env["PATH"].split(os.pathsep)
for s, default in (("DISTCC", ".distcc"), ("CCACHE", "ccache")):
b = (self.feat_or_bool(s, domain_settings) and
s not in self.restrict)
setattr(self, s.lower(), b)
if b:
                # looks weird I realize, but
                # pjoin("/foo/bar", "/bar/foo") == "/bar/foo"
                # and pjoin("/foo/bar", ".asdf") == "/foo/bar/.asdf"
self.env.setdefault(s + "_DIR", pjoin(self.domain.tmpdir, default))
# gentoo bug 355283
libdir = self.env.get("ABI")
if libdir is not None:
libdir = self.env.get(f"LIBDIR_{libdir}")
if libdir is not None:
libdir = self.env.get(libdir)
if libdir is None:
libdir = "lib"
path.insert(0, f"/usr/{libdir}/{s.lower()}/bin")
else:
for y in ("_PATH", "_DIR"):
if s + y in self.env:
del self.env[s+y]
self.env["PATH"] = os.pathsep.join(path)
# ordering must match appearance order in SRC_URI per PMS
self.env["A"] = ' '.join(iter_stable_unique(pkg.distfiles))
if self.eapi.options.has_AA:
pkg = self.pkg
while hasattr(pkg, '_raw_pkg'):
pkg = getattr(pkg, '_raw_pkg')
self.env["AA"] = ' '.join(set(iflatten_instance(pkg.distfiles)))
if self.eapi.options.has_KV:
self.env["KV"] = domain.KV
if self.eapi.options.has_merge_type:
self.env["MERGE_TYPE"] = "source"
if self.eapi.options.has_portdir:
self.env["PORTDIR"] = pkg.repo.location
self.env["ECLASSDIR"] = eclass_cache.eclassdir
if self.setup_is_for_src:
# TODO: PORTAGE_ACTUAL_DISTDIR usage by VCS eclasses needs to be
# dropped, but it's currently required for repo reuse.
self.env['PORTAGE_ACTUAL_DISTDIR'] = domain.distdir
self.env['DISTDIR'] = normpath(pjoin(self.builddir, 'distdir'))
for k in ('PORTAGE_ACTUAL_DISTDIR', 'DISTDIR'):
self.env[k] = os.path.realpath(self.env[k]).rstrip(os.sep) + os.sep
def _setup_distfiles(self):
# fetch distfiles
if not self.verified_files:
ops = self.domain.pkg_operations(self.pkg, observer=self.observer)
if ops.fetch():
                # this breaks encapsulation and should be refactored. Trace
                # f35f2 and 6561eac for where this was refactored.
self.verified_files = ops.verified_files
# symlink them into builddir
if self.verified_files:
try:
if os.path.exists(self.env["DISTDIR"]):
if (os.path.isdir(self.env["DISTDIR"]) and
not os.path.islink(self.env["DISTDIR"])):
shutil.rmtree(self.env["DISTDIR"])
else:
os.unlink(self.env["DISTDIR"])
except EnvironmentError as e:
raise format.FailedDirectory(
self.env["DISTDIR"],
f"failed removing existing file/dir/link: {e}") from e
if not ensure_dirs(self.env["DISTDIR"], mode=0o770, gid=portage_gid):
raise format.FailedDirectory(
self.env["DISTDIR"],
"failed creating distdir symlink directory")
try:
for src, dest in [
(k, pjoin(self.env["DISTDIR"], v.filename))
for (k, v) in self.verified_files.items()]:
os.symlink(src, dest)
except EnvironmentError as e:
raise format.GenericBuildError(
f"Failed symlinking in distfiles for src {src} -> {dest}: {e}") from e
@observer.decorate_build_method("setup")
def setup(self):
"""Execute the setup phase, mapping out to pkg_setup in the ebuild.
        Necessary dirs are created as required, and the build env is
        initialized at this point.
"""
if self.distcc:
for p in ("", "/lock", "/state"):
if not ensure_dirs(pjoin(self.env["DISTCC_DIR"], p),
mode=0o2775, gid=portage_gid):
raise format.FailedDirectory(
pjoin(self.env["DISTCC_DIR"], p),
"failed creating needed distcc directory")
if self.ccache:
# yuck.
st = None
try:
st = os.stat(self.env["CCACHE_DIR"])
except OSError as e:
st = None
if not ensure_dirs(self.env["CCACHE_DIR"], mode=0o2775,
gid=portage_gid):
raise format.FailedDirectory(
self.env["CCACHE_DIR"],
"failed creation of ccache dir") from e
                # XXX this is more than mildly stupid.
st = os.stat(self.env["CCACHE_DIR"])
try:
if st.st_gid != portage_gid or (st.st_mode & 0o2775) != 0o2775:
try:
cwd = os.getcwd()
except OSError:
cwd = "/"
with chdir(cwd):
# crap.
os.chmod(self.env["CCACHE_DIR"], 0o2775)
os.chown(self.env["CCACHE_DIR"], -1, portage_gid)
if 0 != spawn(
["chgrp", "-R", str(portage_gid), self.env["CCACHE_DIR"]]):
raise format.FailedDirectory(
self.env["CCACHE_DIR"],
"failed changing ownership for CCACHE_DIR")
if 0 != spawn_bash(
"find '%s' -type d -print0 | %s --null chmod 02775"
% (self.env["CCACHE_DIR"], xargs)):
raise format.FailedDirectory(
self.env["CCACHE_DIR"],
"failed correcting perms for CCACHE_DIR")
if 0 != spawn_bash(
"find '%s' -type f -print0 | %s --null chmod 0775"
% (self.env["CCACHE_DIR"], xargs)):
raise format.FailedDirectory(
self.env["CCACHE_DIR"],
"failed correcting perms for CCACHE_DIR")
except OSError as e:
raise format.FailedDirectory(
self.env["CCACHE_DIR"],
"failed ensuring perms/group owner for CCACHE_DIR") from e
return setup_mixin.setup(self)
def configure(self):
"""Execute the configure phase.
        Does nothing if the pkg's EAPI is less than 2 (that spec lacks a
        separate configure phase).
"""
if "configure" in self.eapi.phases:
return self._generic_phase("configure", True, True)
return True
def prepare(self):
"""Execute a source preparation phase.
        Does nothing if the pkg's EAPI is less than 2.
"""
ret = True
if "prepare" in self.eapi.phases:
ret = self._generic_phase("prepare", True, True)
if (self.eapi.options.user_patches and
not os.path.exists(pjoin(self.env['T'], '.user_patches_applied'))):
self.observer.error(
'eapply_user (or default) must be called in src_prepare()')
raise format.GenericBuildError('missing eapply_user call')
return ret
def nofetch(self):
"""Execute the nofetch phase.
We need the same prerequisites as setup, so reuse that.
"""
ensure_dirs(self.env["T"], mode=0o770, gid=portage_gid, minimal=True)
return setup_mixin.setup(self, "nofetch")
def unpack(self):
"""Execute the unpack phase."""
if self.setup_is_for_src:
self._setup_distfiles()
if self.userpriv:
try:
os.chown(self.env["WORKDIR"], portage_uid, -1)
except OSError as e:
raise format.GenericBuildError(
"failed forcing %i uid for WORKDIR: %s" %
(portage_uid, e)) from e
return self._generic_phase("unpack", True, True)
compile = pretty_docs(
observer.decorate_build_method("compile")(
post_curry(ebd._generic_phase, "compile", True, True)),
"Run the compile phase (maps to src_compile).")
@observer.decorate_build_method("install")
def install(self):
"""Run the install phase (maps to src_install)."""
# TODO: replace print() usage with observer
print(f">>> Install {self.env['PF']} into {self.ED!r} category {self.env['CATEGORY']}")
ret = self._generic_phase("install", False, True)
print(f">>> Completed installing {self.env['PF']} into {self.ED!r}")
return ret
@observer.decorate_build_method("test")
def test(self):
"""Run the test phase (if enabled), maps to src_test."""
if not self.run_test:
return True
return self._generic_phase(
"test", True, True, failure_allowed=self.allow_failed_test)
def finalize(self):
"""Finalize the operation.
        This yields a built package, but the package's metadata/contents are
        bound to the workdir. In other words, install the package somewhere
        prior to executing clean if you intend to install it.
:return: :obj:`pkgcore.ebuild.ebuild_built.package` instance
"""
factory = ebuild_built.fake_package_factory(self._built_class)
return factory.new_package(
self.pkg, self.env["D"], pjoin(self.env["T"], "environment"))
class binpkg_localize(ebd, setup_mixin, format.build):
stage_depends = {"finalize": "setup", "setup": "start"}
setup_is_for_src = False
def __init__(self, domain, pkg, **kwargs):
self._built_class = ebuild_built.package
format.build.__init__(self, domain, pkg, {}, observer=kwargs.get("observer", None))
ebd.__init__(self, pkg, **kwargs)
if self.eapi.options.has_merge_type:
self.env["MERGE_TYPE"] = "binpkg"
def finalize(self):
return MutatedPkg(self.pkg, {"environment": self.get_env_source()})
class ebuild_operations:
_checks = []
def _register_check(checks):
"""Decorator to register sanity checks that will be run."""
def _wrap_func(func):
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
checks.append(func)
return wrapped
return _wrap_func
def _cmd_implementation_sanity_check(self, domain):
"""Run all defined sanity checks."""
failures = []
for check in self._checks:
if result := check(self, self.pkg, domain=domain):
failures.append(result)
return failures
@_register_check(_checks)
def _check_required_use(self, pkg, **kwargs):
"""Perform REQUIRED_USE verification against a set of USE flags.
Note that this assumes the REQUIRED_USE depset has been evaluated
against a known set of enabled USE flags and is in collapsed form.
"""
if pkg.eapi.options.has_required_use:
if failures := tuple(node for node in pkg.required_use if not node.match(pkg.use)):
return errors.RequiredUseError(pkg, failures)
@_register_check(_checks)
def _check_pkg_pretend(self, pkg, *, domain, **kwargs):
"""Run pkg_pretend phase."""
# pkg_pretend is not defined or required
if 'pretend' not in pkg.mandatory_phases:
return
commands = None
if not pkg.built:
commands = {
'request_inherit': partial(inherit_handler, self._eclass_cache),
'has_version': ebd_ipc.Has_Version(self),
'best_version': ebd_ipc.Best_Version(self),
}
# Use base build tempdir for $T instead of full pkg specific path to
# avoid having to create/remove directories -- pkg_pretend isn't
# allowed to write to the filesystem anyway.
self.env = expected_ebuild_env(pkg)
self.env["T"] = domain.pm_tmpdir
ebd.set_path_vars(self.env, pkg, domain)
# avoid clipping eend() messages
self.env["PKGCORE_RC_PREFIX"] = '2'
with TemporaryFile() as f:
# suppress bash output by default
fd_pipes = {1: f.fileno(), 2: f.fileno()}
try:
run_generic_phase(
pkg, "pretend", self.env, tmpdir=None, fd_pipes=fd_pipes,
userpriv=True, sandbox=True, extra_handlers=commands)
except ProcessorError as e:
f.seek(0)
output = f.read().decode().strip('\n')
return errors.PkgPretendError(pkg, output, e)
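# Editor's note: illustrative sketch only, not pkgcore code. The
# _register_check decorator above appends each check to a shared class-level
# list so _cmd_implementation_sanity_check() can run them all and collect the
# non-None results. A minimal standalone version of that pattern, with
# hypothetical names, looks like this:
class _DemoChecks:
    _checks = []
    def _register_check(checks):
        def _wrap_func(func):
            checks.append(func)
            return func
        return _wrap_func
    @_register_check(_checks)
    def _check_positive(self, value):
        if value <= 0:
            return f"{value} is not positive"
    def run_checks(self, value):
        return [r for check in self._checks if (r := check(self, value)) is not None]
# _DemoChecks().run_checks(-1) == ['-1 is not positive']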
class src_operations(ebuild_operations, format.build_operations):
def __init__(self, domain, pkg, eclass_cache, observer=None):
format.build_operations.__init__(self, domain, pkg, observer=observer)
self._eclass_cache = eclass_cache
def _cmd_implementation_build(self, observer, verified_files,
clean=False, force_test=False):
return buildable(
self.domain, self.pkg, verified_files,
self._eclass_cache, observer=observer,
clean=clean, force_test=force_test)
class misc_operations(ebd):
def __init__(self, domain, *args, **kwds):
self.domain = domain
super().__init__(*args, **kwds)
def configure(self, observer=None):
return self._generic_phase('config', False, True)
def info(self, observer=None):
return self._generic_phase('info', True, True)
class built_operations(ebuild_operations, format.operations):
def __init__(self, domain, pkg, observer=None, initial_env=None):
format.operations.__init__(self, domain, pkg, observer=observer)
self._initial_env = initial_env
self._localized_ebd = None
def _cmd_implementation_localize(self, observer, force=False):
if not force and getattr(self.pkg, '_is_from_source', False):
return self.pkg
self._localized_ebd = op = binpkg_localize(
self.domain, self.pkg, clean=False,
initial_env=self._initial_env, env_data_source=self.pkg.environment,
observer=observer)
return op.finalize()
def _cmd_implementation_cleanup(self, observer, force=False):
if not self._localized_ebd:
return True
return self._localized_ebd.cleanup(force=force)
def _cmd_check_support_configure(self):
pkg = self.pkg
if 'config' not in pkg.mandatory_phases:
return False
return True
def _cmd_implementation_configure(self, observer):
misc = misc_operations(
self.domain, self.pkg, env_data_source=self.pkg.environment, clean=True)
try:
misc.start()
misc.configure()
finally:
misc.cleanup()
return True
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
src/appearance.py | import os
import numpy
import pyopencl as cl
class AppearanceCL(object):
def __init__(self, lambda_occ, esim, appearance_norm_weight):
os.environ["PYOPENCL_COMPILER_OUTPUT"] = "1"
self.cl_context = cl.create_some_context(False)
self.queue = cl.CommandQueue(self.cl_context)
program_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "patchmatch.cl")
self.load_program(program_path)
self.lambda_occ = lambda_occ
self.esim = esim
self.appearance_norm_weight = appearance_norm_weight
self.energy = None
self.images = None
self.target_size = None
self.source_size = None
self.patch_size = None
self.effective_target_size = None
self.effective_source_size = None
self.nff = None
self.occurrence_map = None
self.nff_buf = None
self.const_occ = None
self.images_buf = None
self.occurrence_map_buf = None
self.iteration = None
self.energy_buf = None
self.gradient_buf = None
def load_program(self, filename):
program_file = open(filename, 'r')
program_text = "".join(program_file.readlines())
self.program = cl.Program(self.cl_context, program_text).build()
program_file.close()
def random_fill(self):
self.program.random_fill(
self.queue, self.effective_target_size, None, self.nff_buf,
numpy.int32(self.effective_source_size[0]),
numpy.int32(self.effective_source_size[1]),
numpy.int32(self.effective_target_size[1]))
def initialize_distance(self):
self.program.initialize_distance(
self.queue, self.effective_target_size, None, self.images_buf[0],
self.images_buf[1], self.nff_buf, self.occurrence_map_buf,
numpy.int32(self.patch_size[0]), numpy.int32(self.patch_size[1]),
numpy.int32(self.target_size[1]), numpy.int32(self.source_size[1]),
numpy.int32(self.effective_source_size[0]),
numpy.int32(self.effective_source_size[1]),
numpy.int32(self.effective_target_size[1]),
numpy.double(self.lambda_occ), numpy.double(self.esim),
numpy.double(self.const_occ))
def propagate(self):
self.program.propagate(
self.queue, self.effective_target_size, None, self.images_buf[0],
self.images_buf[1], self.nff_buf, self.occurrence_map_buf,
numpy.int32(self.patch_size[0]), numpy.int32(self.patch_size[1]),
numpy.int32(self.target_size[0]), numpy.int32(self.target_size[1]),
numpy.int32(self.source_size[0]), numpy.int32(self.source_size[1]),
numpy.int32(self.effective_target_size[0]),
numpy.int32(self.effective_target_size[1]),
numpy.int32(self.effective_source_size[0]),
numpy.int32(self.effective_source_size[1]),
numpy.int32(self.iteration), numpy.double(self.lambda_occ),
numpy.double(self.esim), numpy.double(self.const_occ))
def build_occurence_map(self):
self.program.build_occurrence_map(
self.queue, self.effective_target_size, None,
self.occurrence_map_buf, self.nff_buf,
numpy.int32(self.patch_size[0]), numpy.int32(self.patch_size[1]),
numpy.int32(self.source_size[1]),
numpy.int32(self.effective_target_size[1]))
def build_gradient(self):
self.program.build_gradient(
self.queue, self.target_size[0:2], None, self.images_buf[0],
self.images_buf[1], self.nff_buf, self.energy_buf,
self.gradient_buf, numpy.int32(self.patch_size[0]),
numpy.int32(self.patch_size[1]), numpy.int32(self.target_size[1]),
numpy.int32(self.source_size[1]),
numpy.int32(self.effective_target_size[0]),
numpy.int32(self.effective_target_size[1]),
numpy.int32(self.effective_source_size[0]),
numpy.int32(self.effective_source_size[1]),
numpy.double(self.esim))
def compute(self, target, source, gradient, patch_size, iterations):
self.energy = numpy.zeros(1)
self.images = [target, source]
self.target_size = self.images[0].shape
self.source_size = self.images[1].shape
self.patch_size = patch_size
self.effective_target_size = [self.target_size[i] - patch_size[i] + 1
for i in (0, 1)]
self.effective_source_size = [self.source_size[i] - patch_size[i] + 1
for i in (0, 1)]
assert all(x > 0 for x in self.effective_target_size), "Target dimensions too small."
assert all(x > 0 for x in self.effective_source_size), "Source dimensions too small."
self.nff = numpy.ndarray(
(self.effective_target_size[0], self.effective_target_size[1], 3))
self.occurrence_map = numpy.zeros(
(self.source_size[0], self.source_size[1]), dtype=int)
source_pixels = self.source_size[0] * self.source_size[1]
target_pixels = self.target_size[0] * self.target_size[1]
patch_pixels = self.patch_size[0] * self.patch_size[1]
self.const_occ = source_pixels / float(target_pixels * (patch_pixels ** 2) * (patch_pixels ** 2))
# neighborhood matching (patchmatch)
self.nff_buf = cl.Buffer(self.cl_context, cl.mem_flags.READ_WRITE |
cl.mem_flags.COPY_HOST_PTR, hostbuf=self.nff)
self.images_buf = [cl.Buffer(self.cl_context, cl.mem_flags.READ_ONLY |
cl.mem_flags.COPY_HOST_PTR, hostbuf=self.images[i])
for i in [0, 1]]
self.occurrence_map_buf = cl.Buffer(
self.cl_context, cl.mem_flags.READ_WRITE |
cl.mem_flags.COPY_HOST_PTR, hostbuf=self.occurrence_map)
self.random_fill()
if self.lambda_occ > 0:
self.build_occurence_map()
self.initialize_distance()
for i in range(iterations):
self.iteration = i + 1
self.propagate()
if self.lambda_occ > 0:
self.build_occurence_map()
# appearance gradient
self.energy_buf = cl.Buffer(
self.cl_context, cl.mem_flags.WRITE_ONLY |
cl.mem_flags.COPY_HOST_PTR, hostbuf=self.energy)
if gradient is not None:
self.gradient_buf = cl.Buffer(
self.cl_context, cl.mem_flags.WRITE_ONLY |
cl.mem_flags.COPY_HOST_PTR, hostbuf=gradient)
self.build_gradient()
cl.enqueue_read_buffer(self.queue, self.gradient_buf, gradient).wait()
cl.enqueue_read_buffer(self.queue, self.energy_buf, self.energy).wait()
# Experimental: appearance energy normalization (better convergence)
if self.appearance_norm_weight > 0:
norm_term = (self.effective_target_size[0] * self.effective_target_size[1] *
self.patch_size[0] * self.patch_size[1]) / self.appearance_norm_weight
if gradient is not None:
gradient[:] /= norm_term
self.energy[0] /= norm_term
return self.energy[0]
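# Editor's note: illustrative sketch only, not part of this module. compute()
# above sizes the nearest-neighbour field as (dim - patch + 1) per axis and,
# when appearance_norm_weight > 0, divides the energy and gradient by
# effective_target_area * patch_area / appearance_norm_weight. The helper
# below redoes that arithmetic with plain ints so the scaling is easy to
# check; the example numbers are hypothetical.
def _demo_norm_term(target_size, patch_size, appearance_norm_weight):
    effective = [target_size[i] - patch_size[i] + 1 for i in (0, 1)]
    assert all(x > 0 for x in effective), "patch larger than target"
    return (effective[0] * effective[1] *
            patch_size[0] * patch_size[1]) / appearance_norm_weight
# e.g. _demo_norm_term((64, 64, 3), (7, 7), 1.0) == 58 * 58 * 7 * 7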
| []
| []
| [
"PYOPENCL_COMPILER_OUTPUT"
]
| [] | ["PYOPENCL_COMPILER_OUTPUT"] | python | 1 | 0 | |
pkg/configs/server_config.go | package configs
import (
"net/http"
"os"
"strconv"
"time"
"github.com/create-go-app/net_http-go-template/pkg/utils"
"github.com/gorilla/mux"
)
// ServerConfig returns a configured net/http server for the given router.
func ServerConfig(router *mux.Router) *http.Server {
// Define server settings:
serverConnURL, _ := utils.ConnectionURLBuilder("server")
readTimeoutSecondsCount, _ := strconv.Atoi(os.Getenv("SERVER_READ_TIMEOUT"))
// Return server configuration.
return &http.Server{
Handler: router,
Addr: serverConnURL,
ReadTimeout: time.Second * time.Duration(readTimeoutSecondsCount),
}
}
| [
"\"SERVER_READ_TIMEOUT\""
]
| []
| [
"SERVER_READ_TIMEOUT"
]
| [] | ["SERVER_READ_TIMEOUT"] | go | 1 | 0 | |
cmd/detectExecuteScan_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type detectExecuteScanOptions struct {
Token string `json:"token,omitempty"`
CodeLocation string `json:"codeLocation,omitempty"`
ProjectName string `json:"projectName,omitempty"`
Scanners []string `json:"scanners,omitempty"`
ScanPaths []string `json:"scanPaths,omitempty"`
ScanProperties []string `json:"scanProperties,omitempty"`
ServerURL string `json:"serverUrl,omitempty"`
Groups []string `json:"groups,omitempty"`
FailOn []string `json:"failOn,omitempty"`
Version string `json:"version,omitempty"`
VersioningModel string `json:"versioningModel,omitempty"`
ProjectSettingsFile string `json:"projectSettingsFile,omitempty"`
GlobalSettingsFile string `json:"globalSettingsFile,omitempty"`
M2Path string `json:"m2Path,omitempty"`
}
// DetectExecuteScanCommand Executes Synopsys Detect scan
func DetectExecuteScanCommand() *cobra.Command {
const STEP_NAME = "detectExecuteScan"
metadata := detectExecuteScanMetadata()
var stepConfig detectExecuteScanOptions
var startTime time.Time
var createDetectExecuteScanCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Executes Synopsys Detect scan",
Long: `This step executes [Synopsys Detect](https://synopsys.atlassian.net/wiki/spaces/INTDOCS/pages/62423113/Synopsys+Detect) scans.
Synopsys Detect command line utility can be used to run various scans including BlackDuck and Polaris scans. This step allows users to run BlackDuck scans by default.
Please configure your BlackDuck server URL using the serverUrl parameter and the API token of your user using the apiToken parameter for this step.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Token)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
detectExecuteScan(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addDetectExecuteScanFlags(createDetectExecuteScanCmd, &stepConfig)
return createDetectExecuteScanCmd
}
func addDetectExecuteScanFlags(cmd *cobra.Command, stepConfig *detectExecuteScanOptions) {
cmd.Flags().StringVar(&stepConfig.Token, "token", os.Getenv("PIPER_token"), "Api token to be used for connectivity with Synopsis Detect server.")
cmd.Flags().StringVar(&stepConfig.CodeLocation, "codeLocation", os.Getenv("PIPER_codeLocation"), "An override for the name Detect will use for the scan file it creates.")
cmd.Flags().StringVar(&stepConfig.ProjectName, "projectName", os.Getenv("PIPER_projectName"), "Name of the Synopsis Detect (formerly BlackDuck) project.")
cmd.Flags().StringSliceVar(&stepConfig.Scanners, "scanners", []string{`signature`}, "List of scanners to be used for Synopsis Detect (formerly BlackDuck) scan.")
cmd.Flags().StringSliceVar(&stepConfig.ScanPaths, "scanPaths", []string{`.`}, "List of paths which should be scanned by the Synopsis Detect (formerly BlackDuck) scan.")
cmd.Flags().StringSliceVar(&stepConfig.ScanProperties, "scanProperties", []string{`--blackduck.signature.scanner.memory=4096`, `--blackduck.timeout=6000`, `--blackduck.trust.cert=true`, `--detect.report.timeout=4800`, `--logging.level.com.synopsys.integration=DEBUG`, `--detect.maven.excluded.scopes=test`}, "Properties passed to the Synopsis Detect (formerly BlackDuck) scan. You can find details in the [Synopsis Detect documentation](https://synopsys.atlassian.net/wiki/spaces/INTDOCS/pages/622846/Using+Synopsys+Detect+Properties)")
cmd.Flags().StringVar(&stepConfig.ServerURL, "serverUrl", os.Getenv("PIPER_serverUrl"), "Server URL to the Synopsis Detect (formerly BlackDuck) Server.")
cmd.Flags().StringSliceVar(&stepConfig.Groups, "groups", []string{}, "Users groups to be assigned for the Project")
cmd.Flags().StringSliceVar(&stepConfig.FailOn, "failOn", []string{`BLOCKER`}, "Mark the current build as fail based on the policy categories applied.")
cmd.Flags().StringVar(&stepConfig.Version, "version", os.Getenv("PIPER_version"), "Defines the version number of the artifact being build in the pipeline. It is used as source for the Detect version.")
cmd.Flags().StringVar(&stepConfig.VersioningModel, "versioningModel", `major`, "The versioning model used for result reporting (based on the artifact version). Example 1.2.3 using `major` will result in version 1")
cmd.Flags().StringVar(&stepConfig.ProjectSettingsFile, "projectSettingsFile", os.Getenv("PIPER_projectSettingsFile"), "Path or url to the mvn settings file that should be used as project settings file.")
cmd.Flags().StringVar(&stepConfig.GlobalSettingsFile, "globalSettingsFile", os.Getenv("PIPER_globalSettingsFile"), "Path or url to the mvn settings file that should be used as global settings file")
cmd.Flags().StringVar(&stepConfig.M2Path, "m2Path", os.Getenv("PIPER_m2Path"), "Path to the location of the local repository that should be used.")
cmd.MarkFlagRequired("token")
cmd.MarkFlagRequired("projectName")
cmd.MarkFlagRequired("serverUrl")
}
// retrieve step metadata
func detectExecuteScanMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "detectExecuteScan",
Aliases: []config.Alias{},
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "token",
ResourceRef: []config.ResourceReference{
{
Name: "detectTokenCredentialsId",
Type: "secret",
},
{
Name: "",
Paths: []string{"$(vaultPath)/detect", "$(vaultBasePath)/$(vaultPipelineName)/detect", "$(vaultBasePath)/GROUP-SECRETS/detect"},
Type: "vaultSecret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "blackduckToken"}, {Name: "detectToken"}, {Name: "apiToken"}, {Name: "detect/apiToken"}},
},
{
Name: "codeLocation",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "projectName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "detect/projectName"}},
},
{
Name: "scanners",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "detect/scanners"}},
},
{
Name: "scanPaths",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "detect/scanPaths"}},
},
{
Name: "scanProperties",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "detect/scanProperties"}},
},
{
Name: "serverUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "detect/serverUrl"}},
},
{
Name: "groups",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "detect/groups"}},
},
{
Name: "failOn",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "detect/failOn"}},
},
{
Name: "version",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "artifactVersion",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "projectVersion"}, {Name: "detect/projectVersion"}},
},
{
Name: "versioningModel",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "GENERAL", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "projectSettingsFile",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/projectSettingsFile"}},
},
{
Name: "globalSettingsFile",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/globalSettingsFile"}},
},
{
Name: "m2Path",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/m2Path"}},
},
},
},
},
}
return theMetaData
}
| [
"\"PIPER_token\"",
"\"PIPER_codeLocation\"",
"\"PIPER_projectName\"",
"\"PIPER_serverUrl\"",
"\"PIPER_version\"",
"\"PIPER_projectSettingsFile\"",
"\"PIPER_globalSettingsFile\"",
"\"PIPER_m2Path\""
]
| []
| [
"PIPER_version",
"PIPER_token",
"PIPER_codeLocation",
"PIPER_globalSettingsFile",
"PIPER_serverUrl",
"PIPER_m2Path",
"PIPER_projectName",
"PIPER_projectSettingsFile"
]
| [] | ["PIPER_version", "PIPER_token", "PIPER_codeLocation", "PIPER_globalSettingsFile", "PIPER_serverUrl", "PIPER_m2Path", "PIPER_projectName", "PIPER_projectSettingsFile"] | go | 8 | 0 | |
cachet/cachet.go | package cachet
import (
"encoding/json"
"fmt"
"github.com/go-chat-bot/bot"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
)
const (
statusFailed = 4
)
var (
cachetAPI = os.Getenv("CACHET_API")
configFilePath = os.Getenv("CACHET_ALERT_CONFIG")
outageReportConfig []ChannelConfig
pastOutageNotifications map[string]time.Time
pastOutageMutex = sync.RWMutex{}
)
// cachetComponents is the Go representation of https://docs.cachethq.io/reference#get-components
type cachetComponents struct {
Meta struct {
Pagination struct {
Total int `json:"total"`
Count int `json:"count"`
PerPage int `json:"per_page"`
CurrentPage int `json:"current_page"`
TotalPages int `json:"total_pages"`
Links struct {
NextPage string `json:"next_page"`
PreviousPage string `json:"previous_page"`
} `json:"links"`
} `json:"pagination"`
} `json:"meta"`
Data []struct {
ID int `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Link string `json:"link"`
Status int `json:"status"`
Order int `json:"order"`
GroupID int `json:"group_id"`
Enabled bool `json:"enabled"`
Meta interface{} `json:"meta"`
CreatedAt string `json:"created_at"`
UpdatedAt string `json:"updated_at"`
DeletedAt interface{} `json:"deleted_at"`
StatusName string `json:"status_name"`
Tags []interface{} `json:"tags"`
} `json:"data"`
}
// ChannelConfig is the representation of the alert configuration for a single channel
type ChannelConfig struct {
Channel string `json:"channel"`
Services []string `json:"services"`
RepeatGap int `json:"repeatGap"`
}
func cachetGetComponentsFromURL(url string) (components cachetComponents, err error) {
err = nil
log.Printf("Getting components from Cachet URL %s", url)
resp, err := http.Get(url)
if err != nil {
log.Printf("Cachet API call failed: %v", err)
return
}
if resp.StatusCode != 200 {
log.Printf("Cachet API call failed with: %d", resp.StatusCode)
err = fmt.Errorf("Cachet API call failed with code: %d", resp.StatusCode)
return
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Printf("Failed reading cachet response body: %v", err)
return
}
err = json.Unmarshal(body, &components)
if err != nil {
log.Printf("Failed to unmarshal JSON response: %v", err)
return
}
return
}
func cachetGetComponentNames(params string) (names []string, err error) {
url := fmt.Sprintf("%s/v1/components?%s", cachetAPI, params)
var components cachetComponents
for {
components, err = cachetGetComponentsFromURL(url)
if err != nil {
return
}
for _, component := range components.Data {
names = append(names, component.Name)
}
url = components.Meta.Pagination.Links.NextPage
if url == "" {
// end of paging
break
}
}
return
}
func getChannelNamesForServiceNotification(service string) (ret []string) {
for _, channelConfig := range outageReportConfig {
for _, serviceName := range channelConfig.Services {
if service == serviceName {
ret = append(ret, channelConfig.Channel)
}
}
}
return
}
func getChannelConfig(channel string) (ret *ChannelConfig) {
for i := range outageReportConfig {
if outageReportConfig[i].Channel == channel {
return &outageReportConfig[i]
}
}
return nil
}
func recordOutage(channel string, service string) {
cc := getChannelConfig(channel)
if cc == nil {
log.Printf("Could not find channel config for %s", channel)
return
}
until := time.Now().UTC().Add(time.Duration(cc.RepeatGap) * time.Minute)
key := fmt.Sprintf("%s-%s", channel, service)
pastOutageMutex.Lock()
pastOutageNotifications[key] = until
pastOutageMutex.Unlock()
go func() {
time.Sleep(time.Duration(cc.RepeatGap) * time.Minute)
pastOutageMutex.Lock()
delete(pastOutageNotifications, key)
pastOutageMutex.Unlock()
}()
}
func checkCachet() (ret []bot.CmdResult, err error) {
failedNames, err := cachetGetComponentNames("status=4")
if err != nil {
log.Printf("Failure while getting failed components: %v", err)
return
}
anyChannels := getChannelNamesForServiceNotification("any")
for _, failedService := range failedNames {
notifyChannels := []string{}
notifyChannels = append(notifyChannels, anyChannels...)
notifyChannels = append(notifyChannels,
getChannelNamesForServiceNotification(failedService)...)
log.Printf("Reporting alerts for %s to %s", failedService, notifyChannels)
for _, notifyChannel := range notifyChannels {
key := fmt.Sprintf("%s-%s", notifyChannel, failedService)
pastOutageMutex.RLock()
until, found := pastOutageNotifications[key]
pastOutageMutex.RUnlock()
if found {
log.Printf("Skipping notification for %s in %s (until %v)",
failedService, notifyChannel, until)
continue
}
recordOutage(notifyChannel, failedService)
log.Printf("Alerting about %s outage in %s", failedService, notifyChannel)
ret = append(ret, bot.CmdResult{
Message: fmt.Sprintf("Service '%s' is in outage as per %s",
failedService, cachetAPI),
Channel: notifyChannel,
})
}
}
return
}
func reloadConfig() {
configFile, err := os.Open(configFilePath)
if err != nil {
log.Printf("Failed to open config file: %v", err)
return
}
defer configFile.Close()
decoder := json.NewDecoder(configFile)
err = decoder.Decode(&outageReportConfig)
if err != nil {
log.Printf("Failed to parse config file: %v", err)
return
}
log.Printf("Loaded config: %v", outageReportConfig)
}
func saveConfig() {
log.Printf("Config before save: %v", outageReportConfig)
configFile, err := os.Create(configFilePath)
if err != nil {
log.Printf("Failed to open/write config file: %v", err)
return
}
defer configFile.Close()
encoder := json.NewEncoder(configFile)
err = encoder.Encode(&outageReportConfig)
if err != nil {
log.Printf("Failed to encode config file: %v", err)
return
}
}
func getChannelKey(cmd *bot.Cmd) string {
if cmd.ChannelData.IsPrivate {
return cmd.User.Nick
}
return cmd.Channel
}
func listComponents(cmd *bot.Cmd) (bot.CmdResultV3, error) {
componentNames, err := cachetGetComponentNames("")
log.Printf("Listing services in %s", getChannelKey(cmd))
result := bot.CmdResultV3{
Channel: getChannelKey(cmd),
Message: make(chan string),
Done: make(chan bool, 1)}
if err != nil {
log.Printf("Failed getting components from cachet: %v", err)
result.Message <- fmt.Sprintf("Failed getting components from cachet: %v", err)
result.Done <- true
return result, err
}
go func() {
result.Message <- "Services known in cachet:"
curMsgLen := 0
curComponents := []string{}
for _, componentName := range componentNames {
if curMsgLen > 80 {
log.Printf("Returning partial list of components: %v", curComponents)
result.Message <- strings.Join(curComponents, ", ")
curMsgLen = 0
curComponents = []string{}
}
curMsgLen = curMsgLen + len(componentName)
curComponents = append(curComponents, componentName)
}
log.Printf("Returning last part of components: %v", curComponents)
result.Message <- strings.Join(curComponents, ", ")
result.Done <- true
}()
return result, err
}
func listSubscriptions(cmd *bot.Cmd) (string, error) {
channelKey := getChannelKey(cmd)
channelConfig := getChannelConfig(channelKey)
if channelConfig != nil && channelConfig.Channel == channelKey {
return fmt.Sprintf("This channel is subscribed to notifications for: %v",
channelConfig.Services), nil
}
return "This channel has no subscriptions", nil
}
func subscribeChannel(cmd *bot.Cmd) (ret string, err error) {
if len(cmd.Args) != 1 {
return "Expecting 1 argument: <name of service>", nil
}
channelKey := getChannelKey(cmd)
newService := cmd.Args[0]
channelConfig := getChannelConfig(channelKey)
ret = fmt.Sprintf("Succesfully subscribed channel %s to outage notifications for '%s'",
channelKey, newService)
defer saveConfig()
if channelConfig == nil {
log.Printf("Channel %s has no config yet. Adding new one", channelKey)
outageReportConfig = append(outageReportConfig, ChannelConfig{
Channel: channelKey,
Services: []string{newService},
RepeatGap: 5,
})
return
}
for _, service := range channelConfig.Services {
if service == newService {
return fmt.Sprintf(
"This channel is already subscribed to '%s' outage notifications",
service), nil
}
}
log.Printf("Channel already has a config. Appending new service notification")
channelConfig.Services = append(channelConfig.Services, newService)
log.Printf("New notifications: %s", channelConfig.Services)
return
}
func unsubscribeChannel(cmd *bot.Cmd) (string, error) {
if len(cmd.Args) != 1 {
return "Expecting 1 argument: <name of service>", nil
}
channelKey := getChannelKey(cmd)
channelConfig := getChannelConfig(channelKey)
newService := cmd.Args[0]
if channelConfig == nil {
return "Channel is not subscribed to anything", nil
}
newServices := []string{}
for _, service := range channelConfig.Services {
if service == newService {
continue
}
newServices = append(newServices, service)
}
log.Printf("Channel already has a config. Appending new service notification")
channelConfig.Services = newServices
log.Printf("New notifications: %s", channelConfig.Services)
saveConfig()
return fmt.Sprintf(
"Succesfully unsubscribed channel %s from outage notifications for %s",
channelKey, newService), nil
}
func outageRepeatGap(cmd *bot.Cmd) (ret string, err error) {
if len(cmd.Args) != 1 {
return "Expecting 1 argument: <number of minutes>", nil
}
min, err := strconv.Atoi(cmd.Args[0])
if err != nil {
return "Argument must be exactly 1 number (of minutes between notifications)", nil
}
channelKey := getChannelKey(cmd)
channelConfig := getChannelConfig(channelKey)
defer saveConfig()
ret = fmt.Sprintf("Succesfully configured notification gap to be %d minutes", min)
if channelConfig == nil {
log.Printf("Channel has no config yet. Adding new one")
outageReportConfig = append(outageReportConfig, ChannelConfig{
Channel: cmd.Channel,
Services: []string{},
RepeatGap: min,
})
return
}
channelConfig.RepeatGap = min
return
}
func init() {
pastOutageNotifications = make(map[string]time.Time)
reloadConfig()
bot.RegisterPeriodicCommandV2(
"systemStatusCheck",
bot.PeriodicConfig{
CronSpec: "@every 1m",
CmdFuncV2: checkCachet,
})
bot.RegisterCommandV3(
"services",
"List services available for subscriptions",
"",
listComponents)
bot.RegisterCommand(
"subscriptions",
"Lists active outage subscriptions",
"",
listSubscriptions)
bot.RegisterCommand(
"subscribe",
"Subscribes this channel to outage notifications of specific service (or 'any' for all outages)",
"<service>",
subscribeChannel)
bot.RegisterCommand(
"unsubscribe",
"Unsubscribes this channel from outage notifications of specific service",
"<service>",
unsubscribeChannel)
bot.RegisterCommand(
"repeatgap",
"Sets number of minutes between notification of specific service outage",
"60",
outageRepeatGap)
}
| [
"\"CACHET_API\"",
"\"CACHET_ALERT_CONFIG\""
]
| []
| [
"CACHET_API",
"CACHET_ALERT_CONFIG"
]
| [] | ["CACHET_API", "CACHET_ALERT_CONFIG"] | go | 2 | 0 | |
tfx/tools/cli/testdata/test_pipeline_local_1.py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chicago taxi example using TFX on Local orchestrator."""
import os
from typing import Text
import absl
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.components.schema_gen.component import SchemaGen
from tfx.components.statistics_gen.component import StatisticsGen
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.local import local_dag_runner
from tfx.utils.dsl_utils import external_input
_pipeline_name = 'chicago_taxi_local'
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
metadata_path: Text) -> pipeline.Pipeline:
"""Implements the chicago taxi pipeline with TFX."""
examples = external_input(data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input=examples)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[example_gen, statistics_gen, infer_schema],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
additional_pipeline_args={},
)
if __name__ == '__main__':
absl.logging.set_verbosity(absl.logging.INFO)
local_dag_runner.LocalDagRunner().run(
_create_pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
data_root=_data_root,
metadata_path=_metadata_path))
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
docker/docker.go | package docker
import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"strconv"
"strings"
"time"
client "github.com/docker/docker/client"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/volume"
"github.com/containerd/containerd/reference"
"github.com/play-with-docker/play-with-docker/config"
)
const (
Byte = 1
Kilobyte = 1024 * Byte
Megabyte = 1024 * Kilobyte
)
type DockerApi interface {
GetClient() *client.Client
NetworkCreate(id string, opts types.NetworkCreate) error
NetworkConnect(container, network, ip string) (string, error)
NetworkInspect(id string) (types.NetworkResource, error)
NetworkDelete(id string) error
NetworkDisconnect(containerId, networkId string) error
DaemonInfo() (types.Info, error)
DaemonHost() string
GetSwarmPorts() ([]string, []uint16, error)
GetPorts() ([]uint16, error)
ContainerStats(name string) (io.ReadCloser, error)
ContainerResize(name string, rows, cols uint) error
ContainerRename(old, new string) error
ContainerDelete(name string) error
ContainerCreate(opts CreateContainerOpts) error
ContainerIPs(id string) (map[string]string, error)
ExecAttach(instanceName string, command []string, out io.Writer) (int, error)
Exec(instanceName string, command []string) (int, error)
CreateAttachConnection(name string) (net.Conn, error)
CopyToContainer(containerName, destination, fileName string, content io.Reader) error
CopyFromContainer(containerName, filePath string) (io.Reader, error)
SwarmInit(advertiseAddr string) (*SwarmTokens, error)
SwarmJoin(addr, token string) error
ConfigCreate(name string, labels map[string]string, data []byte) error
ConfigDelete(name string) error
}
type SwarmTokens struct {
Manager string
Worker string
}
type docker struct {
c *client.Client
}
func (d *docker) GetClient() *client.Client {
return d.c
}
func (d *docker) ConfigCreate(name string, labels map[string]string, data []byte) error {
config := swarm.ConfigSpec{}
config.Name = name
config.Labels = labels
config.Data = data
_, err := d.c.ConfigCreate(context.Background(), config)
return err
}
func (d *docker) ConfigDelete(name string) error {
return d.c.ConfigRemove(context.Background(), name)
}
func (d *docker) NetworkCreate(id string, opts types.NetworkCreate) error {
_, err := d.c.NetworkCreate(context.Background(), id, opts)
if err != nil {
log.Printf("Starting session err [%s]\n", err)
return err
}
return nil
}
func (d *docker) NetworkConnect(containerId, networkId, ip string) (string, error) {
settings := &network.EndpointSettings{}
if ip != "" {
settings.IPAddress = ip
}
err := d.c.NetworkConnect(context.Background(), networkId, containerId, settings)
if err != nil && !strings.Contains(err.Error(), "already exists") {
log.Printf("Connection container to network err [%s]\n", err)
return "", err
}
// Obtain the IP of the PWD container in this network
container, err := d.c.ContainerInspect(context.Background(), containerId)
if err != nil {
return "", err
}
n, found := container.NetworkSettings.Networks[networkId]
if !found {
return "", fmt.Errorf("Container [%s] connected to the network [%s] but couldn't obtain it's IP address", containerId, networkId)
}
return n.IPAddress, nil
}
func (d *docker) NetworkInspect(id string) (types.NetworkResource, error) {
return d.c.NetworkInspect(context.Background(), id, types.NetworkInspectOptions{})
}
func (d *docker) DaemonInfo() (types.Info, error) {
return d.c.Info(context.Background())
}
func (d *docker) DaemonHost() string {
return d.c.DaemonHost()
}
func (d *docker) GetSwarmPorts() ([]string, []uint16, error) {
hosts := []string{}
ports := []uint16{}
nodesIdx := map[string]string{}
nodes, nodesErr := d.c.NodeList(context.Background(), types.NodeListOptions{})
if nodesErr != nil {
return nil, nil, nodesErr
}
for _, n := range nodes {
nodesIdx[n.ID] = n.Description.Hostname
hosts = append(hosts, n.Description.Hostname)
}
services, err := d.c.ServiceList(context.Background(), types.ServiceListOptions{})
if err != nil {
return nil, nil, err
}
for _, service := range services {
for _, p := range service.Endpoint.Ports {
ports = append(ports, uint16(p.PublishedPort))
}
}
return hosts, ports, nil
}
func (d *docker) GetPorts() ([]uint16, error) {
opts := types.ContainerListOptions{}
containers, err := d.c.ContainerList(context.Background(), opts)
if err != nil {
return nil, err
}
openPorts := []uint16{}
for _, c := range containers {
for _, p := range c.Ports {
			// When a port is not published on the host, docker returns the public port as 0, so we skip it
if p.PublicPort != 0 {
openPorts = append(openPorts, p.PublicPort)
}
}
}
return openPorts, nil
}
func (d *docker) ContainerStats(name string) (io.ReadCloser, error) {
stats, err := d.c.ContainerStats(context.Background(), name, false)
return stats.Body, err
}
func (d *docker) ContainerResize(name string, rows, cols uint) error {
return d.c.ContainerResize(context.Background(), name, types.ResizeOptions{Height: rows, Width: cols})
}
func (d *docker) ContainerRename(old, new string) error {
return d.c.ContainerRename(context.Background(), old, new)
}
func (d *docker) CreateAttachConnection(name string) (net.Conn, error) {
ctx := context.Background()
	conf := types.ContainerAttachOptions{Stream: true, Stdin: true, Stdout: true, Stderr: true, DetachKeys: "ctrl-^,ctrl-^", Logs: true}
conn, err := d.c.ContainerAttach(ctx, name, conf)
if err != nil {
return nil, err
}
return conn.Conn, nil
}
func (d *docker) CopyToContainer(containerName, destination, fileName string, content io.Reader) error {
contents, err := ioutil.ReadAll(content)
if err != nil {
return err
}
var buf bytes.Buffer
t := tar.NewWriter(&buf)
if err := t.WriteHeader(&tar.Header{Name: fileName, Mode: 0600, Size: int64(len(contents)), Uid: 9999, Gid: 9999, ModTime: time.Now()}); err != nil {
return err
}
if _, err := t.Write(contents); err != nil {
return err
}
if err := t.Close(); err != nil {
return err
}
return d.c.CopyToContainer(context.Background(), containerName, destination, &buf, types.CopyToContainerOptions{AllowOverwriteDirWithFile: true})
}
func (d *docker) CopyFromContainer(containerName, filePath string) (io.Reader, error) {
rc, stat, err := d.c.CopyFromContainer(context.Background(), containerName, filePath)
if err != nil {
return nil, err
}
if stat.Mode.IsDir() {
return nil, fmt.Errorf("Copying directories is not supported")
}
tr := tar.NewReader(rc)
// advance to the only possible file in the tar archive
tr.Next()
return tr, nil
}
func (d *docker) ContainerDelete(name string) error {
err := d.c.ContainerRemove(context.Background(), name, types.ContainerRemoveOptions{Force: true, RemoveVolumes: true})
d.c.VolumeRemove(context.Background(), name, true)
return err
}
type CreateContainerOpts struct {
Image string
SessionId string
ContainerName string
Hostname string
ServerCert []byte
ServerKey []byte
CACert []byte
Privileged bool
HostFQDN string
Labels map[string]string
Networks []string
DindVolumeSize string
Envs []string
}
func (d *docker) ContainerCreate(opts CreateContainerOpts) (err error) {
// Make sure directories are available for the new instance container
containerDir := "/opt/pwd"
containerCertDir := fmt.Sprintf("%s/certs", containerDir)
env := append(opts.Envs, fmt.Sprintf("SESSION_ID=%s", opts.SessionId))
// Write certs to container cert dir
if len(opts.ServerCert) > 0 {
env = append(env, `DOCKER_TLSCERT=\/opt\/pwd\/certs\/cert.pem`)
}
if len(opts.ServerKey) > 0 {
env = append(env, `DOCKER_TLSKEY=\/opt\/pwd\/certs\/key.pem`)
}
if len(opts.CACert) > 0 {
// if a CA cert is specified, verify that connecting clients present a certificate signed by the CA
env = append(env, `DOCKER_TLSCACERT=\/opt\/pwd\/certs\/ca.pem`)
}
if len(opts.ServerCert) > 0 || len(opts.ServerKey) > 0 || len(opts.CACert) > 0 {
// if any of the certs is specified, enable TLS
env = append(env, "DOCKER_TLSENABLE=true")
} else {
env = append(env, "DOCKER_TLSENABLE=false")
}
h := &container.HostConfig{
NetworkMode: container.NetworkMode(opts.SessionId),
Privileged: opts.Privileged,
AutoRemove: true,
LogConfig: container.LogConfig{Config: map[string]string{"max-size": "10m", "max-file": "1"}},
}
if os.Getenv("APPARMOR_PROFILE") != "" {
h.SecurityOpt = []string{fmt.Sprintf("apparmor=%s", os.Getenv("APPARMOR_PROFILE"))}
}
if os.Getenv("STORAGE_SIZE") != "" {
// assign a fixed-size filesystem (STORAGE_SIZE) to each container
h.StorageOpt = map[string]string{"size": os.Getenv("STORAGE_SIZE")}
}
var pidsLimit = int64(1000)
if envLimit := os.Getenv("MAX_PROCESSES"); envLimit != "" {
if i, err := strconv.Atoi(envLimit); err == nil {
pidsLimit = int64(i)
}
}
h.Resources.PidsLimit = &pidsLimit
if config.UseGPU {
gpu := container.DeviceRequest{}
gpu.Driver = "nvidia"
gpu.Count = -1
gpu.Capabilities = append(gpu.Capabilities, []string{"gpu"})
h.Resources.DeviceRequests = append(h.Resources.DeviceRequests, gpu)
}
if memLimit := os.Getenv("MAX_MEMORY_MB"); memLimit != "" {
if i, err := strconv.Atoi(memLimit); err == nil {
h.Resources.Memory = int64(i) * Megabyte
}
}
t := true
h.Resources.OomKillDisable = &t
env = append(env, fmt.Sprintf("PWD_HOST_FQDN=%s", opts.HostFQDN))
cf := &container.Config{
Hostname: opts.Hostname,
Image: opts.Image,
Tty: true,
OpenStdin: true,
AttachStdin: true,
AttachStdout: true,
AttachStderr: true,
Env: env,
Labels: opts.Labels,
}
networkConf := &network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{opts.Networks[0]: &network.EndpointSettings{}},
}
if config.ExternalDindVolume {
_, err = d.c.VolumeCreate(context.Background(), volume.VolumeCreateBody{
Driver: "xfsvol",
DriverOpts: map[string]string{
"size": opts.DindVolumeSize,
},
Name: opts.ContainerName,
})
if err != nil {
return
}
h.Binds = []string{fmt.Sprintf("%s:/var/lib/docker", opts.ContainerName)}
defer func() {
if err != nil {
// best-effort cleanup of the volume created above (named after the container) if a later step fails
d.c.VolumeRemove(context.Background(), opts.ContainerName, true)
}
}()
}
container, err := d.c.ContainerCreate(context.Background(), cf, h, networkConf, opts.ContainerName)
if err != nil {
//if client.IsErrImageNotFound(err) {
//log.Printf("Unable to find image '%s' locally\n", opts.Image)
//if err = d.pullImage(context.Background(), opts.Image); err != nil {
//return "", err
//}
//container, err = d.c.ContainerCreate(context.Background(), cf, h, networkConf, opts.ContainerName)
//if err != nil {
//return "", err
//}
//} else {
return err
//}
}
// connect the remaining networks, if there are any
if len(opts.Networks) > 1 {
for _, nid := range opts.Networks[1:] {
err = d.c.NetworkConnect(context.Background(), nid, container.ID, &network.EndpointSettings{})
if err != nil {
return
}
}
}
if err = d.copyIfSet(opts.ServerCert, "cert.pem", containerCertDir, opts.ContainerName); err != nil {
return
}
if err = d.copyIfSet(opts.ServerKey, "key.pem", containerCertDir, opts.ContainerName); err != nil {
return
}
if err = d.copyIfSet(opts.CACert, "ca.pem", containerCertDir, opts.ContainerName); err != nil {
return
}
err = d.c.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{})
if err != nil {
return
}
return
}
func (d *docker) ContainerIPs(id string) (map[string]string, error) {
cinfo, err := d.c.ContainerInspect(context.Background(), id)
if err != nil {
return nil, err
}
ips := map[string]string{}
for networkId, conf := range cinfo.NetworkSettings.Networks {
ips[networkId] = conf.IPAddress
}
return ips, nil
}
func (d *docker) pullImage(ctx context.Context, image string) error {
_, err := reference.Parse(image)
if err != nil {
return err
}
options := types.ImageCreateOptions{}
responseBody, err := d.c.ImageCreate(ctx, image, options)
if err != nil {
return err
}
_, err = io.Copy(ioutil.Discard, responseBody)
return err
}
func (d *docker) copyIfSet(content []byte, fileName, path, containerName string) error {
if len(content) > 0 {
return d.CopyToContainer(containerName, path, fileName, bytes.NewReader(content))
}
return nil
}
func (d *docker) ExecAttach(instanceName string, command []string, out io.Writer) (int, error) {
e, err := d.c.ContainerExecCreate(context.Background(), instanceName, types.ExecConfig{Cmd: command, AttachStdout: true, AttachStderr: true, Tty: true})
if err != nil {
return 0, err
}
resp, err := d.c.ContainerExecAttach(context.Background(), e.ID, types.ExecStartCheck{Tty: true})
if err != nil {
return 0, err
}
io.Copy(out, resp.Reader)
var ins types.ContainerExecInspect
for range time.Tick(1 * time.Second) {
ins, err = d.c.ContainerExecInspect(context.Background(), e.ID)
if ins.Running {
continue
}
if err != nil {
return 0, err
}
break
}
return ins.ExitCode, nil
}
func (d *docker) Exec(instanceName string, command []string) (int, error) {
e, err := d.c.ContainerExecCreate(context.Background(), instanceName, types.ExecConfig{Cmd: command})
if err != nil {
return 0, err
}
err = d.c.ContainerExecStart(context.Background(), e.ID, types.ExecStartCheck{})
if err != nil {
return 0, err
}
var ins types.ContainerExecInspect
for range time.Tick(1 * time.Second) {
ins, err = d.c.ContainerExecInspect(context.Background(), e.ID)
if ins.Running {
continue
}
if err != nil {
return 0, err
}
break
}
return ins.ExitCode, nil
}
func (d *docker) NetworkDisconnect(containerId, networkId string) error {
err := d.c.NetworkDisconnect(context.Background(), networkId, containerId, true)
if err != nil {
log.Printf("Disconnection of container from network err [%s]\n", err)
return err
}
return nil
}
func (d *docker) NetworkDelete(id string) error {
err := d.c.NetworkRemove(context.Background(), id)
if err != nil {
return err
}
return nil
}
func (d *docker) SwarmInit(advertiseAddr string) (*SwarmTokens, error) {
req := swarm.InitRequest{AdvertiseAddr: advertiseAddr, ListenAddr: "0.0.0.0:2377"}
_, err := d.c.SwarmInit(context.Background(), req)
if err != nil {
return nil, err
}
swarmInfo, err := d.c.SwarmInspect(context.Background())
if err != nil {
return nil, err
}
return &SwarmTokens{
Worker: swarmInfo.JoinTokens.Worker,
Manager: swarmInfo.JoinTokens.Manager,
}, nil
}
func (d *docker) SwarmJoin(addr, token string) error {
req := swarm.JoinRequest{RemoteAddrs: []string{addr}, JoinToken: token, ListenAddr: "0.0.0.0:2377", AdvertiseAddr: "eth0"}
return d.c.SwarmJoin(context.Background(), req)
}
func NewDocker(c *client.Client) *docker {
return &docker{c: c}
}
| [
"\"APPARMOR_PROFILE\"",
"\"APPARMOR_PROFILE\"",
"\"STORAGE_SIZE\"",
"\"STORAGE_SIZE\"",
"\"MAX_PROCESSES\"",
"\"MAX_MEMORY_MB\""
]
| []
| [
"APPARMOR_PROFILE",
"STORAGE_SIZE",
"MAX_MEMORY_MB",
"MAX_PROCESSES"
]
| [] | ["APPARMOR_PROFILE", "STORAGE_SIZE", "MAX_MEMORY_MB", "MAX_PROCESSES"] | go | 4 | 0 | |
tools/nnpackage_tool/gen_golden/gen_golden.py | #!/usr/bin/env python3
# Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import sys
import argparse
import numpy as np
# cmd arguments parsing
def usage():
script = os.path.basename(__file__)
print("Usage: {} [-o out_dir] path_to_pb_or_tflite".format(script))
sys.exit(-1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'modelfile',
type=str,
help='path to modelfile in either graph_def (.pb) or tflite (.tflite)')
parser.add_argument(
'-o', '--output', action='store', dest="out_dir", help="output directory")
args = parser.parse_args()
if len(sys.argv) == 1:
parser.parse_args()
sys.exit(1)
filename = args.modelfile
if args.out_dir:
out_dir = args.out_dir + '/'
else:
out_dir = "./"
_, extension = os.path.splitext(filename)
input_names = []
output_names = []
input_dtypes = []
output_dtypes = []
input_values = []
output_values = []
if extension == ".pb":
# import graph_def (pb)
graph = tf.compat.v1.get_default_graph()
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile(filename, 'rb') as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
# identify input names and output names
ops = graph.get_operations()
input_names = [op.outputs[0].name for op in ops if op.type == "Placeholder"]
output_names = [tensor.name for op in ops for tensor in op.outputs]
for op in ops:
for t in op.inputs:
if t.name in output_names:
output_names.remove(t.name)
# identify input dtypes and output dtypes
input_dtypes = [graph.get_tensor_by_name(name).dtype for name in input_names]
output_dtypes = [graph.get_tensor_by_name(name).dtype for name in output_names]
# gen random input values
for idx in range(len(input_names)):
this_shape = graph.get_tensor_by_name(input_names[idx]).shape
this_dtype = input_dtypes[idx]
if this_dtype == tf.uint8:
input_values.append(
np.random.randint(0, 255, this_shape).astype(np.uint8))
elif this_dtype == tf.int8:
input_values.append(
np.random.randint(-127, 127, this_shape).astype(np.int8))
elif this_dtype == tf.float32:
input_values.append(
np.random.random_sample(this_shape).astype(np.float32))
elif this_dtype == tf.bool:
# generate random integer from [0, 2)
input_values.append(
np.random.randint(2, size=this_shape).astype(np.bool_))
elif this_dtype == tf.int32:
input_values.append(np.random.randint(0, 99, this_shape).astype(np.int32))
elif this_dtype == tf.int64:
input_values.append(np.random.randint(0, 99, this_shape).astype(np.int64))
# get output values by running
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
with tf.compat.v1.Session(config=config) as sess:
output_values = sess.run(
output_names, feed_dict=dict(zip(input_names, input_values)))
elif extension == ".tflite":
# load TFLite model and allocate tensors
interpreter = tf.lite.Interpreter(filename)
interpreter.allocate_tensors()
# get list of tensors details for input/output
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# identify input names and output names
input_names = [d['name'] for d in input_details]
output_names = [d['name'] for d in output_details]
# identify input dtypes and output dtypes
input_dtypes = [d['dtype'] for d in input_details]
output_dtypes = [d['dtype'] for d in output_details]
# gen random input values and set tensor
for idx in range(len(input_details)):
this_shape = input_details[idx]['shape']
this_dtype = input_details[idx]['dtype']
if this_dtype == np.uint8:
input_values.append(
np.random.randint(0, 255, this_shape).astype(np.uint8))
elif this_dtype == np.int8:
input_values.append(
np.random.randint(-127, 127, this_shape).astype(np.int8))
elif this_dtype == np.float32:
input_values.append(
np.random.random_sample(this_shape).astype(np.float32))
elif this_dtype == np.bool_:
# generate random integer from [0, 2)
input_values.append(
np.random.randint(2, size=this_shape).astype(np.bool_))
elif this_dtype == np.int32:
input_values.append(np.random.randint(0, 99, this_shape).astype(np.int32))
elif this_dtype == np.int64:
input_values.append(np.random.randint(0, 99, this_shape).astype(np.int64))
interpreter.set_tensor(input_details[idx]['index'], input_values[idx])
# get output values by running
interpreter.invoke()
for idx in range(len(output_details)):
output_values.append(interpreter.get_tensor(output_details[idx]['index']))
else:
print("Only .pb and .tflite models are supported.")
sys.exit(-1)
# dump input and output in h5
import h5py
supported_dtypes = ("float32", "uint8", "int8", "bool", "int32", "int64")
h5dtypes = {
"float32": ">f4",
"uint8": "u1",
"int8": "i1",
"bool": "u1",
"int32": "int32",
"int64": "int64"
}
with h5py.File(out_dir + "input.h5", 'w') as hf:
name_grp = hf.create_group("name")
val_grp = hf.create_group("value")
for idx, t in enumerate(input_names):
dtype = tf.compat.v1.as_dtype(input_dtypes[idx])
if not dtype.name in supported_dtypes:
print("ERR: Supported input types are {}".format(supported_dtypes))
sys.exit(-1)
val_grp.create_dataset(
str(idx), data=input_values[idx], dtype=h5dtypes[dtype.name])
name_grp.attrs[str(idx)] = input_names[idx]
with h5py.File(out_dir + "expected.h5", 'w') as hf:
name_grp = hf.create_group("name")
val_grp = hf.create_group("value")
for idx, t in enumerate(output_names):
dtype = tf.compat.v1.as_dtype(output_dtypes[idx])
if not dtype.name in supported_dtypes:
print("ERR: Supported output types are {}".format(supported_dtypes))
sys.exit(-1)
val_grp.create_dataset(
str(idx), data=output_values[idx], dtype=h5dtypes[dtype.name])
name_grp.attrs[str(idx)] = output_names[idx]
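# --- Hedged usage sketch (not part of the original tool) ---
# Based on the argparse setup and the h5py writes above, a typical invocation is assumed
# to look like:
#   python gen_golden.py model.tflite -o ./golden
# which writes ./golden/input.h5 and ./golden/expected.h5. Each file holds a "name" group
# whose attrs map a dataset index to a tensor name, and a "value" group with the arrays.
# A minimal read-back sketch (assuming only h5py) would be:
#   import h5py
#   with h5py.File("./golden/expected.h5", "r") as hf:
#       for idx in sorted(hf["value"].keys(), key=int):
#           print(hf["name"].attrs[idx], hf["value"][idx][...])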
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
cmd/clone_github.go | package cmd
import (
"context"
"os"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
)
func getGitHubOrgCloneUrls() ([]string, error) {
ctx := context.Background()
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: os.Getenv("GHORG_GITHUB_TOKEN")},
)
tc := oauth2.NewClient(ctx, ts)
client := github.NewClient(tc)
opt := &github.RepositoryListByOrgOptions{
Type: "all",
ListOptions: github.ListOptions{PerPage: 100, Page: 0},
}
// get all pages of results
var allRepos []*github.Repository
for {
repos, resp, err := client.Repositories.ListByOrg(context.Background(), args[0], opt)
if err != nil {
return nil, err
}
allRepos = append(allRepos, repos...)
if resp.NextPage == 0 {
break
}
opt.Page = resp.NextPage
}
cloneUrls := []string{}
for _, repo := range allRepos {
if os.Getenv("GHORG_SKIP_ARCHIVED") == "true" {
if *repo.Archived {
continue
}
}
if os.Getenv("GHORG_CLONE_PROTOCOL") == "https" {
cloneUrls = append(cloneUrls, *repo.CloneURL)
} else {
cloneUrls = append(cloneUrls, *repo.SSHURL)
}
}
return cloneUrls, nil
}
// TODO: refactor with getAllOrgCloneUrls
func getGitHubUserCloneUrls() ([]string, error) {
ctx := context.Background()
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: os.Getenv("GHORG_GITHUB_TOKEN")},
)
tc := oauth2.NewClient(ctx, ts)
client := github.NewClient(tc)
opt := &github.RepositoryListOptions{
Type: "all",
ListOptions: github.ListOptions{PerPage: 100, Page: 0},
}
// get all pages of results
var allRepos []*github.Repository
for {
repos, resp, err := client.Repositories.List(context.Background(), args[0], opt)
if err != nil {
return nil, err
}
allRepos = append(allRepos, repos...)
if resp.NextPage == 0 {
break
}
opt.Page = resp.NextPage
}
cloneUrls := []string{}
for _, repo := range allRepos {
if os.Getenv("GHORG_SKIP_ARCHIVED") == "true" {
if *repo.Archived {
continue
}
}
if os.Getenv("GHORG_CLONE_PROTOCOL") == "https" {
cloneUrls = append(cloneUrls, *repo.CloneURL)
} else {
cloneUrls = append(cloneUrls, *repo.SSHURL)
}
}
return cloneUrls, nil
}
| [
"\"GHORG_GITHUB_TOKEN\"",
"\"GHORG_SKIP_ARCHIVED\"",
"\"GHORG_CLONE_PROTOCOL\"",
"\"GHORG_GITHUB_TOKEN\"",
"\"GHORG_SKIP_ARCHIVED\"",
"\"GHORG_CLONE_PROTOCOL\""
]
| []
| [
"GHORG_SKIP_ARCHIVED",
"GHORG_GITHUB_TOKEN",
"GHORG_CLONE_PROTOCOL"
]
| [] | ["GHORG_SKIP_ARCHIVED", "GHORG_GITHUB_TOKEN", "GHORG_CLONE_PROTOCOL"] | go | 3 | 0 | |
src/matching/matching/wsgi.py | """
WSGI config for matching project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "matching.settings")
application = get_wsgi_application()
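# Hedged note (not part of the original file): given the module path implied by this
# project layout, a typical WSGI server invocation is assumed to be something like
#   gunicorn matching.wsgi:application
# run from the directory containing the "matching" package, so that the
# DJANGO_SETTINGS_MODULE default set above ("matching.settings") resolves.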
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/dnc/setup.py | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages
# Version of the extension; keep it in sync with the HISTORY.rst entry.
VERSION = '0.1.3'
try:
from azext_dnc.manual.version import VERSION
except ImportError:
pass
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = []
try:
from azext_dnc.manual.dependency import DEPENDENCIES
except ImportError:
pass
with open('README.md', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='dnc',
version=VERSION,
description='Microsoft Azure Command-Line Tools DNC Extension',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-cli-extensions/tree/master/src/dnc',
long_description=README + '\n\n' + HISTORY,
license='MIT',
classifiers=CLASSIFIERS,
packages=find_packages(),
install_requires=DEPENDENCIES,
package_data={'azext_dnc': ['azext_metadata.json']},
)
| []
| []
| []
| [] | [] | python | null | null | null |
applications/dashboard/controllers/backupdefault.py | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
#########################################################################
@auth.requires_login()
def index():
"""
example action using the internationalization operator T and flash
rendered by views/default/index.html or views/generic.html
if you need a simple wiki simply replace the two lines below with:
return auth.wiki()
"""
# response.flash = T("Hello World")
return dict(message=T('Welcome to web2py!'))
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
http://..../[app]/default/user/manage_users (requires membership in
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
return dict(form=auth())
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
def upload_logfile():
## This script is for parsing Acquisuite data only.
## If we get another unit, we'll add another script!
from datetime import datetime
import gzip
import io
import os
import boto.dynamodb2
from boto.dynamodb2.table import Table
## This means that its sending acquisuite info - not device info
if request.vars['MODE']=='STATUS':
time=datetime.now()
print "["+str(time)+"] "+"Recieved a mode of "+ str(request.vars['MODE'])
## Connect to Dynamo
conn=boto.dynamodb2.connect_to_region(
'us-east-1',
aws_access_key_id=os.environ['AWS_DYNAMO_KEY'],
aws_secret_access_key=os.environ['AWS_DYNAMO_SECRET']
)
## Fetch Table that keeps acquisuite info, passing the conn object we just made
table = Table('das_attributes',connection=conn)
## Must assign value to hash key, in this case it is serial number
data={
'serial_number':request.vars['SERIALNUMBER'],
}
## Add the remainder of the data into the table
## After the hash key it doesn't matter what they are called
## Because we already used serial number as the hash key
## We don't need to also have it as an arbitrary name value pair
for key in request.vars:
if key!='SERIALNUMBER':
data[key]=request.vars[key]
## The exception commented out below would only come up if overwrite=True were removed from the put_item call
# try:
table.put_item(data, overwrite=True)
# except boto.dynamodb2.exceptions.ConditionalCheckFailedException:
# table.get_item(serial_number=data['serial_number'])
## Now that the info is in dynamo, put some basic info in the db that web2py talks with more easily
db.das_config.update_or_insert(
db.das_config.das_id==request.vars['SERIALNUMBER'],
das_id=request.vars['SERIALNUMBER'],
serial_number=request.vars['SERIALNUMBER'],
last_modified=datetime.now(),
)
print "["+str(time)+"] "+"Successfully updated data for "+ str(request.vars['SERIALNUMBER'])
return dict(status="SUCCESS")
## This means we are getting data from a device
elif request.vars['MODE']=='LOGFILEUPLOAD':
time=datetime.now()
print "["+str(time)+"] "+"Recieved a mode of "+ str(request.vars['MODE'])
print "["+str(time)+"] "+"Logfile upload started!"
## Check that there is a logfile in the request
field_storage_object=request.vars['LOGFILE']
## If for some reason there isn't actually a LOGFILE url variable then return failure
if field_storage_object==None:
print "["+str(time)+"] "+"No logfile found"
return dict(status="FAILURE")
## If there is a log file continue on!
else:
## Save the device information
## The device id is going to be the serial_number of the parent unit and the modbus address of that unit,
## separated by an underscore.
device_id=request.vars['SERIALNUMBER']+'_'+request.vars['MODBUSDEVICE']
print "["+str(time)+"] "+"["+str(device_id)+"] "+ "Device ID found"
## The log_filename
log_filename=field_storage_object.name
print "["+str(time)+"] "+"["+str(device_id)+"] "+"The filename is: "+str(log_filename)+". As always."
## First thing is to save the logfile in case a false success is achieved!
## logfiles are stored in the log_files table
## At this point we already know we have a logfile in the url so.....
## This will save duplicate log files :/
db.log_files.insert(
device_id=device_id,
log_filename=log_filename,
log_file=field_storage_object,
date_added=datetime.now(),
)
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Logfile saved!"
## add device info locally.
db.device_config.update_or_insert(
db.device_config.device_id==device_id,
device_id=device_id,
das_id=request.vars['SERIALNUMBER'],
last_modified=datetime.now(),
)
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Device info updated!"
## Commit changes in case errors happen before db io
## This saves the files to an S3 bucket
db.commit()
## If we get passed that part, then we can move on to putting the data in Dynamo
## Create the connection object to talk to dynamo
conn=boto.dynamodb2.connect_to_region(
'us-east-1',
aws_access_key_id=os.environ['AWS_DYNAMO_KEY'],
aws_secret_access_key=os.environ['AWS_DYNAMO_SECRET'],
)
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Created connection object"
## Fetch Table that keeps device info (passing in our connection object).
## We are going to overwrite the current values for the device
## like uptime, parent DAS, etc.
table = Table('device_attributes',connection=conn)
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Connected to device attributes table"
## The hash key is the device id! So let's start off the data dictionary (which will go into
## a call to the db later) with it.
data=dict(device_id=device_id)
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Started data dict for device attributes table"
## Add the remainder of the data into the table
## After the hash key it doesn't matter what they are called
## Careful not to try to store the LOGFILE in the timeseries nosql db.
## That would be silly
for key in request.vars:
if key!='LOGFILE':
data[key]=request.vars[key]
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Now printing the current state of the data dict for device attributes\n"+str(data)
## Again, without overwrite this would throw an exception every time (except the first time)
## Will think of a better way to do this at some point.
table.put_item(data, overwrite=True)
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Just updated the device attributes table in aws"
## now we are ready to deal with the actual data
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Beginning the process of saving the interval data"
try:
## Now get what fields you want to collect
## This says - look in db table device config for a device with id device_id, then from the records that match (should be 1),
## only select the field device_field_groups. Take the first record (again, should only be one) and give me just the value
## without the dot operator at the end it would be a dictionary
device_field_group = db(db.device_config.device_id==device_id).select(db.device_config.device_field_groups).first().device_field_groups
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Device field group: " + str(device_field_group)
## So we have the name of the group
## Now we can get the fields that we want to collect
device_fields_collect = db(db.device_field_groups.field_group_name==device_field_group).select().first().field_group_columns
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Device fields collect:" + str(device_fields_collect)
## If for some reason there are no fields to get, or getting the fields causes an error
## set the variable to ALL
except:
print "["+str(time)+"] "+"["+str(device_id)+"] "+"There was a problem, using ALL instead"
device_fields_collect='ALL'
if device_fields_collect==None:
print "["+str(time)+"] "+"["+str(device_id)+"] "+"There was a problem, using ALL instead"
device_fields_collect='ALL'
## The file is gzipped (even when they send naturally every 15 minutes)
## I can put a check in at some point, but for now it's assumed.
## use the native gzip library to read in the gzip file
## which came from the url variable, which web2py turned into a python fieldstorage object
## which web2py then put back into the post vars as LOGFILE.
## If the mode of r is not passed in (rb in python3), then it will assume the
## default type which is WRITE (I know right) and it will actually complain that
## you are trying to read from a write-only file :/
## I had to add the io.BytesIO wrapper around the file before giving it to
## gzip. Why? I have no idea. But I was just following this thread:
## http://stackoverflow.com/questions/4204604/how-can-i-create-a-gzipfile-instance-from-the-file-like-object-that-urllib-url
file_handle=gzip.GzipFile(fileobj=io.BytesIO(field_storage_object.value), mode='r')
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Just created the file handle"
## This line reads the entire contents of the file into a string.
## I hope the files don't get too big!
## I tried using readlines which auto chops up the lines into items of a list
## BUT it gives an error, I guess gzip produces a slightly different type of file handle
## than the standard python 'open' construct.
#file_data_as_string=file_handle.read()
## If you don't do this, then you will have an empty line at the end of your file and get all the index errors
## I'm actually still getting some index errors with this included. But it's likely because there was an error line?
## It turned out it was just blank lines
#file_data_as_string=file_data_as_string.strip()
## readlines turns the file into a list of lines in the file
## for some reason I think it leaves the last newline in the last list item or something
## Don't quote me on that though.
lines=file_handle.readlines()
if len(lines)==0:
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Lines is length 0, aborting"
return dict(status="FAILURE")
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Just made the lines list"
## Connect to the timeseries table, this table has a hash and a range key
## The hash key is the timeseries name (which I'm setting to the device ID for now)
## and the timestamp (which I'm using the utc string ts straight from the DAS for now)
## It is close to ISO format anyway.
timeseriestable = Table('timeseriestable',connection=conn)
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Connected to time series table using same connection object"
## At this point, we need to know what data we are saving from this particular device.
## Acquisuites do a pretty good job of keeping things in the same order across lines of devices etc
## What I'm going to do is assume that if I can't find a configuration for the particular DEVICE, then I
## will save all parameters related to that device.
## If there is a config file found, it will consist of a flag for include or exclude and the columns
## to include or exclude.
## So where do I keep this config information!
## at the device level of course in a table that lists devices (Serialnumber_modbusaddress)
## In another field it will list the flag, in a third field it will have the columns
## if it fails to interpret what is placed in either field it will save all the information to dynamo
## So basically, look for the config info in a table called device_config?
print "["+str(time)+"] "+"["+str(device_id)+"] "+"About to enter the with loop for batch writing"
## This with clause is for batch writing.
with timeseriestable.batch_write() as batch:
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Inside the with clause"
for line in lines:
## Get rid of whitespace at the beginning and end of the row
line=line.strip()
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Line:\n"+str(line)
## Seperate the 'row' into what would be cells if opened in csv or excel format
cells=line.split(',')
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Cells:\n"+str(cells)
## for testing purposes get the ts
## the second slice is to remove the quotes that the acquisuite sends around the ts
timestamp=cells[0][1:-1]
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Timestamp:\n"+timestamp
## Start of the data dictionary with the timeseriesname and the timestamp
data=dict(
timeseriesname=device_id,
timestamp=timestamp,
)
if device_fields_collect=='ALL':
for index in range(len(cells)):
data[device_id+'__'+str(index)]=cells[int(index)]
else:
for index in device_fields_collect:
if index<0:
index = len(cells)+index
data[device_id+'__'+str(index)]=cells[int(index)]
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Data dict for timeseries table:\n"+str(data)
## populate the context manager with our requests
## when the with clause is natrually exited, the batch write request will occur.
## This is where I should fill up the other fields by default and have mappings
## to configured names and allow user to "include only" or "exclude"
# batch.put_item(data=dict(
# timeseriesname=device_id,
# timestamp=timestamp,
# cumulative_electric_usage_kwh=cumulative_reading,
# ))
db.debug_tbl.insert(
error_message=str(data),
other_info=str(datetime.now()),
row_text=str(line),
cell_text=str(cells),
timestamp_text=str(timestamp),
)
db.commit()
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Added stuff to debug table"
batch.put_item(data)
# except IndexError:
# ## Save the lines that counldn't be added
# db.debug_tbl.insert(error_message=str(cells), other_info=str(datetime.now()))
# db.commit()
print "["+str(time)+"] "+"["+str(device_id)+"] "+"Finished added stuff to timeseries table for this device"
return dict(status="SUCCESS")
## If the mode is not supported
else:
time=datetime.now()
print "["+str(time)+"] "+"Recieved a MODE of "+ str(request.vars['MODE']) + ". This MODE is not supported"
return dict(status='MODE value not supported')
# Authenticate
# Check MODE
# If mode is LOGFILEUPLOAD
# check md5sum is same as sent value
# Save or update device information
# Do not use the name supplied by the fieldstorage object, instead supply your own name made up of validated pieces
# Account for the possibility that the name you come up with may exist already
## Loop through the file going a line at a time (one line will be one timestamp)
## Make sure the line length is > 0 but less than 512? why less than 512 bytes?
## put the data you want in the locations you want!
#Done!
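## Hedged illustration (derived from the parsing loop above, not from any Obvius docs):
## each log line is a comma separated row whose first cell is a quoted timestamp, and the
## loop keys every retained cell as <device_id>__<column index>. So a device
## '001EC600229C_250' configured with device_fields_collect=[4, -2, -1] would produce a
## DynamoDB item shaped roughly like
## {'timeseriesname': '001EC600229C_250', 'timestamp': '<ts>',
##  '001EC600229C_250__4': '<cell 4>', '001EC600229C_250__<n-2>': '<...>', '001EC600229C_250__<n-1>': '<...>'}
## where n is the number of cells in the row (negative indexes are converted to n+index).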
def parse_logfile_from_db():
import boto
import boto.s3
import io
import gzip
import os
import csv
import boto.dynamodb2
from boto.dynamodb2.table import Table
## Connection object with our keys
## These come from the environment which is supplied by the hosting service (heroku, beanstalk), or by the local environment
## via .bashrc. Google setting environment variables in python or something for your os.
conn=boto.s3.connect_to_region(
'us-east-1',
aws_access_key_id=os.environ['AWS_WSDS3_KEY'],
aws_secret_access_key=os.environ['AWS_WSDS3_SECRET']
)
## Get a sample logfile FILENAME from the db
## This only gets the FILENAME that was used to name the file in s3
## This code is generated automatically by web2py
## for now by id, but maybe later by device
log_file_name=db(db.log_files.id==request.args[0]).select().first().log_file
## connect to the bucket where we store logfiles
## boto has good docs for what get_bucket does and the other connection attributes
bucket=conn.get_bucket('wantsomedashboards')
## key is what boto uses to refer to items in the bucket, in this case it is a gzip file
## because I'm storing the logfiles in a folder of the bucket, the folder name is being added
## manually here.
## logfile='logfiles/log_files.log_file.800bed7f7f8c2857.6d622d3235302e35354231463331455f332e6c6f672e677a.gz'
## key=bucket.get_key(logfile)
key=bucket.get_key('logfiles/'+log_file_name)
## Thank you unutbu
## http://stackoverflow.com/questions/4204604/how-can-i-create-a-gzipfile-instance-from-the-file-like-object-that-urllib-url
## I used the above link to figure out how to actually get the gzip file to behave like a normal decoded file handle in python
fh = gzip.GzipFile(fileobj=io.BytesIO(key.read()))
data_LOD=[]
lines=fh.readlines()
device_fields_collect=[4,-2,-1]
# device_fields_collect='ALL'
conn_dynamo=boto.dynamodb2.connect_to_region(
'us-east-1',
aws_access_key_id=os.environ['AWS_DYNAMO_KEY'],
aws_secret_access_key=os.environ['AWS_DYNAMO_SECRET'],
)
timeseriestable = Table('timeseriestable',connection=conn_dynamo)
with timeseriestable.batch_write() as batch:
for line in lines:
line = line.strip()
cells = line.split(',')
device_id = 'TEST_3'
timestamp = cells[0][1:-1]
data=dict(
timeseriesname=device_id,
timestamp=timestamp,
)
if device_fields_collect=='ALL':
for index in range(len(cells)):
data[device_id+'__'+str(index)]=cells[int(index)]
else:
for index in device_fields_collect:
if index<0:
index = len(cells)+index
data[device_id+'__'+str(index)]=cells[int(index)]
data_LOD.append(data)
batch.put_item(data)
return locals()
def view_aws_info():
import boto.dynamodb2
from boto.dynamodb2.table import Table
import os
from datetime import datetime
conn=boto.dynamodb2.connect_to_region(
'us-east-1',
aws_access_key_id=os.environ['AWS_DYNAMO_KEY'],
aws_secret_access_key=os.environ['AWS_DYNAMO_SECRET']
)
# print conn.list_tables()
# print request.args[0]
table = Table(request.args[0],connection=conn)
all_entries=table.scan()
return dict(all_entries=all_entries)
def view_aws_timeseries():
import boto.dynamodb2
from boto.dynamodb2.table import Table
import os
from datetime import datetime
conn=boto.dynamodb2.connect_to_region(
'us-east-1',
aws_access_key_id=os.environ['AWS_DYNAMO_KEY'],
aws_secret_access_key=os.environ['AWS_DYNAMO_SECRET']
)
tst = Table('timeseriestable',connection=conn)
timeseriesname=request.args[0]
timeseriesdata=tst.query_2(
timeseriesname__eq=timeseriesname,
consistent=True,
)
timeserieslist=[]
for entry in timeseriesdata:
timeserieslist.append([entry['timeseriesname'],
entry['timestamp'],
entry['cumulative_electric_usage_kwh']]
)
return dict(timeserieslist=timeserieslist)
def iframe_test():
return dict()
def datatables():
return dict()
def view_aws_datatables():
return dict()
def ajax_view_aws_timeseries():
import boto.dynamodb2
from boto.dynamodb2.table import Table
import os, json
from datetime import datetime
#print request.vars
conn=boto.dynamodb2.connect_to_region(
'us-east-1',
aws_access_key_id=os.environ['AWS_DYNAMO_KEY'],
aws_secret_access_key=os.environ['AWS_DYNAMO_SECRET']
)
tst = Table('timeseriestable',connection=conn)
timeseriesname=request.args[0]
# timeseriesname='001EC600229C_250'
timeseriesdata=tst.query_2(
timeseriesname__eq=timeseriesname,
consistent=True,
)
timeserieslist=[]
for entry in timeseriesdata:
timeserieslist.append([entry['timeseriesname'],
entry['timestamp'],
entry['cumulative_electric_usage_kwh']]
)
items=int(request.vars['length'])
start=int(request.vars['start'])
draw=int(request.vars['draw'])
end=start+items
data_dict=dict(
draw=draw,
recordsTotal=len(timeserieslist),
recordsFiltered=len(timeserieslist),
data=timeserieslist[start:end]
)
#return dict(timeserieslist=timeserieslist)
# print json.dumps(data_dict)
# return json.dumps(timeserieslist)
return json.dumps(data_dict)
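## Hedged note (not in the original): the dict serialized above matches the DataTables
## server-side processing response shape (draw, recordsTotal, recordsFiltered, data), so a
## table in the datatables() view could point at this action with serverSide: true; paging
## is driven by the start/length request vars consumed above.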
def ajax_graph_aws_timeseries():
import boto.dynamodb2
from boto.dynamodb2.table import Table
import os, json
from datetime import datetime
# print request.vars
conn=boto.dynamodb2.connect_to_region(
'us-east-1',
aws_access_key_id=os.environ['AWS_DYNAMO_KEY'],
aws_secret_access_key=os.environ['AWS_DYNAMO_SECRET']
)
tst = Table('timeseriestable',connection=conn)
timeseriesname=request.args[0]
# timeseriesname='001EC600229C_250'
timeseriesdata=tst.query_2(
timeseriesname__eq=timeseriesname,
consistent=True,
)
timeserieslist=[]
datalist=[]
for entry in timeseriesdata:
if entry['timestamp'][0]=="'":
timeserieslist.append([
entry['timeseriesname'],
entry['timestamp'][1:-1],
entry['cumulative_electric_usage_kwh']
])
else:
timeserieslist.append([
entry['timeseriesname'],
entry['timestamp'],
entry['cumulative_electric_usage_kwh']
])
datalist.append(entry['cumulative_electric_usage_kwh'])
# for i in range(1,len(timeserieslist)):
# timeserieslist[i][2]=float(timeserieslist[i][2])-float(timeserieslist[i-1][2])
# data_dict=dict(data1=timeserieslist)
# items=int(request.vars['length'])
# start=int(request.vars['start'])
# draw=int(request.vars['draw'])
# end=start+items
# data_dict=dict(
# draw=draw,
# recordsTotal=len(timeserieslist),
# recordsFiltered=len(timeserieslist),
# data=timeserieslist[start:end]
# )
#return dict(timeserieslist=timeserieslist)
# print json.dumps(data_dict)
return json.dumps(timeserieslist[-100:])
# return json.dumps(data_dict)
def d3play():
return dict()
def device():
return dict()
def success():
return dict()
def ajax_dynamo_delete_das():
das_id=request.args[0]
return True
def view_table():
table_name=request.vars['table_name']
if request.vars['db']=='dynamo':
return dict(message="Not Supported Yet")
elif request.vars['db']=='postgres':
grid=SQLFORM.grid(db[table_name])
return dict(grid=grid)
def das_test():
# das_list=list(db().select(db.das_config.das_id)
# if '001EC600229C' in das_list:
# print "yay"
# return dict(das_list=das_list)
device_id=request.args[0]
device_field_group = db(db.device_config.device_id==device_id).select(db.device_config.device_field_groups).first().device_field_groups
device_fields_collect = db(db.device_field_groups.field_group_name==device_field_group).select().first().field_group_columns
# list[int(slice[:slice.index(':')]):int(slice[slice.index(':')+1:])]
return dict(var1=device_field_group, var2=device_fields_collect)
| []
| []
| [
"AWS_DYNAMO_SECRET",
"AWS_WSDS3_KEY",
"AWS_DYNAMO_KEY",
"AWS_WSDS3_SECRET"
]
| [] | ["AWS_DYNAMO_SECRET", "AWS_WSDS3_KEY", "AWS_DYNAMO_KEY", "AWS_WSDS3_SECRET"] | python | 4 | 0 | |
app/models/__init__.py | import os
from sqlalchemy import Column, Integer, String
from flask_sqlalchemy import SQLAlchemy
database_path = os.environ.get('DATABASE_URL')
db = SQLAlchemy()
def create_db(app=None):
app.config['SQLALCHEMY_DATABASE_URI'] = database_path
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.app = app
db.init_app(app)
return db
class User(db.Model):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(50), nullable=False)
email = Column(String(50), nullable=False, unique=True)
password = Column(String(), nullable=False)
token = Column(String(), nullable=False)
def __init__(self, name, email, password, token):
self.name = name
self.email = email
self.password = password
self.token = token
def add(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
| []
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | python | 1 | 0 | |
test/test_model.py | #!/usr/bin/python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import json
import ipaddress
import os
import pathlib
from textwrap import dedent
import unittest
import ops.model
import ops.charm
import ops.pebble
import ops.testing
from ops.charm import RelationMeta, RelationRole
from ops._private import yaml
from test.test_helpers import fake_script, fake_script_calls
class TestModel(unittest.TestCase):
def setUp(self):
self.harness = ops.testing.Harness(ops.charm.CharmBase, meta='''
name: myapp
provides:
db0:
interface: db0
requires:
db1:
interface: db1
peers:
db2:
interface: db2
resources:
foo: {type: file, filename: foo.txt}
bar: {type: file, filename: bar.txt}
''')
self.addCleanup(self.harness.cleanup)
self.relation_id_db0 = self.harness.add_relation('db0', 'db')
self.model = self.harness.model
def test_model_attributes(self):
self.assertIs(self.model.app, self.model.unit.app)
self.assertIsNone(self.model.name)
def test_unit_immutable(self):
with self.assertRaises(AttributeError):
self.model.unit = object()
def test_app_immutable(self):
with self.assertRaises(AttributeError):
self.model.app = object()
def test_model_name_from_backend(self):
self.harness.set_model_name('default')
m = ops.model.Model(ops.charm.CharmMeta(), self.harness._backend)
self.assertEqual(m.name, 'default')
with self.assertRaises(AttributeError):
m.name = "changes-disallowed"
def test_relations_keys(self):
rel_app1 = self.harness.add_relation('db1', 'remoteapp1')
self.harness.add_relation_unit(rel_app1, 'remoteapp1/0')
self.harness.add_relation_unit(rel_app1, 'remoteapp1/1')
rel_app2 = self.harness.add_relation('db1', 'remoteapp2')
self.harness.add_relation_unit(rel_app2, 'remoteapp2/0')
# We invalidate db1 so that it causes us to reload it
self.model.relations._invalidate('db1')
self.resetBackendCalls()
for relation in self.model.relations['db1']:
self.assertIn(self.model.unit, relation.data)
unit_from_rel = next(filter(lambda u: u.name == 'myapp/0', relation.data.keys()))
self.assertIs(self.model.unit, unit_from_rel)
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', rel_app1),
('relation_list', rel_app2),
])
def test_relations_immutable(self):
with self.assertRaises(AttributeError):
self.model.relations = {}
def test_get_relation(self):
# one relation on db1
# two relations on db0
# no relations on db2
relation_id_db1 = self.harness.add_relation('db1', 'remoteapp1')
self.harness.add_relation_unit(relation_id_db1, 'remoteapp1/0')
relation_id_db0_b = self.harness.add_relation('db0', 'another')
self.resetBackendCalls()
with self.assertRaises(ops.model.ModelError):
# You have to specify it by just the integer ID
self.model.get_relation('db1', 'db1:{}'.format(relation_id_db1))
rel_db1 = self.model.get_relation('db1', relation_id_db1)
self.assertIsInstance(rel_db1, ops.model.Relation)
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', relation_id_db1),
])
dead_rel = self.model.get_relation('db1', 7)
self.assertIsInstance(dead_rel, ops.model.Relation)
self.assertEqual(set(dead_rel.data.keys()), {self.model.unit, self.model.unit.app})
self.assertEqual(dead_rel.data[self.model.unit], {})
self.assertBackendCalls([
('relation_list', 7),
('relation_remote_app_name', 7),
('relation_get', 7, 'myapp/0', False),
])
self.assertIsNone(self.model.get_relation('db2'))
self.assertBackendCalls([
('relation_ids', 'db2'),
])
self.assertIs(self.model.get_relation('db1'), rel_db1)
with self.assertRaises(ops.model.TooManyRelatedAppsError):
self.model.get_relation('db0')
self.assertBackendCalls([
('relation_ids', 'db0'),
('relation_list', self.relation_id_db0),
('relation_remote_app_name', 0),
('relation_list', relation_id_db0_b),
('relation_remote_app_name', 2),
])
def test_peer_relation_app(self):
self.harness.add_relation('db2', 'myapp')
rel_dbpeer = self.model.get_relation('db2')
self.assertIs(rel_dbpeer.app, self.model.app)
def test_remote_units_is_our(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.harness.add_relation_unit(relation_id, 'remoteapp1/1')
self.resetBackendCalls()
for u in self.model.get_relation('db1').units:
self.assertFalse(u._is_our_unit)
self.assertFalse(u.app._is_our_app)
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', relation_id)
])
def test_our_unit_is_our(self):
self.assertTrue(self.model.unit._is_our_unit)
self.assertTrue(self.model.unit.app._is_our_app)
def test_unit_relation_data(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.harness.update_relation_data(
relation_id,
'remoteapp1/0',
{'host': 'remoteapp1-0'})
self.model.relations._invalidate('db1')
self.resetBackendCalls()
random_unit = self.model.get_unit('randomunit/0')
with self.assertRaises(KeyError):
self.model.get_relation('db1').data[random_unit]
remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0',
self.model.get_relation('db1').units))
self.assertEqual(self.model.get_relation('db1').data[remoteapp1_0],
{'host': 'remoteapp1-0'})
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', relation_id),
('relation_get', relation_id, 'remoteapp1/0', False),
])
def test_remote_app_relation_data(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.update_relation_data(relation_id, 'remoteapp1', {'secret': 'cafedeadbeef'})
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.harness.add_relation_unit(relation_id, 'remoteapp1/1')
self.resetBackendCalls()
rel_db1 = self.model.get_relation('db1')
# Try to get relation data for an invalid remote application.
random_app = self.model._cache.get(ops.model.Application, 'randomapp')
with self.assertRaises(KeyError):
rel_db1.data[random_app]
remoteapp1 = rel_db1.app
self.assertEqual(remoteapp1.name, 'remoteapp1')
self.assertEqual(rel_db1.data[remoteapp1],
{'secret': 'cafedeadbeef'})
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', relation_id),
('relation_get', relation_id, 'remoteapp1', True),
])
def test_relation_data_modify_remote(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.update_relation_data(relation_id, 'remoteapp1', {'secret': 'cafedeadbeef'})
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.harness.update_relation_data(relation_id, 'remoteapp1/0', {'host': 'remoteapp1/0'})
self.model.relations._invalidate('db1')
self.resetBackendCalls()
rel_db1 = self.model.get_relation('db1')
remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0',
self.model.get_relation('db1').units))
# Force memory cache to be loaded.
self.assertIn('host', rel_db1.data[remoteapp1_0])
self.assertEqual(repr(rel_db1.data[remoteapp1_0]), "{'host': 'remoteapp1/0'}")
with self.assertRaises(ops.model.RelationDataError):
rel_db1.data[remoteapp1_0]['foo'] = 'bar'
self.assertNotIn('foo', rel_db1.data[remoteapp1_0])
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', relation_id),
('relation_get', relation_id, 'remoteapp1/0', False),
])
# this will fire more backend calls
# the CountEqual and weird (and brittle) splitting is to accommodate python 3.5
# TODO: switch to assertEqual when we drop 3.5
self.assertCountEqual(
repr(rel_db1.data)[1:-1].split(', '),
["<ops.model.Unit myapp/0>: {}",
"<ops.model.Application myapp>: {}",
"<ops.model.Unit remoteapp1/0>: {'host': 'remoteapp1/0'}",
"<ops.model.Application remoteapp1>: {'secret': 'cafedeadbeef'}"])
def test_relation_data_modify_our(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'nothing'})
self.resetBackendCalls()
rel_db1 = self.model.get_relation('db1')
# Force memory cache to be loaded.
self.assertIn('host', rel_db1.data[self.model.unit])
rel_db1.data[self.model.unit]['host'] = 'bar'
self.assertEqual(rel_db1.data[self.model.unit]['host'], 'bar')
self.assertBackendCalls([
('relation_get', relation_id, 'myapp/0', False),
('relation_set', relation_id, 'host', 'bar', False),
])
def test_app_relation_data_modify_local_as_leader(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.update_relation_data(relation_id, 'myapp', {'password': 'deadbeefcafe'})
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.harness.set_leader(True)
self.resetBackendCalls()
local_app = self.model.unit.app
rel_db1 = self.model.get_relation('db1')
self.assertEqual(rel_db1.data[local_app], {'password': 'deadbeefcafe'})
rel_db1.data[local_app]['password'] = 'foo'
self.assertEqual(rel_db1.data[local_app]['password'], 'foo')
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', relation_id),
('relation_get', relation_id, 'myapp', True),
('is_leader',),
('relation_set', relation_id, 'password', 'foo', True),
])
def test_app_relation_data_modify_local_as_minion(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.update_relation_data(relation_id, 'myapp', {'password': 'deadbeefcafe'})
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.harness.set_leader(False)
self.resetBackendCalls()
local_app = self.model.unit.app
rel_db1 = self.model.get_relation('db1')
self.assertEqual(rel_db1.data[local_app], {'password': 'deadbeefcafe'})
with self.assertRaises(ops.model.RelationDataError):
rel_db1.data[local_app]['password'] = 'foobar'
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', relation_id),
('relation_get', relation_id, 'myapp', True),
('is_leader',),
])
def test_relation_data_del_key(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'bar'})
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.resetBackendCalls()
rel_db1 = self.model.get_relation('db1')
# Force memory cache to be loaded.
self.assertIn('host', rel_db1.data[self.model.unit])
del rel_db1.data[self.model.unit]['host']
self.assertNotIn('host', rel_db1.data[self.model.unit])
self.assertEqual({}, self.harness.get_relation_data(relation_id, 'myapp/0'))
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', relation_id),
('relation_get', relation_id, 'myapp/0', False),
('relation_set', relation_id, 'host', '', False),
])
def test_relation_data_del_missing_key(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'bar'})
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.resetBackendCalls()
rel_db1 = self.model.get_relation('db1')
# Force memory cache to be loaded.
self.assertIn('host', rel_db1.data[self.model.unit])
rel_db1.data[self.model.unit]['port'] = '' # Same as a delete, should not fail.
self.assertNotIn('port', rel_db1.data[self.model.unit])
self.assertEqual({'host': 'bar'}, self.harness.get_relation_data(relation_id, 'myapp/0'))
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', relation_id),
('relation_get', relation_id, 'myapp/0', False),
('relation_set', relation_id, 'port', '', False),
])
def test_relation_set_fail(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'myapp-0'})
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.resetBackendCalls()
backend = self.harness._backend
# TODO: jam 2020-03-06 This is way too much information about relation_set
# The original test forced 'relation-set' to return exit code 2,
# but there was nothing illegal about the data that was being set,
# for us to properly test the side effects of relation-set failing.
def broken_relation_set(relation_id, key, value, is_app):
backend._calls.append(('relation_set', relation_id, key, value, is_app))
raise ops.model.ModelError()
backend.relation_set = broken_relation_set
rel_db1 = self.model.get_relation('db1')
# Force memory cache to be loaded.
self.assertIn('host', rel_db1.data[self.model.unit])
with self.assertRaises(ops.model.ModelError):
rel_db1.data[self.model.unit]['host'] = 'bar'
self.assertEqual(rel_db1.data[self.model.unit]['host'], 'myapp-0')
with self.assertRaises(ops.model.ModelError):
del rel_db1.data[self.model.unit]['host']
self.assertIn('host', rel_db1.data[self.model.unit])
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', relation_id),
('relation_get', relation_id, 'myapp/0', False),
('relation_set', relation_id, 'host', 'bar', False),
('relation_set', relation_id, 'host', '', False),
])
def test_relation_data_type_check(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'myapp-0'})
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.resetBackendCalls()
rel_db1 = self.model.get_relation('db1')
with self.assertRaises(ops.model.RelationDataError):
rel_db1.data[self.model.unit]['foo'] = 1
with self.assertRaises(ops.model.RelationDataError):
rel_db1.data[self.model.unit]['foo'] = {'foo': 'bar'}
with self.assertRaises(ops.model.RelationDataError):
rel_db1.data[self.model.unit]['foo'] = None
# No data has actually been changed
self.assertEqual(dict(rel_db1.data[self.model.unit]), {'host': 'myapp-0'})
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', relation_id),
('relation_get', relation_id, 'myapp/0', False),
])
def test_relation_no_units(self):
self.harness.add_relation('db1', 'remoteapp1')
rel = self.model.get_relation('db1')
self.assertEqual(rel.units, set())
self.assertIs(rel.app, self.model.get_app('remoteapp1'))
self.assertBackendCalls([
('relation_ids', 'db1'),
('relation_list', 1),
('relation_remote_app_name', 1),
])
def test_config(self):
self.harness.update_config({'foo': 'foo', 'bar': 1, 'qux': True})
self.assertEqual(self.model.config, {
'foo': 'foo',
'bar': 1,
'qux': True,
})
with self.assertRaises(TypeError):
# Confirm that we cannot modify config values.
self.model.config['foo'] = 'bar'
self.assertBackendCalls([('config_get',)])
def test_config_immutable(self):
with self.assertRaises(AttributeError):
self.model.config = {}
def test_is_leader(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.harness.set_leader(True)
self.resetBackendCalls()
def check_remote_units():
# Cannot determine leadership for remote units.
for u in self.model.get_relation('db1').units:
with self.assertRaises(RuntimeError):
u.is_leader()
self.assertTrue(self.model.unit.is_leader())
check_remote_units()
        # Changing leadership through the harness invalidates any cached
        # is-leader value, so the new value is visible immediately.
self.harness.set_leader(False)
self.assertFalse(self.model.unit.is_leader())
check_remote_units()
self.assertBackendCalls([
('is_leader',),
('relation_ids', 'db1'),
('relation_list', relation_id),
('is_leader',),
])
def test_workload_version(self):
self.model.unit.set_workload_version('1.2.3')
self.assertBackendCalls([
('application_version_set', '1.2.3'),
])
def test_workload_version_invalid(self):
with self.assertRaises(TypeError) as cm:
self.model.unit.set_workload_version(5)
self.assertEqual(str(cm.exception), "workload version must be a str, not int: 5")
self.assertBackendCalls([])
def test_resources(self):
with self.assertRaises(ops.model.ModelError):
self.harness.model.resources.fetch('foo')
self.harness.add_resource('foo', 'foo contents\n')
self.harness.add_resource('bar', '')
with self.assertRaises(RuntimeError):
self.harness.model.resources.fetch('qux')
self.assertEqual(self.harness.model.resources.fetch('foo').name, 'foo.txt')
self.assertEqual(self.harness.model.resources.fetch('bar').name, 'bar.txt')
def test_resources_immutable(self):
with self.assertRaises(AttributeError):
self.model.resources = object()
def test_pod_spec(self):
self.harness.set_leader(True)
self.harness.model.pod.set_spec({'foo': 'bar'})
self.assertEqual(self.harness.get_pod_spec(), ({'foo': 'bar'}, None))
self.harness.model.pod.set_spec({'bar': 'foo'}, {'qux': 'baz'})
self.assertEqual(self.harness.get_pod_spec(), ({'bar': 'foo'}, {'qux': 'baz'}))
        # A non-leader unit is not allowed to set the pod spec.
self.harness.set_leader(False)
with self.assertRaises(ops.model.ModelError):
self.harness.model.pod.set_spec({'foo': 'bar'})
def test_pod_immutable(self):
with self.assertRaises(AttributeError):
self.model.pod = object()
def test_base_status_instance_raises(self):
with self.assertRaises(TypeError):
ops.model.StatusBase('test')
class NoNameStatus(ops.model.StatusBase):
pass
with self.assertRaises(AttributeError):
ops.model.StatusBase.register_status(NoNameStatus)
def test_status_repr(self):
test_cases = {
"ActiveStatus('Seashell')": ops.model.ActiveStatus('Seashell'),
"MaintenanceStatus('Red')": ops.model.MaintenanceStatus('Red'),
"BlockedStatus('Magenta')": ops.model.BlockedStatus('Magenta'),
"WaitingStatus('Thistle')": ops.model.WaitingStatus('Thistle'),
'UnknownStatus()': ops.model.UnknownStatus(),
}
for expected, status in test_cases.items():
self.assertEqual(repr(status), expected)
def test_status_eq(self):
status_types = [
ops.model.ActiveStatus,
ops.model.MaintenanceStatus,
ops.model.BlockedStatus,
ops.model.WaitingStatus,
]
self.assertEqual(ops.model.UnknownStatus(), ops.model.UnknownStatus())
for (i, t1) in enumerate(status_types):
self.assertNotEqual(t1(''), ops.model.UnknownStatus())
for (j, t2) in enumerate(status_types):
self.assertNotEqual(t1('one'), t2('two'))
if i == j:
self.assertEqual(t1('one'), t2('one'))
else:
self.assertNotEqual(t1('one'), t2('one'))
def test_active_message_default(self):
self.assertEqual(ops.model.ActiveStatus().message, '')
def test_local_set_valid_unit_status(self):
test_cases = [(
'active',
ops.model.ActiveStatus('Green'),
('status_set', 'active', 'Green', {'is_app': False}),
), (
'maintenance',
ops.model.MaintenanceStatus('Yellow'),
('status_set', 'maintenance', 'Yellow', {'is_app': False}),
), (
'blocked',
ops.model.BlockedStatus('Red'),
('status_set', 'blocked', 'Red', {'is_app': False}),
), (
'waiting',
ops.model.WaitingStatus('White'),
('status_set', 'waiting', 'White', {'is_app': False}),
)]
for test_case, target_status, backend_call in test_cases:
with self.subTest(test_case):
self.model.unit.status = target_status
self.assertEqual(self.model.unit.status, target_status)
self.model.unit._invalidate()
self.assertEqual(self.model.unit.status, target_status)
self.assertBackendCalls([backend_call, ('status_get', {'is_app': False})])
def test_local_set_valid_app_status(self):
self.harness.set_leader(True)
test_cases = [(
'active',
ops.model.ActiveStatus('Green'),
('status_set', 'active', 'Green', {'is_app': True}),
), (
'maintenance',
ops.model.MaintenanceStatus('Yellow'),
('status_set', 'maintenance', 'Yellow', {'is_app': True}),
), (
'blocked',
ops.model.BlockedStatus('Red'),
('status_set', 'blocked', 'Red', {'is_app': True}),
), (
'waiting',
ops.model.WaitingStatus('White'),
('status_set', 'waiting', 'White', {'is_app': True}),
)]
for test_case, target_status, backend_call in test_cases:
with self.subTest(test_case):
self.model.app.status = target_status
self.assertEqual(self.model.app.status, target_status)
self.model.app._invalidate()
self.assertEqual(self.model.app.status, target_status)
# There is a backend call to check if we can set the value,
# and then another check each time we assert the status above
expected_calls = [
('is_leader',), backend_call,
('is_leader',),
('is_leader',), ('status_get', {'is_app': True}),
]
self.assertBackendCalls(expected_calls)
def test_set_app_status_non_leader_raises(self):
self.harness.set_leader(False)
with self.assertRaises(RuntimeError):
self.model.app.status
with self.assertRaises(RuntimeError):
self.model.app.status = ops.model.ActiveStatus()
def test_set_unit_status_invalid(self):
with self.assertRaises(ops.model.InvalidStatusError):
self.model.unit.status = 'blocked'
def test_set_app_status_invalid(self):
with self.assertRaises(ops.model.InvalidStatusError):
self.model.app.status = 'blocked'
def test_remote_unit_status(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.harness.add_relation_unit(relation_id, 'remoteapp1/1')
remote_unit = next(filter(lambda u: u.name == 'remoteapp1/0',
self.model.get_relation('db1').units))
self.resetBackendCalls()
# Remote unit status is always unknown.
self.assertEqual(remote_unit.status, ops.model.UnknownStatus())
test_statuses = (
ops.model.UnknownStatus(),
ops.model.ActiveStatus('Green'),
ops.model.MaintenanceStatus('Yellow'),
ops.model.BlockedStatus('Red'),
ops.model.WaitingStatus('White'),
)
for target_status in test_statuses:
with self.subTest(target_status.name):
with self.assertRaises(RuntimeError):
remote_unit.status = target_status
self.assertBackendCalls([])
def test_remote_app_status(self):
relation_id = self.harness.add_relation('db1', 'remoteapp1')
self.harness.add_relation_unit(relation_id, 'remoteapp1/0')
self.harness.add_relation_unit(relation_id, 'remoteapp1/1')
remoteapp1 = self.model.get_relation('db1').app
self.resetBackendCalls()
# Remote application status is always unknown.
self.assertIsInstance(remoteapp1.status, ops.model.UnknownStatus)
test_statuses = (
ops.model.UnknownStatus(),
ops.model.ActiveStatus(),
ops.model.MaintenanceStatus('Upgrading software'),
ops.model.BlockedStatus('Awaiting manual resolution'),
ops.model.WaitingStatus('Awaiting related app updates'),
)
for target_status in test_statuses:
with self.subTest(target_status.name):
with self.assertRaises(RuntimeError):
remoteapp1.status = target_status
self.assertBackendCalls([])
def test_storage(self):
        # TODO: (jam) 2020-05-07 Harness doesn't yet expose storage-get; see issue #263.
meta = ops.charm.CharmMeta()
meta.storages = {'disks': None, 'data': None}
model = ops.model.Model(meta, ops.model._ModelBackend('myapp/0'))
fake_script(self, 'storage-list', '''
if [ "$1" = disks ]; then
echo '["disks/0", "disks/1"]'
else
echo '[]'
fi
''')
fake_script(self, 'storage-get', '''
if [ "$2" = disks/0 ]; then
echo '"/var/srv/disks/0"'
elif [ "$2" = disks/1 ]; then
echo '"/var/srv/disks/1"'
else
exit 2
fi
''')
fake_script(self, 'storage-add', '')
self.assertEqual(len(model.storages), 2)
self.assertEqual(model.storages.keys(), meta.storages.keys())
self.assertIn('disks', model.storages)
test_cases = {
0: {'name': 'disks', 'location': pathlib.Path('/var/srv/disks/0')},
1: {'name': 'disks', 'location': pathlib.Path('/var/srv/disks/1')},
}
for storage in model.storages['disks']:
self.assertEqual(storage.name, 'disks')
self.assertIn(storage.id, test_cases)
self.assertEqual(storage.name, test_cases[storage.id]['name'])
self.assertEqual(storage.location, test_cases[storage.id]['location'])
self.assertEqual(fake_script_calls(self, clear=True), [
['storage-list', 'disks', '--format=json'],
['storage-get', '-s', 'disks/0', 'location', '--format=json'],
['storage-get', '-s', 'disks/1', 'location', '--format=json'],
])
self.assertSequenceEqual(model.storages['data'], [])
model.storages.request('data', count=3)
self.assertEqual(fake_script_calls(self), [
['storage-list', 'data', '--format=json'],
['storage-add', 'data=3'],
])
# Try to add storage not present in charm metadata.
with self.assertRaises(ops.model.ModelError):
model.storages.request('deadbeef')
# Invalid count parameter types.
for count_v in [None, False, 2.0, 'a', b'beef', object]:
with self.assertRaises(TypeError):
model.storages.request('data', count_v)
def test_storages_immutable(self):
with self.assertRaises(AttributeError):
self.model.storages = {}
def resetBackendCalls(self):
self.harness._get_backend_calls(reset=True)
def assertBackendCalls(self, expected, *, reset=True):
self.assertEqual(expected, self.harness._get_backend_calls(reset=reset))
class TestContainers(unittest.TestCase):
def setUp(self):
meta = ops.charm.CharmMeta.from_yaml("""
name: k8s-charm
containers:
c1:
k: v
c2:
k: v
""")
backend = ops.model._ModelBackend('myapp/0')
self.model = ops.model.Model(meta, backend)
def test_unit_containers(self):
containers = self.model.unit.containers
self.assertEqual(sorted(containers), ['c1', 'c2'])
self.assertEqual(len(containers), 2)
self.assertIn('c1', containers)
self.assertIn('c2', containers)
self.assertNotIn('c3', containers)
for name in ['c1', 'c2']:
container = containers[name]
self.assertIsInstance(container, ops.model.Container)
self.assertEqual(container.name, name)
self.assertIsInstance(container.pebble, ops.pebble.Client)
with self.assertRaises(KeyError):
containers['c3']
with self.assertRaises(RuntimeError):
other_unit = self.model.get_unit('other')
other_unit.containers
def test_unit_get_container(self):
unit = self.model.unit
for name in ['c1', 'c2']:
container = unit.get_container(name)
self.assertIsInstance(container, ops.model.Container)
self.assertEqual(container.name, name)
self.assertIsInstance(container.pebble, ops.pebble.Client)
with self.assertRaises(ops.model.ModelError):
unit.get_container('c3')
with self.assertRaises(RuntimeError):
other_unit = self.model.get_unit('other')
other_unit.get_container('foo')
class TestContainerPebble(unittest.TestCase):
def setUp(self):
meta = ops.charm.CharmMeta.from_yaml("""
name: k8s-charm
containers:
c1:
k: v
""")
backend = MockPebbleBackend('myapp/0')
self.model = ops.model.Model(meta, backend)
self.container = self.model.unit.containers['c1']
self.pebble = self.container.pebble
def test_socket_path(self):
self.assertEqual(self.pebble.socket_path, '/charm/containers/c1/pebble.socket')
def test_autostart(self):
self.container.autostart()
self.assertEqual(self.pebble.requests, [('autostart',)])
def test_start(self):
self.container.start('foo')
self.container.start('foo', 'bar')
self.assertEqual(self.pebble.requests, [
('start', ('foo',)),
('start', ('foo', 'bar')),
])
def test_start_no_arguments(self):
with self.assertRaises(TypeError):
self.container.start()
def test_stop(self):
self.container.stop('foo')
self.container.stop('foo', 'bar')
self.assertEqual(self.pebble.requests, [
('stop', ('foo',)),
('stop', ('foo', 'bar')),
])
def test_stop_no_arguments(self):
with self.assertRaises(TypeError):
self.container.stop()
def test_type_errors(self):
meta = ops.charm.CharmMeta.from_yaml("""
name: k8s-charm
containers:
c1:
k: v
""")
# Only the real pebble Client checks types, so use actual backend class
backend = ops.model._ModelBackend('myapp/0')
model = ops.model.Model(meta, backend)
container = model.unit.containers['c1']
with self.assertRaises(TypeError):
container.start(['foo'])
with self.assertRaises(TypeError):
container.stop(['foo'])
def test_add_layer(self):
self.container.add_layer('a', 'summary: str\n')
self.container.add_layer('b', {'summary': 'dict'})
self.container.add_layer('c', ops.pebble.Layer('summary: Layer'))
self.container.add_layer('d', 'summary: str\n', combine=True)
self.assertEqual(self.pebble.requests, [
('add_layer', 'a', 'summary: str\n', False),
('add_layer', 'b', 'summary: dict\n', False),
('add_layer', 'c', 'summary: Layer\n', False),
('add_layer', 'd', 'summary: str\n', True),
])
        # combine is a keyword-only argument, so passing it positionally raises TypeError.
with self.assertRaises(TypeError):
self.container.add_layer('x', {}, True)
def test_get_plan(self):
plan_yaml = 'services:\n foo:\n override: replace\n command: bar'
self.pebble.responses.append(ops.pebble.Plan(plan_yaml))
plan = self.container.get_plan()
self.assertEqual(self.pebble.requests, [('get_plan',)])
self.assertIsInstance(plan, ops.pebble.Plan)
self.assertEqual(plan.to_yaml(), yaml.safe_dump(yaml.safe_load(plan_yaml)))
@staticmethod
def _make_service(name, startup, current):
return ops.pebble.ServiceInfo.from_dict(
{'name': name, 'startup': startup, 'current': current})
def test_get_services(self):
two_services = [
self._make_service('s1', 'enabled', 'active'),
self._make_service('s2', 'disabled', 'inactive'),
]
self.pebble.responses.append(two_services)
services = self.container.get_services()
self.assertEqual(len(services), 2)
self.assertEqual(set(services), {'s1', 's2'})
self.assertEqual(services['s1'].name, 's1')
self.assertEqual(services['s1'].startup, ops.pebble.ServiceStartup.ENABLED)
self.assertEqual(services['s1'].current, ops.pebble.ServiceStatus.ACTIVE)
self.assertEqual(services['s2'].name, 's2')
self.assertEqual(services['s2'].startup, ops.pebble.ServiceStartup.DISABLED)
self.assertEqual(services['s2'].current, ops.pebble.ServiceStatus.INACTIVE)
self.pebble.responses.append(two_services)
services = self.container.get_services('s1', 's2')
self.assertEqual(len(services), 2)
self.assertEqual(set(services), {'s1', 's2'})
self.assertEqual(services['s1'].name, 's1')
self.assertEqual(services['s1'].startup, ops.pebble.ServiceStartup.ENABLED)
self.assertEqual(services['s1'].current, ops.pebble.ServiceStatus.ACTIVE)
self.assertEqual(services['s2'].name, 's2')
self.assertEqual(services['s2'].startup, ops.pebble.ServiceStartup.DISABLED)
self.assertEqual(services['s2'].current, ops.pebble.ServiceStatus.INACTIVE)
self.assertEqual(self.pebble.requests, [
('get_services', ()),
('get_services', ('s1', 's2')),
])
def test_get_service(self):
# Single service returned successfully
self.pebble.responses.append([self._make_service('s1', 'enabled', 'active')])
s = self.container.get_service('s1')
self.assertEqual(self.pebble.requests, [('get_services', ('s1', ))])
self.assertEqual(s.name, 's1')
self.assertEqual(s.startup, ops.pebble.ServiceStartup.ENABLED)
self.assertEqual(s.current, ops.pebble.ServiceStatus.ACTIVE)
# If Pebble returns no services, should be a ModelError
self.pebble.responses.append([])
with self.assertRaises(ops.model.ModelError) as cm:
self.container.get_service('s2')
self.assertEqual(str(cm.exception), "service 's2' not found")
# If Pebble returns more than one service, RuntimeError is raised
self.pebble.responses.append([
self._make_service('s1', 'enabled', 'active'),
self._make_service('s2', 'disabled', 'inactive'),
])
with self.assertRaises(RuntimeError):
self.container.get_service('s1')
def test_pull(self):
self.pebble.responses.append('dummy1')
got = self.container.pull('/path/1')
self.assertEqual(got, 'dummy1')
self.assertEqual(self.pebble.requests, [
('pull', '/path/1', 'utf-8'),
])
self.pebble.requests = []
self.pebble.responses.append(b'dummy2')
got = self.container.pull('/path/2', encoding=None)
self.assertEqual(got, b'dummy2')
self.assertEqual(self.pebble.requests, [
('pull', '/path/2', None),
])
def test_push(self):
self.container.push('/path/1', 'content1')
self.assertEqual(self.pebble.requests, [
('push', '/path/1', 'content1', 'utf-8', False, None,
None, None, None, None),
])
self.pebble.requests = []
self.container.push('/path/2', b'content2', encoding=None, make_dirs=True,
permissions=0o600, user_id=12, user='bob', group_id=34, group='staff')
self.assertEqual(self.pebble.requests, [
('push', '/path/2', b'content2', None, True, 0o600, 12, 'bob', 34, 'staff'),
])
def test_list_files(self):
self.pebble.responses.append('dummy1')
ret = self.container.list_files('/path/1')
self.assertEqual(ret, 'dummy1')
self.assertEqual(self.pebble.requests, [
('list_files', '/path/1', None, False),
])
self.pebble.requests = []
self.pebble.responses.append('dummy2')
ret = self.container.list_files('/path/2', pattern='*.txt', itself=True)
self.assertEqual(ret, 'dummy2')
self.assertEqual(self.pebble.requests, [
('list_files', '/path/2', '*.txt', True),
])
def test_make_dir(self):
self.container.make_dir('/path/1')
self.assertEqual(self.pebble.requests, [
('make_dir', '/path/1', False, None, None, None, None, None),
])
self.pebble.requests = []
self.container.make_dir('/path/2', make_parents=True, permissions=0o700,
user_id=12, user='bob', group_id=34, group='staff')
self.assertEqual(self.pebble.requests, [
('make_dir', '/path/2', True, 0o700, 12, 'bob', 34, 'staff'),
])
def test_remove_path(self):
self.container.remove_path('/path/1')
self.assertEqual(self.pebble.requests, [
('remove_path', '/path/1', False),
])
self.pebble.requests = []
self.container.remove_path('/path/2', recursive=True)
self.assertEqual(self.pebble.requests, [
('remove_path', '/path/2', True),
])
class MockPebbleBackend(ops.model._ModelBackend):
def get_pebble(self, socket_path):
return MockPebbleClient(socket_path)
class MockPebbleClient:
def __init__(self, socket_path):
self.socket_path = socket_path
self.requests = []
self.responses = []
def autostart_services(self):
self.requests.append(('autostart',))
def start_services(self, service_names):
self.requests.append(('start', service_names))
def stop_services(self, service_names):
self.requests.append(('stop', service_names))
def add_layer(self, label, layer, combine=False):
if isinstance(layer, dict):
layer = ops.pebble.Layer(layer).to_yaml()
elif isinstance(layer, ops.pebble.Layer):
layer = layer.to_yaml()
self.requests.append(('add_layer', label, layer, combine))
def get_plan(self):
self.requests.append(('get_plan',))
return self.responses.pop(0)
def get_services(self, names=None):
self.requests.append(('get_services', names))
return self.responses.pop(0)
def pull(self, path, *, encoding='utf-8'):
self.requests.append(('pull', path, encoding))
return self.responses.pop(0)
def push(self, path, source, *, encoding='utf-8', make_dirs=False, permissions=None,
user_id=None, user=None, group_id=None, group=None):
self.requests.append(('push', path, source, encoding, make_dirs, permissions,
user_id, user, group_id, group))
def list_files(self, path, *, pattern=None, itself=False):
self.requests.append(('list_files', path, pattern, itself))
return self.responses.pop(0)
def make_dir(self, path, *, make_parents=False, permissions=None, user_id=None, user=None,
group_id=None, group=None):
self.requests.append(('make_dir', path, make_parents, permissions, user_id, user,
group_id, group))
def remove_path(self, path, *, recursive=False):
self.requests.append(('remove_path', path, recursive))
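

# Illustrative sketch, not part of the original suite: MockPebbleClient records
# each Pebble call as a tuple in `requests` and serves canned values from
# `responses`, so it can also be driven directly. The socket path and helper
# name below are hypothetical.
def _example_mock_pebble_client_usage():
    client = MockPebbleClient('/charm/containers/c1/pebble.socket')
    client.responses.append(ops.pebble.Plan('services: {}'))
    plan = client.get_plan()
    assert client.requests == [('get_plan',)]
    return plan
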
class TestModelBindings(unittest.TestCase):
def setUp(self):
meta = ops.charm.CharmMeta()
meta.relations = {
'db0': RelationMeta(
RelationRole.provides, 'db0', {'interface': 'db0', 'scope': 'global'}),
'db1': RelationMeta(
RelationRole.requires, 'db1', {'interface': 'db1', 'scope': 'global'}),
'db2': RelationMeta(
RelationRole.peer, 'db2', {'interface': 'db2', 'scope': 'global'}),
}
self.backend = ops.model._ModelBackend('myapp/0')
self.model = ops.model.Model(meta, self.backend)
fake_script(self, 'relation-ids',
"""([ "$1" = db0 ] && echo '["db0:4"]') || echo '[]'""")
fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""")
self.network_get_out = '''{
"bind-addresses": [
{
"mac-address": "de:ad:be:ef:ca:fe",
"interface-name": "lo",
"addresses": [
{
"hostname": "",
"value": "192.0.2.2",
"cidr": "192.0.2.0/24"
},
{
"hostname": "deadbeef.example",
"value": "dead:beef::1",
"cidr": "dead:beef::/64"
}
]
},
{
"mac-address": "",
"interface-name": "tun",
"addresses": [
{
"hostname": "",
"value": "192.0.3.3",
"cidr": ""
},
{
"hostname": "",
"value": "2001:db8::3",
"cidr": ""
},
{
"hostname": "deadbeef.local",
"value": "fe80::1:1",
"cidr": "fe80::/64"
}
]
}
],
"egress-subnets": [
"192.0.2.2/32",
"192.0.3.0/24",
"dead:beef::/64",
"2001:db8::3/128"
],
"ingress-addresses": [
"192.0.2.2",
"192.0.3.3",
"dead:beef::1",
"2001:db8::3"
]
}'''
def _check_binding_data(self, binding_name, binding):
self.assertEqual(binding.name, binding_name)
self.assertEqual(binding.network.bind_address, ipaddress.ip_address('192.0.2.2'))
self.assertEqual(binding.network.ingress_address, ipaddress.ip_address('192.0.2.2'))
# /32 and /128 CIDRs are valid one-address networks for IPv{4,6}Network types respectively.
self.assertEqual(binding.network.egress_subnets, [ipaddress.ip_network('192.0.2.2/32'),
ipaddress.ip_network('192.0.3.0/24'),
ipaddress.ip_network('dead:beef::/64'),
ipaddress.ip_network('2001:db8::3/128')])
for (i, (name, address, subnet)) in enumerate([
('lo', '192.0.2.2', '192.0.2.0/24'),
('lo', 'dead:beef::1', 'dead:beef::/64'),
('tun', '192.0.3.3', '192.0.3.3/32'),
('tun', '2001:db8::3', '2001:db8::3/128'),
('tun', 'fe80::1:1', 'fe80::/64')]):
self.assertEqual(binding.network.interfaces[i].name, name)
self.assertEqual(binding.network.interfaces[i].address, ipaddress.ip_address(address))
self.assertEqual(binding.network.interfaces[i].subnet, ipaddress.ip_network(subnet))
def test_invalid_keys(self):
# Basic validation for passing invalid keys.
for name in (object, 0):
with self.assertRaises(ops.model.ModelError):
self.model.get_binding(name)
def test_dead_relations(self):
fake_script(
self,
'network-get',
'''
if [ "$1" = db0 ] && [ "$2" = --format=json ]; then
echo '{}'
else
echo ERROR invalid value "$2" for option -r: relation not found >&2
exit 2
fi
'''.format(self.network_get_out))
# Validate the behavior for dead relations.
binding = ops.model.Binding('db0', 42, self.model._backend)
self.assertEqual(binding.network.bind_address, ipaddress.ip_address('192.0.2.2'))
self.assertEqual(fake_script_calls(self, clear=True), [
['network-get', 'db0', '-r', '42', '--format=json'],
['network-get', 'db0', '--format=json'],
])
def test_binding_by_relation_name(self):
fake_script(self, 'network-get',
'''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(self.network_get_out))
binding_name = 'db0'
expected_calls = [['network-get', 'db0', '--format=json']]
binding = self.model.get_binding(binding_name)
self._check_binding_data(binding_name, binding)
self.assertEqual(fake_script_calls(self, clear=True), expected_calls)
def test_binding_by_relation(self):
fake_script(self, 'network-get',
'''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(self.network_get_out))
binding_name = 'db0'
expected_calls = [
['relation-ids', 'db0', '--format=json'],
# The two invocations below are due to the get_relation call.
['relation-list', '-r', '4', '--format=json'],
['network-get', 'db0', '-r', '4', '--format=json'],
]
binding = self.model.get_binding(self.model.get_relation(binding_name))
self._check_binding_data(binding_name, binding)
self.assertEqual(fake_script_calls(self, clear=True), expected_calls)
def test_binding_no_iface_name(self):
network_get_out_obj = {
'bind-addresses': [
{
'mac-address': '',
'interface-name': '',
'addresses': [
{
'hostname': '',
'value': '10.1.89.35',
'cidr': ''
}
]
}
],
'egress-subnets': [
'10.152.183.158/32'
],
'ingress-addresses': [
'10.152.183.158'
]
}
network_get_out = json.dumps(network_get_out_obj)
fake_script(self, 'network-get',
'''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(network_get_out))
binding_name = 'db0'
expected_calls = [['network-get', 'db0', '--format=json']]
binding = self.model.get_binding(binding_name)
self.assertEqual(binding.name, 'db0')
self.assertEqual(binding.network.bind_address, ipaddress.ip_address('10.1.89.35'))
self.assertEqual(binding.network.ingress_address, ipaddress.ip_address('10.152.183.158'))
self.assertEqual(fake_script_calls(self, clear=True), expected_calls)
def test_missing_bind_addresses(self):
network_data = json.dumps({})
fake_script(self, 'network-get',
'''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(network_data))
binding_name = 'db0'
binding = self.model.get_binding(self.model.get_relation(binding_name))
self.assertEqual(binding.network.interfaces, [])
def test_empty_bind_addresses(self):
network_data = json.dumps({'bind-addresses': [{}]})
fake_script(self, 'network-get',
'''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(network_data))
binding_name = 'db0'
binding = self.model.get_binding(self.model.get_relation(binding_name))
self.assertEqual(binding.network.interfaces, [])
def test_no_bind_addresses(self):
network_data = json.dumps({'bind-addresses': [{'addresses': None}]})
fake_script(self, 'network-get',
'''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(network_data))
binding_name = 'db0'
binding = self.model.get_binding(self.model.get_relation(binding_name))
self.assertEqual(binding.network.interfaces, [])
def test_empty_interface_info(self):
network_data = json.dumps({
'bind-addresses': [{
'interface-name': 'eth0',
'addresses': [{}],
}],
})
fake_script(self, 'network-get',
'''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(network_data))
binding_name = 'db0'
binding = self.model.get_binding(self.model.get_relation(binding_name))
self.assertEqual(len(binding.network.interfaces), 1)
interface = binding.network.interfaces[0]
self.assertIsNone(interface.address)
self.assertIsNone(interface.subnet)
def test_missing_ingress_addresses(self):
network_data = json.dumps({
'bind-addresses': [],
})
fake_script(self, 'network-get',
'''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(network_data))
binding_name = 'db0'
binding = self.model.get_binding(self.model.get_relation(binding_name))
self.assertEqual(binding.network.ingress_addresses, [])
self.assertEqual(binding.network.ingress_address, None)
def test_missing_egress_subnets(self):
network_data = json.dumps({
'bind-addresses': [],
'ingress-addresses': [],
})
fake_script(self, 'network-get',
'''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(network_data))
binding_name = 'db0'
binding = self.model.get_binding(self.model.get_relation(binding_name))
self.assertEqual(binding.network.egress_subnets, [])
class TestModelBackend(unittest.TestCase):
def setUp(self):
self._backend = None
@property
def backend(self):
if self._backend is None:
self._backend = ops.model._ModelBackend('myapp/0')
return self._backend
def test_relation_get_set_is_app_arg(self):
# No is_app provided.
with self.assertRaises(TypeError):
self.backend.relation_set(1, 'fookey', 'barval')
with self.assertRaises(TypeError):
self.backend.relation_get(1, 'fooentity')
# Invalid types for is_app.
for is_app_v in [None, 1, 2.0, 'a', b'beef']:
with self.assertRaises(TypeError):
self.backend.relation_set(1, 'fookey', 'barval', is_app=is_app_v)
with self.assertRaises(TypeError):
self.backend.relation_get(1, 'fooentity', is_app=is_app_v)
def test_is_leader_refresh(self):
meta = ops.charm.CharmMeta.from_yaml('''
name: myapp
''')
model = ops.model.Model(meta, self.backend)
fake_script(self, 'is-leader', 'echo false')
self.assertFalse(model.unit.is_leader())
# Change the leadership status
fake_script(self, 'is-leader', 'echo true')
# If you don't force it, we don't check, so we won't see the change
self.assertFalse(model.unit.is_leader())
# If we force a recheck, then we notice
self.backend._leader_check_time = None
self.assertTrue(model.unit.is_leader())
# Force a recheck without changing the leadership status.
fake_script(self, 'is-leader', 'echo true')
self.backend._leader_check_time = None
self.assertTrue(model.unit.is_leader())
def test_relation_tool_errors(self):
self.addCleanup(os.environ.pop, 'JUJU_VERSION', None)
os.environ['JUJU_VERSION'] = '2.8.0'
err_msg = 'ERROR invalid value "$2" for option -r: relation not found'
test_cases = [(
lambda: fake_script(self, 'relation-list', 'echo fooerror >&2 ; exit 1'),
lambda: self.backend.relation_list(3),
ops.model.ModelError,
[['relation-list', '-r', '3', '--format=json']],
), (
lambda: fake_script(self, 'relation-list', 'echo {} >&2 ; exit 2'.format(err_msg)),
lambda: self.backend.relation_list(3),
ops.model.RelationNotFoundError,
[['relation-list', '-r', '3', '--format=json']],
), (
lambda: fake_script(self, 'relation-set', 'echo fooerror >&2 ; exit 1'),
lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False),
ops.model.ModelError,
[['relation-set', '-r', '3', 'foo=bar']],
), (
lambda: fake_script(self, 'relation-set', 'echo {} >&2 ; exit 2'.format(err_msg)),
lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False),
ops.model.RelationNotFoundError,
[['relation-set', '-r', '3', 'foo=bar']],
), (
lambda: None,
lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=True),
ops.model.RelationNotFoundError,
[['relation-set', '-r', '3', 'foo=bar', '--app']],
), (
lambda: fake_script(self, 'relation-get', 'echo fooerror >&2 ; exit 1'),
lambda: self.backend.relation_get(3, 'remote/0', is_app=False),
ops.model.ModelError,
[['relation-get', '-r', '3', '-', 'remote/0', '--format=json']],
), (
lambda: fake_script(self, 'relation-get', 'echo {} >&2 ; exit 2'.format(err_msg)),
lambda: self.backend.relation_get(3, 'remote/0', is_app=False),
ops.model.RelationNotFoundError,
[['relation-get', '-r', '3', '-', 'remote/0', '--format=json']],
), (
lambda: None,
lambda: self.backend.relation_get(3, 'remote/0', is_app=True),
ops.model.RelationNotFoundError,
[['relation-get', '-r', '3', '-', 'remote/0', '--app', '--format=json']],
)]
for i, (do_fake, run, exception, calls) in enumerate(test_cases):
with self.subTest(i):
do_fake()
with self.assertRaises(exception):
run()
self.assertEqual(fake_script_calls(self, clear=True), calls)
def test_relation_get_juju_version_quirks(self):
self.addCleanup(os.environ.pop, 'JUJU_VERSION', None)
fake_script(self, 'relation-get', '''echo '{"foo": "bar"}' ''')
# on 2.7.0+, things proceed as expected
for v in ['2.8.0', '2.7.0']:
with self.subTest(v):
os.environ['JUJU_VERSION'] = v
rel_data = self.backend.relation_get(1, 'foo/0', is_app=True)
self.assertEqual(rel_data, {"foo": "bar"})
calls = [' '.join(i) for i in fake_script_calls(self, clear=True)]
self.assertEqual(calls, ['relation-get -r 1 - foo/0 --app --format=json'])
# before 2.7.0, it just fails (no --app support)
os.environ['JUJU_VERSION'] = '2.6.9'
with self.assertRaisesRegex(RuntimeError, 'not supported on Juju version 2.6.9'):
self.backend.relation_get(1, 'foo/0', is_app=True)
self.assertEqual(fake_script_calls(self), [])
def test_relation_set_juju_version_quirks(self):
self.addCleanup(os.environ.pop, 'JUJU_VERSION', None)
fake_script(self, 'relation-set', 'exit 0')
# on 2.7.0+, things proceed as expected
for v in ['2.8.0', '2.7.0']:
with self.subTest(v):
os.environ['JUJU_VERSION'] = v
self.backend.relation_set(1, 'foo', 'bar', is_app=True)
calls = [' '.join(i) for i in fake_script_calls(self, clear=True)]
self.assertEqual(calls, ['relation-set -r 1 foo=bar --app'])
# before 2.7.0, it just fails always (no --app support)
os.environ['JUJU_VERSION'] = '2.6.9'
with self.assertRaisesRegex(RuntimeError, 'not supported on Juju version 2.6.9'):
self.backend.relation_set(1, 'foo', 'bar', is_app=True)
self.assertEqual(fake_script_calls(self), [])
def test_status_get(self):
# taken from actual Juju output
content = '{"message": "", "status": "unknown", "status-data": {}}'
fake_script(self, 'status-get', "echo '{}'".format(content))
s = self.backend.status_get(is_app=False)
self.assertEqual(s['status'], "unknown")
self.assertEqual(s['message'], "")
# taken from actual Juju output
content = dedent("""
{
"application-status": {
"message": "installing",
"status": "maintenance",
"status-data": {},
"units": {
"uo/0": {
"message": "",
"status": "active",
"status-data": {}
}
}
}
}
""")
fake_script(self, 'status-get', "echo '{}'".format(content))
s = self.backend.status_get(is_app=True)
self.assertEqual(s['status'], "maintenance")
self.assertEqual(s['message'], "installing")
self.assertEqual(fake_script_calls(self, clear=True), [
['status-get', '--include-data', '--application=False', '--format=json'],
['status-get', '--include-data', '--application=True', '--format=json'],
])
def test_status_is_app_forced_kwargs(self):
fake_script(self, 'status-get', 'exit 1')
fake_script(self, 'status-set', 'exit 1')
test_cases = (
lambda: self.backend.status_get(False),
lambda: self.backend.status_get(True),
lambda: self.backend.status_set('active', '', False),
lambda: self.backend.status_set('active', '', True),
)
for case in test_cases:
with self.assertRaises(TypeError):
case()
def test_local_set_invalid_status(self):
        # Juju returns exit code 1 if you ask to set the status to 'unknown'.
meta = ops.charm.CharmMeta.from_yaml('''
name: myapp
''')
model = ops.model.Model(meta, self.backend)
fake_script(self, 'status-set', 'exit 1')
fake_script(self, 'is-leader', 'echo true')
with self.assertRaises(ops.model.ModelError):
model.unit.status = ops.model.UnknownStatus()
self.assertEqual(fake_script_calls(self, True), [
['status-set', '--application=False', 'unknown', ''],
])
with self.assertRaises(ops.model.ModelError):
model.app.status = ops.model.UnknownStatus()
# A leadership check is needed for application status.
self.assertEqual(fake_script_calls(self, True), [
['is-leader', '--format=json'],
['status-set', '--application=True', 'unknown', ''],
])
def test_status_set_is_app_not_bool_raises(self):
for is_app_v in [None, 1, 2.0, 'a', b'beef', object]:
with self.assertRaises(TypeError):
self.backend.status_set(ops.model.ActiveStatus, is_app=is_app_v)
def test_storage_tool_errors(self):
test_cases = [(
lambda: fake_script(self, 'storage-list', 'echo fooerror >&2 ; exit 1'),
lambda: self.backend.storage_list('foobar'),
ops.model.ModelError,
[['storage-list', 'foobar', '--format=json']],
), (
lambda: fake_script(self, 'storage-get', 'echo fooerror >&2 ; exit 1'),
lambda: self.backend.storage_get('foobar', 'someattr'),
ops.model.ModelError,
[['storage-get', '-s', 'foobar', 'someattr', '--format=json']],
), (
lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'),
lambda: self.backend.storage_add('foobar', count=2),
ops.model.ModelError,
[['storage-add', 'foobar=2']],
), (
lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'),
lambda: self.backend.storage_add('foobar', count=object),
TypeError,
[],
), (
lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'),
lambda: self.backend.storage_add('foobar', count=True),
TypeError,
[],
)]
for do_fake, run, exception, calls in test_cases:
do_fake()
with self.assertRaises(exception):
run()
self.assertEqual(fake_script_calls(self, clear=True), calls)
def test_network_get(self):
network_get_out = '''{
"bind-addresses": [
{
"mac-address": "",
"interface-name": "",
"addresses": [
{
"hostname": "",
"value": "192.0.2.2",
"cidr": ""
}
]
}
],
"egress-subnets": [
"192.0.2.2/32"
],
"ingress-addresses": [
"192.0.2.2"
]
}'''
fake_script(self, 'network-get',
'''[ "$1" = deadbeef ] && echo '{}' || exit 1'''.format(network_get_out))
network_info = self.backend.network_get('deadbeef')
self.assertEqual(network_info, json.loads(network_get_out))
self.assertEqual(fake_script_calls(self, clear=True),
[['network-get', 'deadbeef', '--format=json']])
network_info = self.backend.network_get('deadbeef', 1)
self.assertEqual(network_info, json.loads(network_get_out))
self.assertEqual(fake_script_calls(self, clear=True),
[['network-get', 'deadbeef', '-r', '1', '--format=json']])
def test_network_get_errors(self):
err_no_endpoint = 'ERROR no network config found for binding "$2"'
err_no_rel = 'ERROR invalid value "$3" for option -r: relation not found'
test_cases = [(
lambda: fake_script(self, 'network-get',
'echo {} >&2 ; exit 1'.format(err_no_endpoint)),
lambda: self.backend.network_get("deadbeef"),
ops.model.ModelError,
[['network-get', 'deadbeef', '--format=json']],
), (
lambda: fake_script(self, 'network-get', 'echo {} >&2 ; exit 2'.format(err_no_rel)),
lambda: self.backend.network_get("deadbeef", 3),
ops.model.RelationNotFoundError,
[['network-get', 'deadbeef', '-r', '3', '--format=json']],
)]
for do_fake, run, exception, calls in test_cases:
do_fake()
with self.assertRaises(exception):
run()
self.assertEqual(fake_script_calls(self, clear=True), calls)
def test_action_get_error(self):
fake_script(self, 'action-get', '')
fake_script(self, 'action-get', 'echo fooerror >&2 ; exit 1')
with self.assertRaises(ops.model.ModelError):
self.backend.action_get()
calls = [['action-get', '--format=json']]
self.assertEqual(fake_script_calls(self, clear=True), calls)
def test_action_set_error(self):
fake_script(self, 'action-get', '')
fake_script(self, 'action-set', 'echo fooerror >&2 ; exit 1')
with self.assertRaises(ops.model.ModelError):
self.backend.action_set(OrderedDict([('foo', 'bar'), ('dead', 'beef cafe')]))
calls = [["action-set", "foo=bar", "dead=beef cafe"]]
self.assertEqual(fake_script_calls(self, clear=True), calls)
def test_action_log_error(self):
fake_script(self, 'action-get', '')
fake_script(self, 'action-log', 'echo fooerror >&2 ; exit 1')
with self.assertRaises(ops.model.ModelError):
self.backend.action_log('log-message')
calls = [["action-log", "log-message"]]
self.assertEqual(fake_script_calls(self, clear=True), calls)
def test_action_get(self):
fake_script(self, 'action-get', """echo '{"foo-name": "bar", "silent": false}'""")
params = self.backend.action_get()
self.assertEqual(params['foo-name'], 'bar')
self.assertEqual(params['silent'], False)
self.assertEqual(fake_script_calls(self), [['action-get', '--format=json']])
def test_action_set(self):
fake_script(self, 'action-get', 'exit 1')
fake_script(self, 'action-set', 'exit 0')
self.backend.action_set(OrderedDict([('x', 'dead beef'), ('y', 1)]))
self.assertEqual(fake_script_calls(self), [['action-set', 'x=dead beef', 'y=1']])
def test_action_fail(self):
fake_script(self, 'action-get', 'exit 1')
fake_script(self, 'action-fail', 'exit 0')
self.backend.action_fail('error 42')
self.assertEqual(fake_script_calls(self), [['action-fail', 'error 42']])
def test_action_log(self):
fake_script(self, 'action-get', 'exit 1')
fake_script(self, 'action-log', 'exit 0')
self.backend.action_log('progress: 42%')
self.assertEqual(fake_script_calls(self), [['action-log', 'progress: 42%']])
def test_application_version_set(self):
fake_script(self, 'application-version-set', 'exit 0')
self.backend.application_version_set('1.2b3')
self.assertEqual(fake_script_calls(self), [['application-version-set', '--', '1.2b3']])
def test_application_version_set_invalid(self):
fake_script(self, 'application-version-set', 'exit 0')
with self.assertRaises(TypeError):
self.backend.application_version_set(2)
with self.assertRaises(TypeError):
self.backend.application_version_set()
self.assertEqual(fake_script_calls(self), [])
def test_juju_log(self):
fake_script(self, 'juju-log', 'exit 0')
self.backend.juju_log('WARNING', 'foo')
self.assertEqual(fake_script_calls(self, clear=True),
[['juju-log', '--log-level', 'WARNING', '--', 'foo']])
with self.assertRaises(TypeError):
self.backend.juju_log('DEBUG')
self.assertEqual(fake_script_calls(self, clear=True), [])
fake_script(self, 'juju-log', 'exit 1')
with self.assertRaises(ops.model.ModelError):
self.backend.juju_log('BAR', 'foo')
self.assertEqual(fake_script_calls(self, clear=True),
[['juju-log', '--log-level', 'BAR', '--', 'foo']])
def test_valid_metrics(self):
fake_script(self, 'add-metric', 'exit 0')
test_cases = [(
OrderedDict([('foo', 42), ('b-ar', 4.5), ('ba_-z', 4.5), ('a', 1)]),
OrderedDict([('de', 'ad'), ('be', 'ef_ -')]),
[['add-metric', '--labels', 'de=ad,be=ef_ -',
'foo=42', 'b-ar=4.5', 'ba_-z=4.5', 'a=1']]
), (
OrderedDict([('foo1', 0), ('b2r', 4.5)]),
OrderedDict([('d3', 'aд'), ('b33f', '3_ -')]),
[['add-metric', '--labels', 'd3=aд,b33f=3_ -', 'foo1=0', 'b2r=4.5']],
)]
for metrics, labels, expected_calls in test_cases:
self.backend.add_metrics(metrics, labels)
self.assertEqual(fake_script_calls(self, clear=True), expected_calls)
def test_invalid_metric_names(self):
invalid_inputs = [
({'': 4.2}, {}),
({'1': 4.2}, {}),
({'1': -4.2}, {}),
({'123': 4.2}, {}),
({'1foo': 4.2}, {}),
({'-foo': 4.2}, {}),
({'_foo': 4.2}, {}),
({'foo-': 4.2}, {}),
({'foo_': 4.2}, {}),
({'a-': 4.2}, {}),
({'a_': 4.2}, {}),
({'BAЯ': 4.2}, {}),
]
for metrics, labels in invalid_inputs:
with self.assertRaises(ops.model.ModelError):
self.backend.add_metrics(metrics, labels)
def test_invalid_metric_values(self):
invalid_inputs = [
({'a': float('+inf')}, {}),
({'a': float('-inf')}, {}),
({'a': float('nan')}, {}),
({'foo': 'bar'}, {}),
({'foo': '1O'}, {}),
]
for metrics, labels in invalid_inputs:
with self.assertRaises(ops.model.ModelError):
self.backend.add_metrics(metrics, labels)
def test_invalid_metric_labels(self):
invalid_inputs = [
({'foo': 4.2}, {'': 'baz'}),
({'foo': 4.2}, {',bar': 'baz'}),
({'foo': 4.2}, {'b=a=r': 'baz'}),
({'foo': 4.2}, {'BAЯ': 'baz'}),
]
for metrics, labels in invalid_inputs:
with self.assertRaises(ops.model.ModelError):
self.backend.add_metrics(metrics, labels)
def test_invalid_metric_label_values(self):
invalid_inputs = [
({'foo': 4.2}, {'bar': ''}),
({'foo': 4.2}, {'bar': 'b,az'}),
({'foo': 4.2}, {'bar': 'b=az'}),
]
for metrics, labels in invalid_inputs:
with self.assertRaises(ops.model.ModelError):
self.backend.add_metrics(metrics, labels)
def test_relation_remote_app_name_env(self):
self.addCleanup(os.environ.pop, 'JUJU_RELATION_ID', None)
self.addCleanup(os.environ.pop, 'JUJU_REMOTE_APP', None)
os.environ['JUJU_RELATION_ID'] = 'x:5'
os.environ['JUJU_REMOTE_APP'] = 'remoteapp1'
self.assertEqual(self.backend.relation_remote_app_name(5), 'remoteapp1')
os.environ['JUJU_RELATION_ID'] = '5'
self.assertEqual(self.backend.relation_remote_app_name(5), 'remoteapp1')
def test_relation_remote_app_name_script_success(self):
self.addCleanup(os.environ.pop, 'JUJU_RELATION_ID', None)
self.addCleanup(os.environ.pop, 'JUJU_REMOTE_APP', None)
# JUJU_RELATION_ID and JUJU_REMOTE_APP both unset
fake_script(self, 'relation-list', r"""
echo '"remoteapp2"'
""")
self.assertEqual(self.backend.relation_remote_app_name(1), 'remoteapp2')
self.assertEqual(fake_script_calls(self, clear=True), [
['relation-list', '-r', '1', '--app', '--format=json'],
])
# JUJU_RELATION_ID set but JUJU_REMOTE_APP unset
os.environ['JUJU_RELATION_ID'] = 'x:5'
self.assertEqual(self.backend.relation_remote_app_name(5), 'remoteapp2')
# JUJU_RELATION_ID unset but JUJU_REMOTE_APP set
del os.environ['JUJU_RELATION_ID']
os.environ['JUJU_REMOTE_APP'] = 'remoteapp1'
self.assertEqual(self.backend.relation_remote_app_name(5), 'remoteapp2')
# Both set, but JUJU_RELATION_ID a different relation
os.environ['JUJU_RELATION_ID'] = 'x:6'
self.assertEqual(self.backend.relation_remote_app_name(5), 'remoteapp2')
def test_relation_remote_app_name_script_errors(self):
fake_script(self, 'relation-list', r"""
echo "ERROR invalid value \"6\" for option -r: relation not found" >&2 # NOQA
exit 2
""")
self.assertIs(self.backend.relation_remote_app_name(6), None)
self.assertEqual(fake_script_calls(self, clear=True), [
['relation-list', '-r', '6', '--app', '--format=json'],
])
fake_script(self, 'relation-list', r"""
echo "ERROR option provided but not defined: --app" >&2
exit 2
""")
self.assertIs(self.backend.relation_remote_app_name(6), None)
self.assertEqual(fake_script_calls(self, clear=True), [
['relation-list', '-r', '6', '--app', '--format=json'],
])
class TestLazyMapping(unittest.TestCase):
def test_invalidate(self):
loaded = []
class MyLazyMap(ops.model.LazyMapping):
def _load(self):
loaded.append(1)
return {'foo': 'bar'}
map = MyLazyMap()
self.assertEqual(map['foo'], 'bar')
self.assertEqual(loaded, [1])
self.assertEqual(map['foo'], 'bar')
self.assertEqual(loaded, [1])
map._invalidate()
self.assertEqual(map['foo'], 'bar')
self.assertEqual(loaded, [1, 1])
if __name__ == "__main__":
unittest.main()
Lib/test/support/os_helper.py

import collections.abc
import contextlib
import errno
import os
import re
import stat
import sys
import time
import unittest
import warnings
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN_ASCII = '$test'
else:
TESTFN_ASCII = '@test'
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN_ASCII = "{}_{}_tmp".format(TESTFN_ASCII, os.getpid())
# TESTFN_UNICODE is a non-ascii filename
TESTFN_UNICODE = TESTFN_ASCII + "-\xe0\xf2\u0258\u0141\u011f"
if sys.platform == 'darwin':
# In Mac OS X's VFS API file names are, by definition, canonically
# decomposed Unicode, encoded using UTF-8. See QA1173:
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
import unicodedata
TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE)
# TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be
# encoded by the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename.
TESTFN_UNENCODABLE = None
if os.name == 'nt':
# skip win32s (0) or Windows 9x/ME (1)
if sys.getwindowsversion().platform >= 2:
# Different kinds of characters from various languages to minimize the
# probability that the whole name is encodable to MBCS (issue #9819)
TESTFN_UNENCODABLE = TESTFN_ASCII + "-\u5171\u0141\u2661\u0363\uDC80"
try:
TESTFN_UNENCODABLE.encode(sys.getfilesystemencoding())
except UnicodeEncodeError:
pass
else:
print('WARNING: The filename %r CAN be encoded by the filesystem '
'encoding (%s). Unicode filename tests may not be effective'
% (TESTFN_UNENCODABLE, sys.getfilesystemencoding()))
TESTFN_UNENCODABLE = None
# macOS and Emscripten deny unencodable filenames (invalid utf-8)
elif sys.platform not in {'darwin', 'emscripten', 'wasi'}:
try:
        # ascii and utf-8 cannot decode the byte 0xff
b'\xff'.decode(sys.getfilesystemencoding())
except UnicodeDecodeError:
        # 0xff will be decoded to the surrogate character U+DCFF
TESTFN_UNENCODABLE = TESTFN_ASCII \
+ b'-\xff'.decode(sys.getfilesystemencoding(), 'surrogateescape')
else:
# File system encoding (eg. ISO-8859-* encodings) can encode
# the byte 0xff. Skip some unicode filename tests.
pass
# FS_NONASCII: non-ASCII character encodable by os.fsencode(),
# or an empty string if there is no such character.
FS_NONASCII = ''
for character in (
# First try printable and common characters to have a readable filename.
    # For each character, the encodings listed are just examples of encodings
    # able to encode the character (the list is not exhaustive).
# U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
'\u00E6',
# U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
'\u0130',
# U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
'\u0141',
# U+03C6 (Greek Small Letter Phi): cp1253
'\u03C6',
# U+041A (Cyrillic Capital Letter Ka): cp1251
'\u041A',
# U+05D0 (Hebrew Letter Alef): Encodable to cp424
'\u05D0',
# U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
'\u060C',
# U+062A (Arabic Letter Teh): cp720
'\u062A',
# U+0E01 (Thai Character Ko Kai): cp874
'\u0E01',
# Then try more "special" characters. "special" because they may be
# interpreted or displayed differently depending on the exact locale
# encoding and the font.
# U+00A0 (No-Break Space)
'\u00A0',
# U+20AC (Euro Sign)
'\u20AC',
):
try:
# If Python is set up to use the legacy 'mbcs' in Windows,
# 'replace' error mode is used, and encode() returns b'?'
# for characters missing in the ANSI codepage
if os.fsdecode(os.fsencode(character)) != character:
raise UnicodeError
except UnicodeError:
pass
else:
FS_NONASCII = character
break
# Save the initial cwd
SAVEDCWD = os.getcwd()
# TESTFN_UNDECODABLE is a filename (bytes type) that should *not* be able to be
# decoded from the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename (ex: the latin1 encoding can decode any byte
# sequence). On UNIX, TESTFN_UNDECODABLE can be decoded by os.fsdecode() thanks
# to the surrogateescape error handler (PEP 383), but not from the filesystem
# encoding in strict mode.
TESTFN_UNDECODABLE = None
for name in (
    # b'\xff' is not decodable by os.fsdecode() with code page 932. Windows
    # accepts it when creating a file or a directory, but may refuse to enter
    # such a directory (when the bytes name is used). So test b'\xe7' first:
    # it is not decodable from cp932.
b'\xe7w\xf0',
# undecodable from ASCII, UTF-8
b'\xff',
# undecodable from iso8859-3, iso8859-6, iso8859-7, cp424, iso8859-8, cp856
# and cp857
b'\xae\xd5'
# undecodable from UTF-8 (UNIX and Mac OS X)
b'\xed\xb2\x80', b'\xed\xb4\x80',
# undecodable from shift_jis, cp869, cp874, cp932, cp1250, cp1251, cp1252,
# cp1253, cp1254, cp1255, cp1257, cp1258
b'\x81\x98',
):
try:
name.decode(sys.getfilesystemencoding())
except UnicodeDecodeError:
TESTFN_UNDECODABLE = os.fsencode(TESTFN_ASCII) + name
break
if FS_NONASCII:
TESTFN_NONASCII = TESTFN_ASCII + FS_NONASCII
else:
TESTFN_NONASCII = None
TESTFN = TESTFN_NONASCII or TESTFN_ASCII
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
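

# Illustrative sketch, not part of the original module: make_bad_fd() is
# typically used to provoke EBADF from APIs that accept a file descriptor.
# The helper name below is hypothetical.
def _example_make_bad_fd_usage():
    bad_fd = make_bad_fd()
    try:
        os.write(bad_fd, b"x")
    except OSError as exc:
        assert exc.errno == errno.EBADF
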
_can_symlink = None
def can_symlink():
global _can_symlink
if _can_symlink is not None:
return _can_symlink
# WASI / wasmtime prevents symlinks with absolute paths, see man
# openat2(2) RESOLVE_BENEATH. Almost all symlink tests use absolute
# paths. Skip symlink tests on WASI for now.
src = os.path.abspath(TESTFN)
symlink_path = src + "can_symlink"
try:
os.symlink(src, symlink_path)
can = True
except (OSError, NotImplementedError, AttributeError):
can = False
else:
os.remove(symlink_path)
_can_symlink = can
return can
def skip_unless_symlink(test):
"""Skip decorator for tests that require functional symlink"""
ok = can_symlink()
msg = "Requires functional symlink implementation"
return test if ok else unittest.skip(msg)(test)
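

# Illustrative sketch, not part of the original module: skip_unless_symlink is
# meant to decorate tests that require symlink support. The test class shown
# in this comment is hypothetical.
#
#     class ExampleTests(unittest.TestCase):
#         @skip_unless_symlink
#         def test_symlink(self):
#             link = TESTFN + "_link"
#             self.addCleanup(unlink, link)
#             os.symlink(TESTFN, link)
#             self.assertTrue(os.path.islink(link))
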
_can_xattr = None
def can_xattr():
import tempfile
global _can_xattr
if _can_xattr is not None:
return _can_xattr
if not hasattr(os, "setxattr"):
can = False
else:
import platform
tmp_dir = tempfile.mkdtemp()
tmp_fp, tmp_name = tempfile.mkstemp(dir=tmp_dir)
try:
with open(TESTFN, "wb") as fp:
try:
# TESTFN & tempfile may use different file systems with
# different capabilities
os.setxattr(tmp_fp, b"user.test", b"")
os.setxattr(tmp_name, b"trusted.foo", b"42")
os.setxattr(fp.fileno(), b"user.test", b"")
# Kernels < 2.6.39 don't respect setxattr flags.
kernel_version = platform.release()
m = re.match(r"2.6.(\d{1,2})", kernel_version)
can = m is None or int(m.group(1)) >= 39
except OSError:
can = False
finally:
unlink(TESTFN)
unlink(tmp_name)
rmdir(tmp_dir)
_can_xattr = can
return can
def skip_unless_xattr(test):
"""Skip decorator for tests that require functional extended attributes"""
ok = can_xattr()
msg = "no non-broken extended attribute support"
return test if ok else unittest.skip(msg)(test)
_can_chmod = None
def can_chmod():
global _can_chmod
if _can_chmod is not None:
return _can_chmod
    if not hasattr(os, "chmod"):
_can_chmod = False
return _can_chmod
try:
with open(TESTFN, "wb") as f:
try:
os.chmod(TESTFN, 0o777)
mode1 = os.stat(TESTFN).st_mode
os.chmod(TESTFN, 0o666)
mode2 = os.stat(TESTFN).st_mode
except OSError as e:
can = False
else:
can = stat.S_IMODE(mode1) != stat.S_IMODE(mode2)
finally:
os.unlink(TESTFN)
_can_chmod = can
return can
def skip_unless_working_chmod(test):
"""Skip tests that require working os.chmod()
WASI SDK 15.0 cannot change file mode bits.
"""
ok = can_chmod()
msg = "requires working os.chmod()"
return test if ok else unittest.skip(msg)(test)
def unlink(filename):
try:
_unlink(filename)
except (FileNotFoundError, NotADirectoryError):
pass
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
# Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
# Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
from test.support import _force_run
def _rmtree_inner(path):
for name in _force_run(path, os.listdir, path):
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError as exc:
print("support.rmtree(): os.lstat(%r) failed with %s"
% (fullname, exc),
file=sys.__stderr__)
mode = 0
if stat.S_ISDIR(mode):
_waitfor(_rmtree_inner, fullname, waitall=True)
_force_run(fullname, os.rmdir, fullname)
else:
_force_run(fullname, os.unlink, fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(lambda p: _force_run(p, os.rmdir, p), path)
def _longpath(path):
try:
import ctypes
except ImportError:
            # No ctypes means we can't expand paths.
pass
else:
buffer = ctypes.create_unicode_buffer(len(path) * 2)
length = ctypes.windll.kernel32.GetLongPathNameW(path, buffer,
len(buffer))
if length:
return buffer[:length]
return path
else:
_unlink = os.unlink
_rmdir = os.rmdir
def _rmtree(path):
import shutil
try:
shutil.rmtree(path)
return
except OSError:
pass
def _rmtree_inner(path):
from test.support import _force_run
for name in _force_run(path, os.listdir, path):
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_inner(fullname)
_force_run(path, os.rmdir, fullname)
else:
_force_run(path, os.unlink, fullname)
_rmtree_inner(path)
os.rmdir(path)
def _longpath(path):
return path
def rmdir(dirname):
try:
_rmdir(dirname)
except FileNotFoundError:
pass
def rmtree(path):
try:
_rmtree(path)
except FileNotFoundError:
pass
@contextlib.contextmanager
def temp_dir(path=None, quiet=False):
"""Return a context manager that creates a temporary directory.
Arguments:
path: the directory to create temporarily. If omitted or None,
defaults to creating a temporary directory using tempfile.mkdtemp.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, if the path is specified and cannot be
created, only a warning is issued.
"""
import tempfile
dir_created = False
if path is None:
path = tempfile.mkdtemp()
dir_created = True
path = os.path.realpath(path)
else:
try:
os.mkdir(path)
dir_created = True
except OSError as exc:
if not quiet:
raise
warnings.warn(f'tests may fail, unable to create '
f'temporary directory {path!r}: {exc}',
RuntimeWarning, stacklevel=3)
if dir_created:
pid = os.getpid()
try:
yield path
finally:
# In case the process forks, let only the parent remove the
# directory. The child has a different process id. (bpo-30028)
if dir_created and pid == os.getpid():
rmtree(path)
@contextlib.contextmanager
def change_cwd(path, quiet=False):
"""Return a context manager that changes the current working directory.
Arguments:
path: the directory to use as the temporary current working directory.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, it issues only a warning and keeps the current
working directory the same.
"""
saved_dir = os.getcwd()
try:
os.chdir(os.path.realpath(path))
except OSError as exc:
if not quiet:
raise
warnings.warn(f'tests may fail, unable to change the current working '
f'directory to {path!r}: {exc}',
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False):
"""
Context manager that temporarily creates and changes the CWD.
The function temporarily changes the current working directory
after creating a temporary directory in the current directory with
name *name*. If *name* is None, the temporary directory is
created using tempfile.mkdtemp.
If *quiet* is False (default) and it is not possible to
create or change the CWD, an error is raised. If *quiet* is True,
only a warning is raised and the original CWD is used.
"""
with temp_dir(path=name, quiet=quiet) as temp_path:
with change_cwd(temp_path, quiet=quiet) as cwd_dir:
yield cwd_dir
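# Illustrative sketch (not exercised by this module): how temp_cwd() is
# typically used. 'scratch' and 'marker.txt' are made-up names for the example.
#
#     with temp_cwd('scratch'):
#         with open('marker.txt', 'w') as f:
#             f.write('hello')
#         assert os.path.exists('marker.txt')
#     # On exit the previous working directory is restored and the whole
#     # 'scratch' directory (including marker.txt) has been removed.
#     assert not os.path.exists('scratch')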
def create_empty_file(filename):
"""Create an empty file. If the file already exists, truncate it."""
fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.close(fd)
@contextlib.contextmanager
def open_dir_fd(path):
"""Open a file descriptor to a directory."""
assert os.path.isdir(path)
flags = os.O_RDONLY
if hasattr(os, "O_DIRECTORY"):
flags |= os.O_DIRECTORY
dir_fd = os.open(path, flags)
try:
yield dir_fd
finally:
os.close(dir_fd)
def fs_is_case_insensitive(directory):
"""Detects if the file system for the specified directory
is case-insensitive."""
import tempfile
with tempfile.NamedTemporaryFile(dir=directory) as base:
base_path = base.name
case_path = base_path.upper()
if case_path == base_path:
case_path = base_path.lower()
try:
return os.path.samefile(base_path, case_path)
except FileNotFoundError:
return False
class FakePath:
"""Simple implementing of the path protocol.
"""
def __init__(self, path):
self.path = path
def __repr__(self):
return f'<FakePath {self.path!r}>'
def __fspath__(self):
if (isinstance(self.path, BaseException) or
isinstance(self.path, type) and
issubclass(self.path, BaseException)):
raise self.path
else:
return self.path
def fd_count():
"""Count the number of open file descriptors.
"""
if sys.platform.startswith(('linux', 'freebsd', 'emscripten')):
try:
names = os.listdir("/proc/self/fd")
# Subtract one because listdir() internally opens a file
# descriptor to list the content of the /proc/self/fd/ directory.
return len(names) - 1
except FileNotFoundError:
pass
MAXFD = 256
if hasattr(os, 'sysconf'):
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except OSError:
pass
old_modes = None
if sys.platform == 'win32':
# bpo-25306, bpo-31009: Call CrtSetReportMode() to not kill the process
# on invalid file descriptor if Python is compiled in debug mode
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
# no msvcrt or a release build
pass
else:
old_modes = {}
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
old_modes[report_type] = msvcrt.CrtSetReportMode(report_type,
0)
try:
count = 0
for fd in range(MAXFD):
try:
# Prefer dup() over fstat(). fstat() can require input/output
# whereas dup() doesn't.
fd2 = os.dup(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
else:
os.close(fd2)
count += 1
finally:
if old_modes is not None:
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
msvcrt.CrtSetReportMode(report_type, old_modes[report_type])
return count
if hasattr(os, "umask"):
@contextlib.contextmanager
def temp_umask(umask):
"""Context manager that temporarily sets the process umask."""
oldmask = os.umask(umask)
try:
yield
finally:
os.umask(oldmask)
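# Illustrative sketch (not exercised by this module): on POSIX systems a file
# newly created inside temp_umask(0o022) gets group/other write bits masked
# out. 'example.bin' is a made-up name for the example.
#
#     with temp_umask(0o022):
#         fd = os.open('example.bin', os.O_WRONLY | os.O_CREAT, 0o666)
#         os.close(fd)
#     # stat.S_IMODE(os.stat('example.bin').st_mode) == 0o644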
class EnvironmentVarGuard(collections.abc.MutableMapping):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def __iter__(self):
return iter(self._environ)
def __len__(self):
return len(self._environ)
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def copy(self):
# We do what os.environ.copy() does.
return dict(self)
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
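# Illustrative sketch (not exercised by this module): every variable touched
# through the guard is restored on exit. 'EXAMPLE_VAR' is a made-up name and is
# assumed not to be set beforehand.
#
#     with EnvironmentVarGuard() as env:
#         env.set('EXAMPLE_VAR', 'value')   # visible in os.environ inside the block
#         env.unset('PATH')                 # temporarily removed
#     # afterwards EXAMPLE_VAR is gone again and PATH is back to its old value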
| []
| []
| []
| [] | [] | python | 0 | 0 | |
web_app/__init__.py | # web_app/__init__.py
import os
from flask import Flask
from dotenv import load_dotenv
load_dotenv() # loads env vars from the .env file (locally)
from web_app.models import db, migrate
from web_app.routes.home_routes import home_routes
from web_app.routes.book_routes import book_routes
# DATABASE_URL = os.getenv("DATABASE_URL")  # the connection to our database.
DATABASE_URI = "sqlite:///C:\\Users\\Aarons\\Desktop\\twitoff-dspt6\\web_app\\twitoff_development.db"
def create_app():
# Our app instance.
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
db.init_app(app)
migrate.init_app(app, db)
app.register_blueprint(home_routes)
app.register_blueprint(book_routes)
return app
if __name__ == "__main__":
my_app = create_app()
my_app.run(debug=True)
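# Illustrative sketch (not how this app is currently configured): the hardcoded
# DATABASE_URI above could instead read DATABASE_URL from the environment,
# falling back to a SQLite file next to this package. The fallback filename
# mirrors the one used above.
#
# DATABASE_URI = os.getenv(
#     "DATABASE_URL",
#     "sqlite:///" + os.path.join(os.path.dirname(__file__), "twitoff_development.db"),
# )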
| []
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | python | 1 | 0 | |
core/utils/auth.go | package utils
import (
"context"
"errors"
"net/http"
"os"
"strings"
"time"
"github.com/alexedwards/argon2id"
"github.com/golang-jwt/jwt"
"github.com/gorilla/mux"
"github.com/nus-utils/nus-peer-review/loggers"
"github.com/nus-utils/nus-peer-review/models"
"gorm.io/gorm"
)
type ClaimsData struct {
Data models.User `json:"data"`
jwt.StandardClaims
}
const JWTClaimContextKey = "claims"
func GenerateJWT(user models.User) (string, error) {
var mySigningKey = []byte(os.Getenv("JWT_SECRET"))
claims := ClaimsData{
user,
jwt.StandardClaims{
ExpiresAt: time.Now().Add(time.Hour).Unix(),
Issuer: "npr-api",
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
tokenString, err := token.SignedString(mySigningKey)
if err != nil {
loggers.ErrorLogger.Println("Something Went Wrong: %s" + err.Error())
return "", err
}
return tokenString, nil
}
func ParseJWT(tokenString string) (*ClaimsData, error) {
token, err := jwt.ParseWithClaims(tokenString, &ClaimsData{}, func(token *jwt.Token) (interface{}, error) {
return []byte(os.Getenv("JWT_SECRET")), nil
})
if claims, ok := token.Claims.(*ClaimsData); ok && token.Valid {
return claims, nil
} else {
return claims, err
}
}
func ParseJWTWithClaims(tokenString string, claims *ClaimsData) error {
token, err := jwt.ParseWithClaims(tokenString, claims, func(token *jwt.Token) (interface{}, error) {
return []byte(os.Getenv("JWT_SECRET")), nil
})
if !token.Valid {
return err
}
return nil
}
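// Illustrative sketch (not part of this package): a GenerateJWT / ParseJWT
// round trip. The user fields are made up for the example and JWT_SECRET is
// assumed to be set in the environment.
//
//     token, err := GenerateJWT(models.User{Email: "[email protected]"})
//     if err != nil {
//         return err
//     }
//     claims, err := ParseJWT(token)
//     if err != nil {
//         return err
//     }
//     // claims.Data.Email == "[email protected]"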
// HashString returns the argon2 hash of strings such as email recovery tokens.
func HashString(token string) string {
hash, err := argon2id.CreateHash(token, argon2id.DefaultParams)
if err != nil {
loggers.ErrorLogger.Println("Something Went Wrong: %s" + err.Error())
}
return hash
}
func SupervisionCheckMiddleware(db *gorm.DB, moduleIdResolver func(r *http.Request) string) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
data := r.Context().Value(JWTClaimContextKey).(*models.Staff)
moduleId := moduleIdResolver(r)
var count int64
db.Model(&models.Supervision{}).Where("staff_id = ? and module_id = ?", data.ID, moduleId).Count(&count)
if count == 0 {
HandleResponse(w, "Not enrolled in module", http.StatusUnauthorized)
} else {
next.ServeHTTP(w, r)
}
})
}
}
func MarkerCheckMiddleware(db *gorm.DB) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var count int64
data := r.Context().Value(DecodeBodyContextKey).(*models.Grade)
claims := r.Context().Value(JWTClaimContextKey).(*models.Student)
db.Model(&models.Pairing{}).Where("id = ? AND marker_id = ?", data.PairingID, claims.ID).Count(&count)
if count == 0 {
HandleResponse(w, "Please don't cheat", http.StatusUnauthorized)
} else {
next.ServeHTTP(w, r)
}
})
}
}
func MarkeeCheckMiddleware(db *gorm.DB) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var count int64
data := r.Context().Value(DecodeBodyContextKey).(*models.Grade)
student := r.Context().Value(JWTClaimContextKey).(*models.Student)
db.Model(&models.Pairing{}).Where("id = ? AND student_id = ?", data.PairingID, student.ID).Count(&count)
if count == 0 {
HandleResponse(w, "Please don't cheat", http.StatusUnauthorized)
} else {
next.ServeHTTP(w, r)
}
})
}
}
func EnrollmentCheckMiddleware(db *gorm.DB, moduleIdResolver func(r *http.Request) string) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
data := r.Context().Value(JWTClaimContextKey).(*models.User)
moduleId := moduleIdResolver(r)
var count int64
db.Model(&models.Enrollment{}).Where("student_id = ? and module_id = ?", data.ID, moduleId).Count(&count)
if count == 0 {
HandleResponse(w, "Not enrolled in module", http.StatusUnauthorized)
} else {
next.ServeHTTP(w, r)
}
})
}
}
func ValidateJWT(r *http.Request) (*ClaimsData, error) {
authHeader := r.Header.Get("Authorization")
if !strings.Contains(authHeader, "Bearer") {
return nil, errors.New("Unauthorized")
}
tokenString := strings.Split(authHeader, "Bearer ")[1]
claims, err := ParseJWT(tokenString)
if err != nil {
return nil, errors.New("Unauthenticated")
}
return claims, nil
}
func AuthenticationMiddleware() mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if claims, err := ValidateJWT(r); err != nil {
HandleResponse(w, err.Error(), http.StatusUnauthorized)
} else {
ctxWithUser := context.WithValue(r.Context(), JWTClaimContextKey, &claims.Data)
next.ServeHTTP(w, r.WithContext(ctxWithUser))
}
})
}
}
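// Illustrative sketch (not part of this file): composing the middlewares above
// on a gorilla/mux router. The route path, the {moduleId} variable and db (an
// already-initialised *gorm.DB) are assumptions for the example.
//
//     router := mux.NewRouter()
//     modules := router.PathPrefix("/modules/{moduleId}").Subrouter()
//     modules.Use(AuthenticationMiddleware())
//     modules.Use(EnrollmentCheckMiddleware(db, func(r *http.Request) string {
//         return mux.Vars(r)["moduleId"]
//     }))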
func IsAdmin(user models.User, db *gorm.DB) bool {
result := db.Take(&models.Admin{}, "id = ?", user.ID)
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return false
}
return true
}
func IsSupervisor(user models.User, moduleId uint, db *gorm.DB) bool {
if bypass := IsAdmin(user, db); bypass {
return true
}
resultStaff := db.Take(&models.Staff{}, "id = ?", user.ID)
if errors.Is(resultStaff.Error, gorm.ErrRecordNotFound) {
return false
}
resultSupervision := db.Take(&models.Supervision{}, "staff_id = ? AND module_id = ?", user.ID, moduleId)
if errors.Is(resultSupervision.Error, gorm.ErrRecordNotFound) {
return false
}
return true
}
func IsEnrolled(user models.User, moduleId uint, db *gorm.DB) bool {
if bypass := IsAdmin(user, db); bypass {
return true
}
resultStaff := db.Take(&models.Student{}, "id = ?", user.ID)
if errors.Is(resultStaff.Error, gorm.ErrRecordNotFound) {
return false
}
resultSupervision := db.Take(&models.Enrollment{}, "student_id = ? AND module_id = ? ", user.ID, moduleId)
if errors.Is(resultSupervision.Error, gorm.ErrRecordNotFound) {
return false
}
return true
}
// Both Supervisor and Marker are authorized for marking
func IsMarker(user models.User, assignmentId uint, studentId uint, db *gorm.DB) bool {
if bypass := IsAdmin(user, db); bypass {
return true
}
var assignment models.Assignment
db.Model(&models.Assignment{}).Where("id = ?", assignmentId).Find(&assignment)
if bypass := IsSupervisor(user, assignment.ModuleID, db); bypass {
return true
}
result := db.Model(&models.Pairing{}).Take(&models.Pairing{},
models.Pairing{MarkerID: user.ID, StudentID: studentId, AssignmentID: assignmentId})
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return false
}
return true
}
func IsReviewee(user models.User, assignmentId uint, markerId uint, db *gorm.DB) bool {
if bypass := IsAdmin(user, db); bypass {
return true
}
result := db.Model(&models.Pairing{}).Take(&models.Pairing{},
models.Pairing{StudentID: user.ID, MarkerID: markerId, AssignmentID: assignmentId})
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return false
}
return true
}
func IsPair(user models.User, assignmentId uint, otherStudentId uint, db *gorm.DB) bool {
return IsMarker(user, assignmentId, otherStudentId, db) || IsReviewee(user, assignmentId, otherStudentId, db)
}
func IsMemberOf(claims models.User, moduleId uint, db *gorm.DB) bool {
return IsEnrolled(claims, moduleId, db) || IsSupervisor(claims, moduleId, db)
}
func IsAdminMiddleware(db *gorm.DB) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
data := r.Context().Value(JWTClaimContextKey).(*models.User)
if IsAdmin(*data, db) {
next.ServeHTTP(w, r)
} else {
HandleResponse(w, "Insufficient Permissions", http.StatusUnauthorized)
}
})
}
}
func LoginHandleFunc(db *gorm.DB, scope func(db *gorm.DB) *gorm.DB) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var input models.User
if err := DecodeParams(r, &input); err != nil {
HandleResponse(w, err.Error(), http.StatusBadRequest)
return
}
var user models.User
result := db.Scopes(scope).Take(&user, "email = ?", input.Email)
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
HandleResponse(w, "Incorrect email", http.StatusUnauthorized)
return
}
isEqual, _ := argon2id.ComparePasswordAndHash(input.Password, user.Password)
if !isEqual {
HandleResponse(w, "Incorrect Password", http.StatusUnauthorized)
return
}
token, err := GenerateJWT(user)
if err != nil {
HandleResponse(w, "Internal Error", http.StatusInternalServerError)
} else {
HandleResponse(w, token, http.StatusOK)
}
}
}
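// Illustrative sketch (not part of this file): registering the login handler
// for students. The route path and the "students" table name are assumptions
// for the example; the scope narrows the email lookup to that table.
//
//     router.HandleFunc("/login/student", LoginHandleFunc(db, func(tx *gorm.DB) *gorm.DB {
//         return tx.Table("students")
//     })).Methods("POST")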
| [
"\"JWT_SECRET\"",
"\"JWT_SECRET\"",
"\"JWT_SECRET\""
]
| []
| [
"JWT_SECRET"
]
| [] | ["JWT_SECRET"] | go | 1 | 0 | |
tasks/libs/common/gitlab.py | import errno
import json
import os
import platform
import re
import subprocess
from urllib.parse import quote
from invoke.exceptions import Exit
errno_regex = re.compile(r".*\[Errno (\d+)\] (.*)")
__all__ = ["Gitlab"]
class Gitlab(object):
BASE_URL = "https://gitlab.ddbuild.io/api/v4"
def __init__(self, api_token=None):
self.api_token = api_token if api_token else self._api_token()
def test_project_found(self, project):
"""
Checks if a project can be found. This is useful for testing access permissions to projects.
"""
result = self.project(project)
# name is arbitrary, just need to check if something is in the result
if "name" in result:
return
print("Cannot find GitLab project {}".format(project))
print("If you cannot see it in the GitLab WebUI, you likely need permission.")
raise Exit(code=1)
def project(self, project_name):
"""
Gets the project info.
"""
path = "/projects/{}".format(quote(project_name, safe=""))
return self.make_request(path, json_output=True)
def create_pipeline(self, project_name, ref, variables=None):
"""
Create a pipeline targeting a given reference of a project.
ref must be a branch or a tag.
"""
if variables is None:
variables = {}
path = "/projects/{}/pipeline".format(quote(project_name, safe=""))
headers = {"Content-Type": "application/json"}
data = json.dumps({"ref": ref, "variables": [{"key": k, "value": v} for (k, v) in variables.items()],})
return self.make_request(path, headers=headers, data=data, json_output=True)
def all_pipelines_for_ref(self, project_name, ref, sha=None):
"""
Gets all pipelines for a given reference (+ optionally git sha).
"""
page = 1
# Go through all pages
results = self.pipelines_for_ref(project_name, ref, sha=sha, page=page)
while results:
yield from results
page += 1
results = self.pipelines_for_ref(project_name, ref, sha=sha, page=page)
def pipelines_for_ref(self, project_name, ref, sha=None, page=1, per_page=100):
"""
Gets one page of pipelines for a given reference (+ optionally git sha).
"""
path = "/projects/{}/pipelines?ref={}&per_page={}&page={}".format(
quote(project_name, safe=""), quote(ref, safe=""), per_page, page
)
if sha:
path = "{}&sha={}".format(path, sha)
return self.make_request(path, json_output=True)
def last_pipeline_for_ref(self, project_name, ref, per_page=100):
"""
Gets the last pipeline for a given reference.
per_page cannot exceed 100.
"""
pipelines = self.pipelines_for_ref(project_name, ref, per_page=per_page)
if len(pipelines) == 0:
return None
return sorted(pipelines, key=lambda pipeline: pipeline['created_at'], reverse=True)[0]
def trigger_pipeline(self, project_name, data):
"""
Trigger a pipeline on a project using the trigger endpoint.
Requires a trigger token in the data object, in the 'token' field.
"""
path = "/projects/{}/trigger/pipeline".format(quote(project_name, safe=""))
if 'token' not in data:
raise Exit("Missing 'token' field in data object to trigger child pipelines", 1)
return self.make_request(path, data=data, json_input=True, json_output=True)
def pipeline(self, project_name, pipeline_id):
"""
Gets info for a given pipeline.
"""
path = "/projects/{}/pipelines/{}".format(quote(project_name, safe=""), pipeline_id)
return self.make_request(path, json_output=True)
def cancel_pipeline(self, project_name, pipeline_id):
"""
Cancels a given pipeline.
"""
path = "/projects/{}/pipelines/{}/cancel".format(quote(project_name, safe=""), pipeline_id)
return self.make_request(path, json_output=True, method="POST")
def commit(self, project_name, commit_sha):
"""
Gets info for a given commit sha.
"""
path = "/projects/{}/repository/commits/{}".format(quote(project_name, safe=""), commit_sha)
return self.make_request(path, json_output=True)
def artifact(self, project_name, job_id, artifact_name):
path = "/projects/{}/jobs/{}/artifacts/{}".format(quote(project_name, safe=""), job_id, artifact_name)
response = self.make_request(path, stream_output=True)
if response.status_code != 200:
return None
return response
def all_jobs(self, project_name, pipeline_id):
"""
Gets all the jobs for a pipeline.
"""
page = 1
# Go through all pages
results = self.jobs(project_name, pipeline_id, page)
while results:
yield from results
page += 1
results = self.jobs(project_name, pipeline_id, page)
def jobs(self, project_name, pipeline_id, page=1, per_page=100):
"""
Gets one page of the jobs for a pipeline.
per_page cannot exceed 100.
"""
path = "/projects/{}/pipelines/{}/jobs?per_page={}&page={}".format(
quote(project_name, safe=""), pipeline_id, per_page, page
)
return self.make_request(path, json_output=True)
def all_pipeline_schedules(self, project_name):
"""
Gets all pipelines schedules for the given project.
"""
page = 1
# Go through all pages
results = self.pipeline_schedules(project_name, page)
while results:
yield from results
page += 1
results = self.pipeline_schedules(project_name, page)
def pipeline_schedules(self, project_name, page=1, per_page=100):
"""
Gets one page of the pipeline schedules for the given project.
per_page cannot exceed 100
"""
path = "/projects/{}/pipeline_schedules?per_page={}&page={}".format(
quote(project_name, safe=""), per_page, page
)
return self.make_request(path, json_output=True)
def pipeline_schedule(self, project_name, schedule_id):
"""
Gets a single pipeline schedule.
"""
path = "/projects/{}/pipeline_schedules/{}".format(quote(project_name, safe=""), schedule_id)
return self.make_request(path, json_output=True)
def create_pipeline_schedule(self, project_name, description, ref, cron, cron_timezone=None, active=None):
"""
Create a new pipeline schedule with given attributes.
"""
path = "/projects/{}/pipeline_schedules".format(quote(project_name, safe=""))
data = {
"description": description,
"ref": ref,
"cron": cron,
"cron_timezone": cron_timezone,
"active": active,
}
no_none_data = {k: v for k, v in data.items() if v is not None}
return self.make_request(path, data=no_none_data, json_output=True, json_input=True)
def edit_pipeline_schedule(
self, project_name, schedule_id, description=None, ref=None, cron=None, cron_timezone=None, active=None
):
"""
Edit an existing pipeline schedule with given attributes.
"""
path = "/projects/{}/pipeline_schedules/{}".format(quote(project_name, safe=""), schedule_id)
data = {
"description": description,
"ref": ref,
"cron": cron,
"cron_timezone": cron_timezone,
"active": active,
}
no_none_data = {k: v for k, v in data.items() if v is not None}
return self.make_request(path, json_output=True, data=no_none_data, method="PUT")
def delete_pipeline_schedule(self, project_name, schedule_id):
"""
Delete an existing pipeline schedule.
"""
path = "/projects/{}/pipeline_schedules/{}".format(quote(project_name, safe=""), schedule_id)
# Gitlab API docs claim that this returns the JSON representation of the deleted schedule,
# but it actually returns an empty string
result = self.make_request(path, json_output=False, method="DELETE")
return "Pipeline schedule deleted; result: {}".format(result if result else "(empty)")
def create_pipeline_schedule_variable(self, project_name, schedule_id, key, value):
"""
Create a variable for an existing pipeline schedule.
"""
path = "/projects/{}/pipeline_schedules/{}/variables".format(quote(project_name, safe=""), schedule_id)
data = {
"key": key,
"value": value,
}
return self.make_request(path, data=data, json_output=True, json_input=True)
def edit_pipeline_schedule_variable(self, project_name, schedule_id, key, value):
"""
Edit an existing variable for a pipeline schedule.
"""
path = "/projects/{}/pipeline_schedules/{}/variables/{}".format(quote(project_name, safe=""), schedule_id, key)
return self.make_request(path, data={"value": value}, json_output=True, method="PUT")
def delete_pipeline_schedule_variable(self, project_name, schedule_id, key):
"""
Delete an existing variable for a pipeline schedule.
"""
path = "/projects/{}/pipeline_schedules/{}/variables/{}".format(quote(project_name, safe=""), schedule_id, key)
return self.make_request(path, json_output=True, method="DELETE")
def find_tag(self, project_name, tag_name):
"""
Look up a tag by its name.
"""
path = "/projects/{}/repository/tags/{}".format(quote(project_name, safe=""), tag_name)
return self.make_request(path, json_output=True)
def make_request(
self, path, headers=None, data=None, json_input=False, json_output=False, stream_output=False, method=None
):
"""
Utility to make a request to the Gitlab API.
headers: A hash of headers to pass to the request.
data: An object containing the body of the request.
json_input: If set to true, data is passed with the json parameter of requests.post instead of the data parameter.
By default, the request method is GET, or POST if data is not empty.
method: Can be set to "POST" to force a POST request even when data is empty.
By default, we return the text field of the response object. The following fields can alter this behavior:
json_output: the json field of the response object is returned.
stream_output: the request asks for a stream response, and the raw response object is returned.
"""
import requests
url = self.BASE_URL + path
headers = dict(headers or [])
headers["PRIVATE-TOKEN"] = self.api_token
# TODO: Use the param argument of requests instead of handling URL params
# manually
try:
# If json_input is true, we specifically want to send data using the json
# parameter of requests.post
if data and json_input:
r = requests.post(url, headers=headers, json=data, stream=stream_output)
elif method == "PUT":
r = requests.put(url, headers=headers, json=data, stream=stream_output)
elif method == "DELETE":
r = requests.delete(url, headers=headers, stream=stream_output)
elif data or method == "POST":
r = requests.post(url, headers=headers, data=data, stream=stream_output)
else:
r = requests.get(url, headers=headers, stream=stream_output)
if r.status_code == 401:
print(
"HTTP 401: Your GITLAB_TOKEN may have expired. You can "
"check and refresh it at "
"https://gitlab.ddbuild.io/profile/personal_access_tokens"
)
print("Gitlab says: {}".format(r.json()["error_description"]))
raise Exit(code=1)
except requests.exceptions.Timeout:
print("Connection to GitLab ({}) timed out.".format(url))
raise Exit(code=1)
except requests.exceptions.RequestException as e:
m = errno_regex.match(str(e))
if not m:
    print("Unknown error raised connecting to {}: {}".format(url, e))
    raise Exit(code=1)
# Parse errno to give a better explanation
# Requests doesn't have granularity at the level we want:
# http://docs.python-requests.org/en/master/_modules/requests/exceptions/
errno_code = int(m.group(1))
message = m.group(2)
if errno_code == errno.ENOEXEC:
print("Error resolving {}: {}".format(url, message))
elif errno_code == errno.ECONNREFUSED:
print("Connection to Gitlab ({}) refused".format(url))
else:
print("Error while connecting to {}: {}".format(url, str(e)))
raise Exit(code=1)
if json_output:
return r.json()
if stream_output:
return r
return r.text
def _api_token(self):
if "GITLAB_TOKEN" not in os.environ:
print("GITLAB_TOKEN not found in env. Trying keychain...")
if platform.system() == "Darwin":
try:
output = subprocess.check_output(
['security', 'find-generic-password', '-a', os.environ["USER"], '-s', 'GITLAB_TOKEN', '-w']
)
if len(output) > 0:
return output.strip()
except subprocess.CalledProcessError:
print("GITLAB_TOKEN not found in keychain...")
pass
print(
"Please create an 'api' access token at "
"https://gitlab.ddbuild.io/profile/personal_access_tokens and "
"add it as GITLAB_TOKEN in your keychain "
"or export it from your .bashrc or equivalent."
)
raise Exit(code=1)
return os.environ["GITLAB_TOKEN"]
| []
| []
| [
"USER",
"GITLAB_TOKEN"
]
| [] | ["USER", "GITLAB_TOKEN"] | python | 2 | 0 | |
analytics/circleci_analyze.py | #!/usr/bin/env python3.7
from datetime import datetime, time
import json
import requests
import itertools
import sqlite3
import os
import sys
from typing import Callable, Dict, Generator, List, MutableSet, Optional
def get_executor_price_rate(executor):
(etype, eclass) = executor['type'], executor['resource_class']
assert etype in ['machine', 'external', 'docker', 'macos', 'runner'], f'Unexpected type {etype}:{eclass}'
if etype == 'machine':
return {
'medium': 10,
'large': 20,
'xlarge': 100,
'2xlarge': 200,
'gpu.medium': 160,
'gpu.large': 320,
'gpu.small': 80,
'windows.medium': 40,
'windows.large': 120,
'windows.xlarge': 210,
'windows.2xlarge': 500,
'windows.gpu.nvidia.medium': 500,
'gpu.nvidia.small': 160,
'gpu.nvidia.medium': 240,
'gpu.nvidia.large': 1000,
}[eclass]
if etype == 'macos':
return {
'medium': 50,
'large': 100,
}[eclass]
if etype == 'docker':
return {
'small': 5,
'medium': 10,
'medium+': 15,
'large': 20,
'xlarge': 40,
'2xlarge': 80,
'2xlarge+': 100,
}[eclass]
if etype == 'runner' or etype == 'external':
return {
'pytorch/amd-gpu': 0,
}[eclass]
raise RuntimeError(f'Undefined executor {etype}:{eclass}')
price_per_credit = 6e-4
def get_circleci_token() -> str:
token_file_path = os.path.join(os.getenv('HOME'), '.circleci_token')
token = os.getenv('CIRCLECI_TOKEN')
if token is not None:
return token
if not os.path.exists(token_file_path):
raise RuntimeError('Cannot get CircleCI token'
                   ' from either the CIRCLECI_TOKEN environment variable'
                   ' or the ~/.circleci_token file')
with open(token_file_path) as f:
return f.read().strip()
def is_workflow_in_progress(workflow: Dict) -> bool:
return workflow['status'] in ['running', 'not_run', 'failing', 'on_hold']
def str2date(val: str) -> datetime:
assert val is not None
return datetime.fromisoformat(val[:-1] if val.endswith('Z') else val)
class CircleCICache:
def __init__(self, token: Optional[str], db_name: str = 'circleci-cache.db') -> None:
file_folder = os.path.dirname(__file__)
self.url_prefix = 'https://circleci.com/api/v2'
self.session = requests.session()
self.headers = {
'Accept': 'application/json',
'Circle-Token': token,
} if token is not None else None
self.db = sqlite3.connect(os.path.join(file_folder, db_name))
self.db.execute('CREATE TABLE IF NOT EXISTS jobs(slug TEXT NOT NULL, job_id INTEGER NOT NULL, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS artifacts(slug TEXT NOT NULL, job_id INTEGER NOT NULL, json TEXT NOT NULL);')
self.db.execute('CREATE UNIQUE INDEX IF NOT EXISTS jobs_key on jobs(slug, job_id);')
self.db.execute('CREATE TABLE IF NOT EXISTS workflows(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS pipeline_workflows(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS pipelines(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL, branch TEXT, revision TEXT);')
self.db.commit()
def is_offline(self) -> bool:
return self.headers is None
def _get_paged_items_list(self, url: str, params: Optional[Dict] = None, item_count: Optional[int] = -1) -> List:
rc, token, run_once = [], None, False
def _should_quit():
nonlocal run_once, rc, token
if not run_once:
run_once = True
return False
if token is None:
return True
if item_count is None:
return True
return item_count >= 0 and len(rc) >= item_count
if params is None:
params = {}
while not _should_quit():
if token is not None:
params['page-token'] = token
r = self.session.get(url, params=params, headers=self.headers)
try:
j = r.json()
except json.JSONDecodeError:
print(f"Failed to decode {rc}", file=sys.stderr)
raise
if 'message' in j:
raise RuntimeError(f'Failed to get list from {url}: {j["message"]}')
token = j['next_page_token']
rc.extend(j['items'])
return rc
def get_pipelines(self, project: str = 'github/pytorch/pytorch', branch: Optional[str] = None, item_count: Optional[int] = None) -> List:
if self.is_offline():
c = self.db.cursor()
cmd = "SELECT json from pipelines"
if branch is not None:
cmd += f" WHERE branch='{branch}'"
if item_count is not None and item_count > 0:
cmd += f" LIMIT {item_count}"
c.execute(cmd)
return [json.loads(val[0]) for val in c.fetchall()]
rc = self._get_paged_items_list(f'{self.url_prefix}/project/{project}/pipeline', {'branch': branch} if branch is not None else {}, item_count)
for pipeline in rc:
vcs = pipeline['vcs']
pid, branch, revision, pser = pipeline['id'], vcs['branch'], vcs['revision'], json.dumps(pipeline)
self.db.execute("INSERT OR REPLACE INTO pipelines(id, branch, revision, json) VALUES (?, ?, ?, ?)", (pid, branch, revision, pser))
self.db.commit()
return rc
def get_pipeline_workflows(self, pipeline) -> List:
c = self.db.cursor()
c.execute("SELECT json FROM pipeline_workflows WHERE id=?", (pipeline,))
rc = c.fetchone()
if rc is not None:
rc = json.loads(rc[0])
if not any(is_workflow_in_progress(w) for w in rc) or self.is_offline():
return rc
if self.is_offline():
return []
rc = self._get_paged_items_list(f'{self.url_prefix}/pipeline/{pipeline}/workflow')
self.db.execute("INSERT OR REPLACE INTO pipeline_workflows(id, json) VALUES (?, ?)", (pipeline, json.dumps(rc)))
self.db.commit()
return rc
def get_workflow_jobs(self, workflow, should_cache=True) -> List:
c = self.db.cursor()
c.execute("select json from workflows where id=?", (workflow,))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
return []
rc = self._get_paged_items_list(f'{self.url_prefix}/workflow/{workflow}/job')
if should_cache:
self.db.execute("INSERT INTO workflows(id, json) VALUES (?, ?)", (workflow, json.dumps(rc)))
self.db.commit()
return rc
def get_job(self, project_slug, job_number) -> Dict:
c = self.db.cursor()
c.execute("select json from jobs where slug=? and job_id = ?", (project_slug, job_number))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
return {}
r = self.session.get(f'{self.url_prefix}/project/{project_slug}/job/{job_number}', headers=self.headers)
try:
rc = r.json()
except json.JSONDecodeError:
print(f"Failed to decode {rc}", file=sys.stderr)
raise
self.db.execute("INSERT INTO jobs(slug,job_id, json) VALUES (?, ?, ?)", (project_slug, job_number, json.dumps(rc)))
self.db.commit()
return rc
def get_job_artifacts(self, project_slug, job_number) -> List[Dict]:
c = self.db.cursor()
c.execute("select json from artifacts where slug=? and job_id = ?", (project_slug, job_number))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
return [{}]
rc = self._get_paged_items_list(f"{self.url_prefix}/project/{project_slug}/{job_number}/artifacts")
self.db.execute("INSERT INTO artifacts(slug,job_id, json) VALUES (?, ?, ?)", (project_slug, job_number, json.dumps(rc)))
self.db.commit()
return rc
def get_pipeline_jobs(self, project: str = 'github/pytorch/pytorch', branch: Optional[str] = None, item_count: Optional[int] = None) -> Generator:
for pipeline in self.get_pipelines(project, branch, item_count):
for workflow in self.get_pipeline_workflows(pipeline['id']):
in_progress = is_workflow_in_progress(workflow)
for job in self.get_workflow_jobs(workflow['id'], should_cache=not in_progress):
yield (pipeline, workflow, job)
def get_jobs_summary(self, slug='gh/pytorch/pytorch', workflow='build') -> Dict:
items = self._get_paged_items_list(f'{self.url_prefix}/insights/{slug}/workflows/{workflow}/jobs')
return {item['name']: item for item in items}
def get_job_timeseries(self, job_name: str,
slug: str = 'gh/pytorch/pytorch',
workflow: str = 'build',
branch: Optional[str] = None) -> List:
params = {'branch': branch} if branch is not None else {}
items = self._get_paged_items_list(f'{self.url_prefix}/insights/{slug}/workflows/build/jobs/{job_name}', params)
return [(str2date(x['started_at']), x['duration']) for x in items if x['status'] == 'success']
def aggregate_by_day(series):
rc = {}
for (ts, val) in series:
date = datetime.combine(ts.date(), time())
valcount = [val, 1.0]
if date not in rc:
rc[date] = valcount
else:
rc[date] = [sum(x) for x in zip(rc[date], valcount)]
return [(x, rc[x][0] / rc[x][1]) for x in sorted(rc.keys())]
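# Illustrative example (not part of the original script): samples that fall on
# the same calendar day are averaged, e.g.
#
#     aggregate_by_day([(datetime(2020, 1, 1, 9), 60.0),
#                       (datetime(2020, 1, 1, 17), 120.0),
#                       (datetime(2020, 1, 2, 9), 30.0)])
#     # -> [(datetime(2020, 1, 1, 0, 0), 90.0), (datetime(2020, 1, 2, 0, 0), 30.0)]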
def filter_names(names: List[str], name_filter: Optional[str] = None) -> List[str]:
import re
if name_filter is None:
return names
filters = name_filter.split(",")
return [name for name in names if any(re.match(filter, name) for filter in filters)]
def common_prefix(names: List[str]) -> str:
if len(names) == 0 or len(names[0]) == 0:
return ''
if len(names) == 1:
return names[0]
rc = names[0][0]
while rc != names[0] and all(name.startswith(rc) for name in names[1:]):
rc = names[0][:len(rc) + 1]
return rc[:-1]
def plot_graph(name_filter: Optional[str] = None,
output_file: Optional[str] = None,
branch: Optional[str] = None) -> None:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
ci_cache = CircleCICache(token=get_circleci_token())
summary = ci_cache.get_jobs_summary()
test_jobs = [name for name in summary.keys() if name.startswith('pytorch') and 'test' in name]
filtered_jobs = filter_names(test_jobs, name_filter)
prefix = common_prefix(filtered_jobs)
if len(filtered_jobs) == 0:
print(f'Filter "{name_filter}" does not match any of {test_jobs}')
return
series = []
labels = []
styles = [f'{color}{style}' for (style, color) in itertools.product(['-', '--', '-.', ':'], ['b', 'g', 'r', 'c', 'm', 'y', 'k'])]
fig, ax = plt.subplots()
for name in test_jobs:
label = f"{name}(p95 = {int(summary[name]['metrics']['duration_metrics']['p95']/60)} min)"
if name not in filtered_jobs:
print(label)
continue
ts = ci_cache.get_job_timeseries(name, branch=branch)
if len(ts) == 0:
print(f'{label} time series is empty!')
continue
print(f'{label} time series has {len(ts)} elements')
labels.append(label[len(prefix):])
series.append(ts)
x, y = zip(*aggregate_by_day(ts))
plt.plot(x, [i / 60.0 for i in y], styles[len(labels) % len(styles)])
plt.legend(labels, loc='upper left')
plt.title(f'{prefix} timeseries')
ax.set_ylabel("Duration (m)")
# Format date
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
# Rotate tick labels
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
if output_file is not None:
plt.savefig(output_file)
else:
plt.show()
def print_line(line: str, padding: Optional[int] = None, newline: bool = True) -> None:
if padding is not None and len(line) < padding:
line += ' ' * (padding - len(line))
print(line, end='\n' if newline else '\r', flush=True)
def fetch_status(branch=None, item_count=50):
isatty = sys.stdout.isatty()
padding = os.get_terminal_size().columns - 1 if isatty else None
ci_cache = CircleCICache(token=get_circleci_token())
print(f"About to fetch {item_count} latest pipelines against {branch if branch is not None else 'all branches'}")
pipelines = ci_cache.get_pipelines(branch=branch, item_count=item_count)
total_price, total_master_price = 0, 0
for pipeline_idx, pipeline in enumerate(pipelines):
revision = pipeline['vcs']['revision']
branch = pipeline['vcs']['branch']
workflows = ci_cache.get_pipeline_workflows(pipeline['id'])
known_job_ids = []
for workflow in workflows:
url = f'https://app.circleci.com/pipelines/github/pytorch/pytorch/{workflow["pipeline_number"]}/workflows/{workflow["id"]}'
if is_workflow_in_progress(workflow):
print_line(f'Skipping {url} name:{workflow["name"]} status:{workflow["status"]}',
newline=not sys.stdout.isatty())
continue
rerun = False
total_credits, test_credits, gpu_credits, wincpu_credits, wingpu_credits = 0, 0, 0, 0, 0
jobs = ci_cache.get_workflow_jobs(workflow['id'])
for job in jobs:
job_name, job_status, job_number = job['name'], job['status'], job.get('job_number', None)
if job_status in ['blocked', 'canceled', 'unauthorized', 'running', 'not_run', 'failing']:
continue
if job_number is None:
print(job)
continue
if job_number in known_job_ids:
rerun = True
continue
job_info = ci_cache.get_job(job['project_slug'], job_number)
if 'executor' not in job_info:
print(f'executor not found in {job_info}')
continue
job_executor = job_info['executor']
resource_class = job_executor['resource_class']
if resource_class is None:
print(f'resource_class is none for {job_info}')
continue
job_on_gpu = 'gpu' in resource_class
job_on_win = 'windows' in resource_class
if job_status != 'infrastructure_fail':
duration = str2date(job_info['stopped_at']) - str2date(job_info['started_at'])
job_credits = get_executor_price_rate(job_executor) * int(job_info['duration']) * 1e-3 / 60
else:
job_credits, duration = 0, 0
job_cost = job_credits * price_per_credit
total_credits += job_credits
if 'test' in job_name or job_name.startswith('smoke_'):
test_credits += job_credits
elif job_on_gpu:
print(f'Running build job {job_name} on GPU!!!')
if job_on_gpu:
gpu_credits += job_credits
if job_on_win:
wingpu_credits += job_credits
if job_on_win and not job_on_gpu:
wincpu_credits += job_credits
known_job_ids.append(job_number)
print_line(f' {job_name} {job_status} {duration} ${job_cost:.2f}',
padding=padding, newline=not isatty)
# Increment totals
total_price += total_credits * price_per_credit
if branch in ['master', 'nightly', 'postnightly', 'release/1.6']:
total_master_price += total_credits * price_per_credit
# skip small jobs
if total_credits * price_per_credit < .1:
continue
workflow_status = f'[{pipeline_idx}/{len(pipelines)}]'
workflow_status += f' {url} {workflow["name"]} status:{workflow["status"]}'
workflow_status += f' price: ${total_credits * price_per_credit:.2f}'
workflow_status += ' (Rerun?)' if rerun else ''
workflow_status += f'\n\t\tdate: {workflow["created_at"]} branch:{branch} revision:{revision}'
workflow_status += f'\n\t\ttotal credits: {int(total_credits)}'
if test_credits != 0:
workflow_status += f' testing: {100 * test_credits / total_credits:.1f}%'
if gpu_credits != 0:
workflow_status += f' GPU testing: {100 * gpu_credits / total_credits:.1f}%'
if wingpu_credits != 0:
workflow_status += f' WINGPU/GPU: {100 * wingpu_credits / gpu_credits:.1f}%'
if wincpu_credits != 0:
workflow_status += f' Win CPU: {100 * wincpu_credits / total_credits:.1f}%'
workflow_status += f' Total: ${total_price:.2f} master fraction: {100 * total_master_price/ total_price:.1f}%'
print_line(workflow_status, padding=padding)
def plot_heatmap(cov_matrix, names):
import numpy as np
import matplotlib.pyplot as plt
assert cov_matrix.shape == (len(names), len(names))
fig, ax = plt.subplots()
ax.imshow(cov_matrix)
ax.set_xticks(np.arange(len(names)))
ax.set_yticks(np.arange(len(names)))
ax.set_xticklabels(names)
ax.set_yticklabels(names)
# Rotate tick labels
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
# Annotate values
for i in range(len(names)):
for j in range(len(names)):
ax.text(j, i, f'{cov_matrix[i, j]:.2f}', ha='center', va='center', color='w')
plt.show()
def filter_service_jobs(name):
if name.startswith('docker'):
return True
if name.startswith('binary'):
return True
return False
def filter_cuda_test(name):
if filter_service_jobs(name):
return False
if 'libtorch' in name:
return False
if 'test' not in name:
return False
# Skip jit-profiling tests
if 'jit-profiling' in name:
return False
if 'cuda11' in name:
return False
# Skip VS2017 tests
if 'vs2017' in name:
return False
return 'cuda' in name and 'nogpu' not in name
def filter_cuda_build(name):
if filter_service_jobs(name):
return False
if 'libtorch' in name:
return False
return 'cuda' in name and name.endswith('build')
def filter_windows_test(name):
if filter_service_jobs(name):
return False
# Skip jit-profiling tests
if 'jit-profiling' in name:
return False
return 'test' in name and 'windows' in name
def compute_covariance(branch='master', name_filter: Optional[Callable[[str], bool]] = None):
import numpy as np
revisions: MutableSet[str] = set()
job_summary: Dict[str, Dict[str, float]] = {}
# Extract data
print(f"Computing covariance for {branch if branch is not None else 'all branches'}")
ci_cache = CircleCICache(None)
pipelines = ci_cache.get_pipelines(branch=branch)
for pipeline in pipelines:
if pipeline['trigger']['type'] == 'schedule':
continue
revision = pipeline['vcs']['revision']
pipeline_jobs: Dict[str, float] = {}
blocked_jobs: MutableSet[str] = set()
workflows = ci_cache.get_pipeline_workflows(pipeline['id'])
for workflow in workflows:
if is_workflow_in_progress(workflow):
continue
jobs = ci_cache.get_workflow_jobs(workflow['id'])
for job in jobs:
job_name = job['name']
job_status = job['status']
# Handle renames
if job_name == 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_NO_AVX2_test':
job_name = 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_nogpu_NO_AVX2_test'
if job_name == 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_NO_AVX_NO_AVX2_test':
job_name = 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_nogpu_NO_AVX_test'
if job_status in ['infrastructure_fail', 'canceled']:
continue
if callable(name_filter) and not name_filter(job_name):
continue
if job_status == 'blocked':
blocked_jobs.add(job_name)
continue
if job_name in blocked_jobs:
blocked_jobs.remove(job_name)
result = 1.0 if job_status == 'success' else -1.0
pipeline_jobs[job_name] = result
# Skip build with blocked job [which usually means build failed due to the test failure]
if len(blocked_jobs) != 0:
continue
# Skip all success workflows
if all(result == 1.0 for result in pipeline_jobs.values()):
continue
revisions.add(revision)
for job_name in pipeline_jobs:
if job_name not in job_summary:
job_summary[job_name] = {}
job_summary[job_name][revision] = pipeline_jobs[job_name]
# Analyze results
job_names = sorted(job_summary.keys())
# revisions = sorted(revisions)
job_data = np.zeros((len(job_names), len(revisions)), dtype=np.float)
print(f"Number of observations: {len(revisions)}")
for job_idx, job_name in enumerate(job_names):
job_row = job_summary[job_name]
for rev_idx, revision in enumerate(revisions):
if revision in job_row:
job_data[job_idx, rev_idx] = job_row[revision]
success_rate = job_data[job_idx, ].sum(where=job_data[job_idx, ] > 0.0) / len(job_row)
present_rate = 1.0 * len(job_row) / len(revisions)
print(f"{job_name}: missing {100.0 * (1.0 - present_rate):.2f}% success rate: {100 * success_rate:.2f}%")
cov_matrix = np.corrcoef(job_data)
plot_heatmap(cov_matrix, job_names)
def print_artifacts(branch, item_count, name_filter: Callable[[str], bool]) -> None:
ci_cache = CircleCICache(token=get_circleci_token())
for pipeline, _, job in ci_cache.get_pipeline_jobs(branch=branch, item_count=item_count):
revision = pipeline['vcs']['revision']
if not name_filter(job["name"]):
continue
job_number = job.get("job_number")
if job_number is None:
continue
artifacts = ci_cache.get_job_artifacts('gh/pytorch/pytorch', job_number)
for artifact in artifacts:
name = os.path.basename(artifact['path'])
url = artifact["url"]
print(f"{revision} {name} {url}")
def print_duration(branch, item_count, name_filter: Callable[[str], bool]) -> None:
ci_cache = CircleCICache(token=get_circleci_token())
for pipeline, workflow, job in ci_cache.get_pipeline_jobs(branch=branch, item_count=item_count):
job_name, job_status, job_number = job['name'], job['status'], job.get("job_number")
revision = pipeline['vcs']['revision']
if not name_filter(job_name) or job_number is None:
continue
if job_status in ['blocked', 'canceled', 'unauthorized', 'running', 'not_run', 'failing']:
continue
started_at = str2date(job['started_at'])
stopped_at = str2date(job['stopped_at'])
duration = stopped_at - started_at
print(f"{job_name} {revision} {duration} {started_at}")
def parse_arguments():
from argparse import ArgumentParser
parser = ArgumentParser(description="Download and analyze circle logs")
parser.add_argument('--plot-graph', type=str, nargs='?', help="Plot job time trends", const='')
parser.add_argument('--output', type=str, help="Output file name for the graphs")
parser.add_argument('--get_artifacts', type=str)
parser.add_argument('--print-duration', type=str)
parser.add_argument('--branch', type=str)
parser.add_argument('--item_count', type=int, default=100)
parser.add_argument('--compute_covariance', choices=['cuda_test', 'cuda_build', 'windows_test'])
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
if args.get_artifacts is not None:
print_artifacts(branch=args.branch,
item_count=args.item_count,
name_filter=lambda x: args.get_artifacts in x)
sys.exit(0)
if args.print_duration is not None:
print_duration(branch=args.branch,
item_count=args.item_count,
name_filter=lambda x: args.print_duration in x)
sys.exit(0)
if args.compute_covariance is not None:
name_filter = {
'cuda_test': filter_cuda_test,
'cuda_build': filter_cuda_build,
'windows_test': filter_windows_test,
}[args.compute_covariance]
compute_covariance(branch=args.branch, name_filter=name_filter)
sys.exit(0)
if args.plot_graph is not None:
plot_graph(args.plot_graph, args.output, args.branch)
sys.exit(0)
fetch_status(branch=args.branch, item_count=args.item_count)
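# Illustrative invocations (not part of the original script); every flag used
# below is defined in parse_arguments() above:
#
#     python circleci_analyze.py --branch master --item_count 20
#     python circleci_analyze.py --plot-graph pytorch_linux --output trends.png --branch master
#     python circleci_analyze.py --compute_covariance cuda_test --branch master
#     python circleci_analyze.py --print-duration test --branch nightly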
| []
| []
| [
"CIRCLECI_TOKEN",
"HOME"
]
| [] | ["CIRCLECI_TOKEN", "HOME"] | python | 2 | 0 | |
contrib/devtools/symbol-check.py | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function, unicode_literals
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# scicoind and scicoin-qt
b'libgcc_s.so.1', # GCC base support
b'libc.so.6', # C library
b'libpthread.so.0', # threading
b'libanl.so.1', # DNS resolve
b'libm.so.6', # math library
b'librt.so.1', # real-time (clock)
b'ld-linux-x86-64.so.2', # 64-bit dynamic linker
b'ld-linux.so.2', # 32-bit dynamic linker
# scicoin-qt only
b'libX11-xcb.so.1', # part of X11
b'libX11.so.6', # part of X11
b'libxcb.so.1', # part of X11
b'libfontconfig.so.1', # font support
b'libfreetype.so.6', # font parsing
b'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + b'\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>7 and re.match(b'[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition(b'@')
is_import = line[6] == b'UND'
if version.startswith(b'@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
def check_version(max_versions, version):
if b'_' in version:
(lib, _, ver) = version.rpartition(b'_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split(b'.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
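# Illustrative example (not part of the original script), assuming Python 2
# semantics where the byte strings coming from readelf compare equal to the
# str keys of MAX_VERSIONS above:
#
#     check_version(MAX_VERSIONS, b'GLIBC_2.11')   # True  (2.11 <= 2.11)
#     check_version(MAX_VERSIONS, b'GLIBC_2.15')   # False (2.15 >  2.11)
#     check_version(MAX_VERSIONS, b'CXXABI_1.3.3') # True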
def read_libraries(filename):
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>2 and tokens[1] == b'(NEEDED)':
match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8')))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8')))
retval = 1
# Check dependency libraries
for library_name in read_libraries(filename):
if library_name not in ALLOWED_LIBRARIES:
print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8')))
retval = 1
exit(retval)
| []
| []
| [
"READELF",
"CPPFILT"
]
| [] | ["READELF", "CPPFILT"] | python | 2 | 0 | |
img/img.go | // Copyright 2017 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
// Package img implements OS image related functionality for micro computers.
//
// It includes fetching images and flashing them on an SDCard.
//
// It includes gathering environmental information, like the current country
// and location on the host to enable configuring the board with the same
// settings.
package img // import "periph.io/x/bootstrap/img"
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"os/user"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"time"
"github.com/DHowett/go-plist"
)
// GetTimeLocation returns the time location, e.g. America/Toronto.
//
// This is then used by Debian to figure out the right timezone (e.g. EST/EDT)
// based on the location via tables.
//
// We didn't find a way on Windows to retrieve the local "time zone location"
// in the expected format. In the meantime, "Etc/UTC" is returned on Windows.
func GetTimeLocation() string {
// OSX and Ubuntu
if d, _ := os.Readlink("/etc/localtime"); len(d) != 0 {
const p = "/usr/share/zoneinfo/"
if strings.HasPrefix(d, p) {
return d[len(p):]
}
}
// systemd
if d, _ := exec.Command("timedatectl").Output(); len(d) != 0 {
re := regexp.MustCompile(`(?m)Time zone\: ([^\s]+)`)
if match := re.FindSubmatch(d); len(match) != 0 {
return string(match[1])
}
}
return "Etc/UTC"
}
// GetCountry returns the automatically detected country.
//
// WARNING: This causes an outgoing HTTP request.
func GetCountry() string {
// TODO(maruel): Ask the OS first if possible.
b, err := fetchURL("https://ipinfo.io/country")
if err != nil {
return ""
}
return strings.TrimSpace(string(b))
}
// GetSetupSH returns the content of setup.sh.
//
// Returns nil in case of catastrophic error.
func GetSetupSH() []byte {
var p []string
if v, err := os.Getwd(); err == nil {
p = append(p, v)
}
if gp := os.Getenv("GOPATH"); len(gp) != 0 {
for _, v := range strings.Split(gp, string(os.PathListSeparator)) {
p = append(p, filepath.Join(v, "go", "src", "periph.io", "x", "bootstrap"))
}
} else {
p = append(p, filepath.Join(getHome(), "go", "src", "periph.io", "x", "bootstrap"))
}
for _, v := range p {
b, err := ioutil.ReadFile(filepath.Join(v, "setup.sh"))
if err == nil && len(b) != 0 {
return b
}
}
b, _ := fetchURL("https://raw.githubusercontent.com/periph/bootstrap/master/setup.sh")
return b
}
// FindPublicKey returns the absolute path to a public key for the user, if any.
func FindPublicKey() string {
home := getHome()
for _, i := range []string{"authorized_keys", "id_ed25519.pub", "id_ecdsa.pub", "id_rsa.pub"} {
p := filepath.Join(home, ".ssh", i)
if f, _ := os.Open(p); f != nil {
f.Close()
return p
}
}
return ""
}
// ListSDCards returns the SD cards found.
//
// Returns nil in case of error.
func ListSDCards() []string {
switch runtime.GOOS {
case "linux":
return listSDCardsLinux()
case "darwin":
return listSDCardsOSX()
case "windows":
return listSDCardsWindows()
default:
return nil
}
}
// Flash flashes imgPath to disk.
//
// Before flashing, it unmounts any partition mounted on disk.
func Flash(imgPath, disk string) error {
if err := Umount(disk); err != nil {
		return err
}
switch runtime.GOOS {
case "darwin":
if err := ddFlash(imgPath, toRawDiskOSX(disk)); err != nil {
return err
}
time.Sleep(time.Second)
// Assumes this image has at least one partition.
p := disk + "s1"
for {
if _, err := os.Stat(p); err == nil {
break
}
fmt.Printf(" (still waiting for partition %s to show up)\n", p)
time.Sleep(time.Second)
}
return nil
case "linux":
if err := ddFlash(imgPath, disk); err != nil {
return err
}
		// Wait a bit to try to work around "Error looking up object for device" when
// immediately using "/usr/bin/udisksctl mount" after this script.
time.Sleep(time.Second)
// Needs suffix 'p' for /dev/mmcblkN but not for /dev/sdX
p := disk
if strings.Contains(p, "mmcblk") {
p += "p"
}
// Assumes this image has at least one partition.
p += "1"
for {
if _, err := os.Stat(p); err == nil {
break
}
fmt.Printf(" (still waiting for partition %s to show up)\n", p)
time.Sleep(time.Second)
}
return nil
case "windows":
return flashWindows(imgPath, disk)
default:
return errors.New("Flash() is not implemented on this OS")
}
}
// Mount mounts a partition number n on disk p and returns the mount path.
func Mount(disk string, n int) (string, error) {
switch runtime.GOOS {
case "darwin":
// diskutil doesn't report which volume was mounted, so look at the ones
// before and the ones after and hope for the best.
before, err := getMountedVolumesOSX()
if err != nil {
return "", err
}
mnt := fmt.Sprintf("%ss%d", disk, n)
log.Printf("- Mounting %s", mnt)
if _, err = capture("", "diskutil", "mountDisk", mnt); err != nil {
return "", err
}
after, err := getMountedVolumesOSX()
if err != nil {
return "", err
}
if len(before)+1 != len(after) {
return "", errors.New("unexpected number of mounted drives")
}
found := ""
for i, a := range after {
if i == len(before) || a != before[i] {
found = "/Volumes/" + a
break
}
}
log.Printf(" Mounted as %s", found)
return found, nil
case "linux":
// Needs 'p' for /dev/mmcblkN but not for /dev/sdX
if strings.Contains(disk, "mmcblk") {
disk += "p"
}
mnt := fmt.Sprintf("%s%d", disk, n)
log.Printf("- Mounting %s", mnt)
// TODO(maruel): This assumes Ubuntu.
txt, _ := capture("", "/usr/bin/udisksctl", "mount", "-b", mnt)
if match := reMountLinux1.FindStringSubmatch(txt); len(match) != 0 {
log.Printf(" Mounted as %s", match[1])
return match[1], nil
}
if match := reMountLinux2.FindStringSubmatch(txt); len(match) != 0 {
log.Printf(" Mounted as %s", match[1])
return match[1], nil
}
return "", fmt.Errorf("failed to mount %q: %q", mnt, txt)
case "windows":
return mountWindows(disk, n)
default:
return "", errors.New("Mount() is not implemented on this OS")
}
}
// Umount unmounts all the partitions on disk 'disk'.
func Umount(disk string) error {
switch runtime.GOOS {
case "darwin":
log.Printf("- Unmounting %s", disk)
_, _ = capture("", "diskutil", "unmountDisk", disk)
return nil
case "linux":
matches, err := filepath.Glob(disk + "*")
if err != nil {
return err
}
sort.Strings(matches)
for _, m := range matches {
if m != disk {
// TODO(maruel): This assumes Ubuntu.
log.Printf("- Unmounting %s", m)
if _, err1 := capture("", "/usr/bin/udisksctl", "unmount", "-f", "-b", m); err == nil {
err = err1
}
}
}
return nil
case "windows":
return umountWindows(disk)
default:
return errors.New("Umount() is not implemented on this OS")
}
}
//
// run runs a command.
func run(name string, arg ...string) error {
log.Printf("run(%s %s)", name, strings.Join(arg, " "))
cmd := exec.Command(name, arg...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
// capture runs a command and return the stdout and stderr merged.
func capture(in, name string, arg ...string) (string, error) {
//log.Printf("capture(%s %s)", name, strings.Join(arg, " "))
cmd := exec.Command(name, arg...)
cmd.Stdin = strings.NewReader(in)
out, err := cmd.CombinedOutput()
return string(out), err
}
func getHome() string {
if usr, err := user.Current(); err == nil && len(usr.HomeDir) != 0 {
return usr.HomeDir
}
return os.Getenv("HOME")
}
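// ddFlash writes imgPath to dst with dd, asks the kernel to reread the
// partition table (except on OSX) and then flushes the I/O cache.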
func ddFlash(imgPath, dst string) error {
fmt.Printf("- Flashing (takes 2 minutes)\n")
	// OSX uses 'M' and Ubuntu uses 'm', but a plain byte count works everywhere.
if err := run("sudo", "dd", fmt.Sprintf("bs=%d", 4*1024*1024), "if="+imgPath, "of="+dst); err != nil {
return err
}
if runtime.GOOS != "darwin" {
		// Tell the OS to pick up the fact that the partition table changed. It's
		// fine even if the data is not written to the disk yet, as it is still
		// sitting in the OS cache. :)
if err := run("sudo", "partprobe"); err != nil {
return err
}
}
// This step may take a while for writeback cache.
fmt.Printf("- Flushing I/O cache\n")
if err := run("sudo", "sync"); err != nil {
return err
}
return nil
}
// Linux
var (
// "Mounted /dev/sdh2 at /media/<user>/<GUID>."
reMountLinux1 = regexp.MustCompile(`Mounted (?:[^ ]+) at ([^\\]+)\..*`)
// "Error mounting /dev/sdh2: GDBus.Error:org.freedesktop.UDisks2.Error.AlreadyMounted: Device /dev/sdh2"
// "is already mounted at `/media/<user>/<GUID>'.
reMountLinux2 = regexp.MustCompile(`is already mounted at ` + "`" + `([^\']+)\'`)
)
type lsblk struct {
BlockDevices []struct {
Name string
MajMin string `json:"maj:min"`
RM string
Size string
RO string
Type string
MountPoint string
}
}
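// listSDCardsLinux returns the removable, writable disks reported by
// "lsblk --json".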
func listSDCardsLinux() []string {
b, err := capture("", "lsblk", "--json")
if err != nil {
return nil
}
v := lsblk{}
err = json.Unmarshal([]byte(b), &v)
if err != nil {
return nil
}
var out []string
for _, dev := range v.BlockDevices {
if dev.RM == "1" && dev.RO == "0" && dev.Type == "disk" {
out = append(out, "/dev/"+dev.Name)
}
}
return out
}
// OSX
type diskutilList struct {
AllDisks []string
AllDisksAndPartitions []struct {
Content string
DeviceIdentifier string
Partitions []map[string]interface{}
MountPoint string
Size int64
VolumeName string
}
VolumesFromDisks []string
WholeDisks []string
}
type diskutilInfo struct {
Bootable bool
BusProtocol string
CanBeMadeBootable bool
CanBeMadeBootableRequiresDestroy bool
content string
DeviceBlockSize int64
DeviceIdentifier string
DeviceNode string
DeviceTreePath string
Ejectable bool
EjectableMediaAutomaticUnderSoftwareControl bool
EjectableOnly bool
FreeSpace int64
GlobalPermissionsEnabled bool
IOKitSize int64
IORegistryEntryName string
Internal bool
LowLevelFormatSupported bool
MediaName string
MediaType string
MountPoint string
OS9DriversInstalled bool
ParentWholeDisk string
RAIDMaster bool
RAIDSlice bool
Removable bool
RemovableMedia bool
RemovableMediaOrExternalDevice bool
SMARTStatus string
Size int64
SupportsGlobalPermissionsDisable bool
SystemImage bool
TotalSize int64
VirtualOrPhysical string
VolumeName string
VolumeSize int64
WholeDisk bool
Writable bool
WritableMedia bool
WritableVolume bool
}
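// listSDCardsOSX returns the removable, writable whole disks reported by
// "diskutil list" and "diskutil info".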
func listSDCardsOSX() []string {
b, err := capture("", "diskutil", "list", "-plist")
if err != nil {
return nil
}
disks := diskutilList{}
_, err = plist.Unmarshal([]byte(b), &disks)
if err != nil {
return nil
}
var out []string
for _, d := range disks.WholeDisks {
b, err = capture("", "diskutil", "info", "-plist", d)
if err != nil {
continue
}
info := diskutilInfo{}
_, err = plist.Unmarshal([]byte(b), &info)
if err != nil {
continue
}
if info.RemovableMedia && info.Writable {
out = append(out, info.DeviceNode)
}
}
return out
}
// toRawDiskOSX converts the path of a buffered disk to the equivalent raw
// device node.
//
// rdisk is several times faster than disk.
func toRawDiskOSX(p string) string {
const prefix = "/dev/disk"
if strings.HasPrefix(p, prefix) {
return "/dev/rdisk" + p[len(prefix):]
}
return p
}
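// getMountedVolumesOSX returns the sorted list of non-symlink entries found
// under /Volumes.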
func getMountedVolumesOSX() ([]string, error) {
f, err := os.Open("/Volumes")
if err != nil {
return nil, err
}
defer f.Close()
all, err := f.Readdir(-1)
if err != nil {
return nil, err
}
var actual []string
for _, f := range all {
if f.Mode()&os.ModeSymlink == 0 {
actual = append(actual, f.Name())
}
}
sort.Strings(actual)
return actual, nil
}
| [
"\"GOPATH\"",
"\"HOME\""
]
| []
| [
"GOPATH",
"HOME"
]
| [] | ["GOPATH", "HOME"] | go | 2 | 0 | |
projects/views.py | import os
from itertools import chain
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.db import models
from django.db.models import Q
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from comanage.comanage_api import create_co_person_role, remove_co_person_role, remove_co_cou
from datasets.models import Dataset, NSTemplate
from infrastructure.models import Infrastructure
from nsmessages.nsmessages import join_project_request, role_added_to_project, role_removed_from_project
from users.models import Affiliation
from users.models import ComanageCou
from users.models import NotaryServiceUser
from workflows import views as wf_views
from workflows.models import WorkflowNeo4j
from .forms import ProjectCreateForm, ProjectUpdateStaffForm, ProjectUpdatePiForm, \
ProjectUpdateAdminForm, ProjectUpdateInfrastructureForm, ProjectUpdateDatasetForm
from .models import Project, MembershipProjectWorkflow
from .projects import create_new_project
from .workflows import create_base_project_workflows, generate_neo4j_user_workflow_status
@login_required()
def projects(request):
"""
Display list of projects based on user role
:param request:
:return:
"""
if request.method == "POST":
if request.user.is_ig():
if request.POST.get("ig-self-assign-project"):
ns_project = Project.objects.filter(
uuid=request.POST.get("project_uuid")
).first()
if ns_project:
ns_project.project_igs.add(request.user)
ns_project.save()
role_added_to_project(request=request, user=request.user,
project=ns_project, role='Institutional Governance')
if request.POST.get("ig-self-unassign-project"):
ns_project = Project.objects.filter(
uuid=request.POST.get("project_uuid")
).first()
if ns_project:
ns_project.project_igs.remove(request.user)
ns_project.save()
role_removed_from_project(request=request, user=request.user,
project=ns_project, role='Institutional Governance')
if request.POST.get("join-project-request"):
ns_project = Project.objects.filter(
uuid=request.POST.get("project_uuid")
).first()
join_project_request(request=request, project=ns_project)
messages.success(request, '[INFO] Request to join Project "{0}" has been sent'.format(ns_project.name))
my_projects = Project.objects.filter(
Q(created_by=request.user.email) |
Q(comanage_pi_admins__in=[request.user]) |
Q(comanage_pi_members__in=[request.user]) |
Q(comanage_staff__in=[request.user]) |
Q(project_igs__in=[request.user]) |
Q(datasets__created_by__in=[request.user]) |
Q(infrastructure__created_by__in=[request.user])
).order_by('name').distinct()
other_projects = Project.objects.all().difference(my_projects).order_by('name')
return render(request, 'projects.html',
{'my_projects': my_projects,
'other_projects': other_projects,
'projects_page': 'active'})
def project_validate(request, project_uuid: str, user: NotaryServiceUser):
"""
Validate project components
    :param request:
:param project_uuid:
:param user:
:return:
"""
# TODO - be more rigorous in what it means to be "valid"
ns_project = Project.objects.filter(uuid=project_uuid).first()
if ns_project:
if not ns_project.affiliation.all():
messages.error(request, '[WARNING] must assign PI or STAFF members first ...')
return False, None
dsets = ns_project.datasets.all()
if dsets:
for ds in dsets:
if not ds.is_valid:
messages.error(request, 'Dataset (' + str(ds.description)[:34] + '..) is not validated')
return False, None
else:
messages.error(request, '[WARNING] Datasets have not been assigned ...')
return False, None
try:
if not ns_project.infrastructure.is_valid:
messages.error(request, 'Infrastructure (' + ns_project.infrastructure.name + ') is not validated')
return False, None
except Exception as e:
print(e)
        messages.error(request, '[WARNING] Infrastructure is not assigned ...')
return False, None
create_base_project_workflows(project_uuid, user)
else:
return False, 'Project ' + str(project_uuid) + ' is not found ...'
return True, None
@login_required()
def project_detail(request, uuid):
"""
Show project details based on user role
:param request:
:param uuid:
:return:
"""
project = get_object_or_404(Project, uuid=uuid)
pi_admins = project.comanage_pi_admins.all().order_by('display_name')
pi_members = project.comanage_pi_members.all().order_by('display_name')
staff = project.comanage_staff.all().order_by('display_name')
igs = project.project_igs.all().order_by('display_name')
datasets = project.datasets.all()
dso = [u.owner for u in datasets]
infrastructure = project.infrastructure
if infrastructure:
ifo = [infrastructure.owner]
else:
ifo = []
workflows = project.workflows.all()
aff_set = list(chain(pi_members, staff))
    is_member = any(request.user.id == x.id for x in aff_set)
affiliation_orig = project.affiliation.all().order_by('co_person_id')
co_person_ids = list(set([x.co_person_id for x in aff_set]))
affiliation = Affiliation.objects.filter(
co_person_id__in=co_person_ids
).order_by('co_person_id')
affiliation_added = list(set(affiliation).difference(set(affiliation_orig)))
affiliation_removed = list(set(affiliation_orig).difference(set(affiliation)))
for a in affiliation_added:
project.affiliation.add(a)
for a in affiliation_removed:
project.affiliation.remove(a)
project.save()
affiliations = project.affiliation.all().order_by('name').values('name').annotate(n=models.Count('pk'))
project_error = None
if request.method == "POST":
if request.POST.get("validate"):
project.is_valid, project_error = project_validate(request, project_uuid=project.uuid, user=request.user)
if request.POST.get("clear-project-datasets"):
# delete workflows
for workflow in workflows:
project.workflows.remove(workflow)
workflow.delete()
# remove datasets / infrastructure
for dataset in datasets:
project.datasets.remove(dataset)
project.is_valid = False
project.save()
return redirect('project_detail', uuid=project.uuid)
generate_neo4j_user_workflow_status(project, request.user)
project.save()
# print('### Project Workflows ###')
# for wf in workflows:
# print('-', wf.uuid, '|', wf.affiliation.name, '|', wf.name)
return render(request, 'project_detail.html', {
'projects_page': 'active',
'project': project,
'affiliations': affiliations,
'project_pi_admins': pi_admins,
'project_pi_members': pi_members,
'project_staff': staff,
'datasets': datasets,
'dataset_owners': dso,
'project_error': project_error,
'workflows': workflows,
'infrastructure': infrastructure,
'infrastructure_owner': ifo,
'institutional_governance': igs,
'is_member': is_member
})
def create_new_workflow(project_obj: Project,
dataset_obj: Dataset,
template_obj: NSTemplate,
user_obj: NotaryServiceUser):
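    """
    Create a WorkflowNeo4j object from a dataset/template pair, load its graph
    definition into Neo4j, and register it with the project via
    MembershipProjectWorkflow.
    """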
workflow = WorkflowNeo4j.objects.create(
name='neo4j_',
description=template_obj.description,
dataset=dataset_obj,
template=template_obj,
created_by=user_obj,
created_date=timezone.now(),
modified_by=user_obj,
modified_date=timezone.now(),
)
workflow.name = 'neo4j_' + str(workflow.uuid)
workflow.save()
wf_created = wf_views.create_neo4j_workflow(
graphml_file=template_obj.graphml_definition.name,
workflow_uuid=str(workflow.uuid)
)
if wf_created:
workflow.loaded_in_neo4j = True
workflow.save()
project_obj.workflows.add(workflow)
project_obj.save()
MembershipProjectWorkflow.objects.create(
project=project_obj,
dataset=dataset_obj,
template=template_obj,
workflow=workflow,
is_generated=True,
)
def project_new(request):
"""
Create a new project based on existing COmanage groups
:param request:
:return:
"""
if request.method == "POST":
form = ProjectCreateForm(request.POST)
if form.is_valid():
project_name = form.cleaned_data['name']
project_uuid = create_new_project(
request,
project_name=project_name,
project_description=form.cleaned_data['description'],
is_public=form.cleaned_data['is_public']
)
messages.success(request, '[INFO] Project {0} has been created'.format(project_name))
return redirect('project_detail', uuid=project_uuid)
else:
form = ProjectCreateForm()
return render(request, 'project_new.html', {'projects_page': 'active', 'form': form})
@login_required()
def project_edit(request, uuid):
"""
Edit existing project - allow PI_ADMIN to add datasets and infrastructure to projects
:param request:
:param uuid:
:return:
"""
project = get_object_or_404(Project, uuid=uuid)
project_pi_admins = project.comanage_pi_admins.all()
if request.method == "POST":
form = ProjectCreateForm(request.POST, instance=project)
if form.is_valid():
project.name = form.cleaned_data['name']
project.description = form.cleaned_data['description']
project.is_public = form.cleaned_data['is_public']
project.modified_by = request.user.email
project.is_valid = False
project.save()
messages.success(request, '[INFO] Project {0} has been updated'.format(project.name))
return redirect('project_detail', uuid=project.uuid)
else:
form = ProjectCreateForm(instance=project)
return render(request, 'project_edit.html', {'projects_page': 'active', 'form': form, 'project': project,
'project_pi_admins': project_pi_admins})
@login_required()
def project_delete(request, uuid):
"""
Delete existing project and database table relationships
:param request:
:param uuid:
:return:
"""
project = get_object_or_404(Project, uuid=uuid)
affiliations = project.affiliation.all().order_by('name').distinct('name')
comanage_staff = project.comanage_staff.all().order_by('display_name')
comanage_pi_members = project.comanage_pi_members.all().order_by('display_name')
comanage_pi_admins = project.comanage_pi_admins.all().order_by('display_name')
igs = project.project_igs.all().order_by('display_name')
datasets = project.datasets.all().order_by('name')
workflows = project.workflows.all().order_by('affiliation', 'name')
if request.method == "POST":
# delete workflows
for workflow in workflows:
project.workflows.remove(workflow)
workflow.delete()
# remove datasets / infrastructure
for dataset in datasets:
project.datasets.remove(dataset)
if project.infrastructure:
project.infrastructure = None
# remove personnel
staff_cou = ComanageCou.objects.filter(name=str(project.uuid) + os.getenv('COU_FLAG_STAFF')).first()
if staff_cou:
for staff in comanage_staff:
rm_role = remove_co_person_role(co_person_id=staff.co_person_id, co_cou_id=staff_cou.co_cou_id)
if rm_role:
project.comanage_staff.remove(staff)
pi_member_cou = ComanageCou.objects.filter(name=str(project.uuid) + os.getenv('COU_FLAG_PI_MEMBER')).first()
if pi_member_cou:
for pi_member in comanage_pi_members:
rm_role = remove_co_person_role(co_person_id=pi_member.co_person_id, co_cou_id=pi_member_cou.co_cou_id)
if rm_role:
project.comanage_staff.remove(pi_member)
pi_admin_cou = ComanageCou.objects.filter(name=str(project.uuid) + os.getenv('COU_FLAG_PI_ADMIN')).first()
if pi_admin_cou:
for pi_admin in comanage_pi_admins:
rm_role = remove_co_person_role(co_person_id=pi_admin.co_person_id, co_cou_id=pi_admin_cou.co_cou_id)
if rm_role:
project.comanage_staff.remove(pi_admin)
# remove IGs
for ig in igs:
project.project_igs.remove(ig)
# remove affiliations
for aff in affiliations:
project.affiliation.remove(aff)
# delete project
if remove_co_cou(staff_cou) and remove_co_cou(pi_member_cou) and remove_co_cou(pi_admin_cou):
messages.success(
request,
'[INFO] Project "{0}" has been deleted'.format(project.name))
project.delete()
else:
messages.error(
request,
'[ERROR] Failed to delete Project "{0}" ...'.format(project.name))
return redirect('projects')
return render(request, 'project_delete.html', {
'projects_page': 'active',
'project': project,
'affiliations': affiliations,
'project_pi_admins': comanage_pi_admins,
'project_pi_members': comanage_pi_members,
'project_staff': comanage_staff,
'datasets': datasets,
'workflows': workflows,
})
@login_required()
def project_update_staff(request, uuid):
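    """
    Update the STAFF membership of a project.

    Adds or removes the corresponding COmanage CO person roles and notifies the
    affected users; the project is marked as not valid until re-validated.
    """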
project = get_object_or_404(Project, uuid=uuid)
pi_admins = project.comanage_pi_admins.all()
pi_members = project.comanage_pi_members.all()
comanage_staff_orig = list(project.comanage_staff.all())
if request.method == "POST":
form = ProjectUpdateStaffForm(request.POST, instance=project, request=request)
if form.is_valid():
comanage_staff = list(form.cleaned_data.get('comanage_staff'))
comanage_staff_added = list(set(comanage_staff).difference(set(comanage_staff_orig)))
comanage_staff_removed = list(set(comanage_staff_orig).difference(set(comanage_staff)))
co_cou = ComanageCou.objects.filter(name=str(uuid) + os.getenv('COU_FLAG_STAFF')).first()
# update staff
for staff in comanage_staff_added:
# create co_person role and add ns_role
if create_co_person_role(co_person_id=staff.co_person_id, co_cou_id=co_cou.co_cou_id):
# add user to comanage_staff
project.comanage_staff.add(staff)
role_added_to_project(request=request, user=staff, project=project, role='Staff')
messages.success(
request,
'[INFO] "{0}" added as STAFF to project "{1}"'.format(staff.display_name, project.name))
for staff in comanage_staff_removed:
# remove co_person role and add ns_role
if remove_co_person_role(co_person_id=staff.co_person_id, co_cou_id=co_cou.co_cou_id):
# remove user from comanage_staff
project.comanage_staff.remove(staff)
role_removed_from_project(request=request, user=staff, project=project, role='Staff')
messages.success(
request,
'[INFO] "{0}" removed as STAFF from project "{1}:'.format(staff.display_name, project.name))
project.is_valid = False
project.save()
return redirect('project_detail', uuid=uuid)
else:
form = ProjectUpdateStaffForm(instance=project, request=request)
return render(request, 'project_update_staff.html', {
'form': form,
'projects_page': 'active',
'project': project,
'project_pi_admins': pi_admins,
'project_pi_members': pi_members
})
@login_required()
def project_update_pi(request, uuid):
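    """
    Update the PI / Co-PI membership of a project.

    Adds or removes the corresponding COmanage CO person roles and notifies the
    affected users; the project is marked as not valid until re-validated.
    """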
project = get_object_or_404(Project, uuid=uuid)
pi_admins = project.comanage_pi_admins.all()
pi_members = project.comanage_pi_members.all()
comanage_pi_members_orig = list(project.comanage_pi_members.all())
if request.method == "POST":
form = ProjectUpdatePiForm(request.POST, instance=project, request=request)
if form.is_valid():
comanage_pi_members = list(form.cleaned_data.get('comanage_pi_members'))
comanage_pi_members_added = list(set(comanage_pi_members).difference(set(comanage_pi_members_orig)))
comanage_pi_members_removed = list(set(comanage_pi_members_orig).difference(set(comanage_pi_members)))
co_cou = ComanageCou.objects.filter(name=str(uuid) + os.getenv('COU_FLAG_PI_MEMBER')).first()
# update staff
for pi_member in comanage_pi_members_added:
# create co_person role and add ns_role
if create_co_person_role(co_person_id=pi_member.co_person_id, co_cou_id=co_cou.co_cou_id):
# add user to comanage_staff
project.comanage_pi_members.add(pi_member)
role_added_to_project(request=request, user=pi_member, project=project, role='PI or Co-PI')
messages.success(
request,
'[INFO] "{0}" added as PI to project "{1}"'.format(pi_member.display_name, project.name))
for pi_member in comanage_pi_members_removed:
# remove co_person role and add ns_role
if remove_co_person_role(co_person_id=pi_member.co_person_id, co_cou_id=co_cou.co_cou_id):
# remove user from comanage_staff
project.comanage_pi_members.remove(pi_member)
role_removed_from_project(request=request, user=pi_member, project=project, role='PI or Co-PI')
messages.success(
request,
'[INFO] "{0}" removed as PI from project "{1}:'.format(pi_member.display_name, project.name))
project.is_valid = False
project.save()
return redirect('project_detail', uuid=uuid)
else:
form = ProjectUpdatePiForm(instance=project, request=request)
return render(request, 'project_update_pi.html', {
'form': form,
'projects_page': 'active',
'project': project,
'project_pi_admins': pi_admins,
'project_pi_members': pi_members
})
@login_required()
def project_update_admin(request, uuid):
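    """
    Update the project manager (PI admin) membership of a project.

    Adds or removes the corresponding COmanage CO person roles and notifies the
    affected users; the project is marked as not valid until re-validated.
    """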
project = get_object_or_404(Project, uuid=uuid)
pi_admins = project.comanage_pi_admins.all()
pi_members = project.comanage_pi_members.all()
comanage_pi_admins_orig = list(project.comanage_pi_admins.all())
if request.method == "POST":
form = ProjectUpdateAdminForm(request.POST, instance=project)
if form.is_valid():
comanage_pi_admins = list(form.cleaned_data.get('comanage_pi_admins'))
comanage_pi_admins_added = list(set(comanage_pi_admins).difference(set(comanage_pi_admins_orig)))
comanage_pi_admins_removed = list(set(comanage_pi_admins_orig).difference(set(comanage_pi_admins)))
co_cou = ComanageCou.objects.filter(name=str(uuid) + os.getenv('COU_FLAG_PI_ADMIN')).first()
# update staff
for pi_admin in comanage_pi_admins_added:
# create co_person role and add ns_role
if create_co_person_role(co_person_id=pi_admin.co_person_id, co_cou_id=co_cou.co_cou_id):
# add user to comanage_staff
project.comanage_pi_admins.add(pi_admin)
role_added_to_project(request=request, user=pi_admin, project=project, role='Project Manager')
messages.success(
request,
'[INFO] "{0}" added as MANAGER to project "{1}"'.format(pi_admin.display_name, project.name))
for pi_admin in comanage_pi_admins_removed:
# remove co_person role and add ns_role
if remove_co_person_role(co_person_id=pi_admin.co_person_id, co_cou_id=co_cou.co_cou_id):
# remove user from comanage_staff
project.comanage_pi_admins.remove(pi_admin)
role_removed_from_project(request=request, user=pi_admin, project=project, role='Project Manager')
messages.success(
request,
'[INFO] "{0}" removed as MANAGER from project "{1}:'.format(pi_admin.display_name,
project.name))
project.is_valid = False
project.save()
return redirect('project_detail', uuid=uuid)
else:
form = ProjectUpdateAdminForm(instance=project)
return render(request, 'project_update_admin.html', {
'form': form,
'projects_page': 'active',
'project': project,
'project_pi_admins': pi_admins,
'project_pi_members': pi_members
})
@login_required()
def project_update_infra(request, uuid):
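    """
    Assign or replace the Infrastructure attached to a project.

    Notifies the new (and, when replaced, the previous) infrastructure owner;
    the project is marked as not valid until re-validated.
    """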
project = get_object_or_404(Project, uuid=uuid)
project_pi_admins = project.comanage_pi_admins.all()
project_pi_members = project.comanage_pi_members.all()
infra = project.infrastructure
if request.method == "POST":
form = ProjectUpdateInfrastructureForm(request.POST, instance=project)
if form.is_valid():
infra_choice = form.cleaned_data['infrastructure']
# print(infra_choice)
if infra_choice is not None:
project.infrastructure = Infrastructure.objects.filter(id=infra_choice.id).first()
project.save()
role_added_to_project(request=request, user=project.infrastructure.owner,
project=project, role='Infrastructure Provider')
if infra:
if infra.id != project.infrastructure.id:
role_removed_from_project(request=request, user=infra.owner,
project=project, role='Infrastructure Provider')
messages.success(request, '[INFO] Infrastructure "{0}" added to Project "{1}"'.format(infra_choice,
project.name))
else:
project.save()
messages.success(request, '[INFO] No Infrastructure assigned to Project "{0}"'.format(project.name))
project.is_valid = False
project.save()
return redirect('project_detail', uuid=project.uuid)
else:
form = ProjectUpdateInfrastructureForm(instance=project)
return render(request, 'project_update_infrastructure.html',
{'projects_page': 'active', 'form': form,
'project_pi_admins': project_pi_admins, 'project_pi_members': project_pi_members,
'project': project})
@login_required()
def project_update_dataset(request, uuid):
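    """
    Add or remove Datasets on a project.

    Notifies the owners of the added and removed datasets; the project is
    marked as not valid until re-validated.
    """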
project = get_object_or_404(Project, uuid=uuid)
project_pi_admins = project.comanage_pi_admins.all()
project_pi_members = project.comanage_pi_members.all()
dataset_choices_orig = list(project.datasets.all())
if request.method == "POST":
form = ProjectUpdateDatasetForm(request.POST, instance=project)
if form.is_valid():
            dataset_choices = list(form.cleaned_data['datasets'])
dataset_choices_added = list(set(dataset_choices).difference(set(dataset_choices_orig)))
dataset_choices_removed = list(set(dataset_choices_orig).difference(set(dataset_choices)))
for dataset in dataset_choices_added:
project.datasets.add(dataset)
role_added_to_project(request=request, user=dataset.owner, project=project, role='Dataset Provider')
messages.success(request, '[INFO] Dataset "{0}" added to Project "{1}"'.format(dataset,
project.name))
for dataset in dataset_choices_removed:
project.datasets.remove(dataset)
role_removed_from_project(request=request, user=dataset.owner, project=project, role='Dataset Provider')
messages.success(request, '[INFO] Removed Dataset "{0}" from Project "{1}"'.format(dataset,
project.name))
project.save()
else:
project.save()
messages.success(request, '[INFO] No Dataset assigned to Project "{0}"'.format(project.name))
project.is_valid = False
project.save()
return redirect('project_detail', uuid=project.uuid)
else:
form = ProjectUpdateDatasetForm(instance=project)
return render(request, 'project_update_dataset.html',
{'projects_page': 'active', 'form': form,
'project_pi_admins': project_pi_admins, 'project_pi_members': project_pi_members,
'project': project})
| []
| []
| [
"COU_FLAG_STAFF",
"COU_FLAG_PI_ADMIN",
"COU_FLAG_PI_MEMBER"
]
| [] | ["COU_FLAG_STAFF", "COU_FLAG_PI_ADMIN", "COU_FLAG_PI_MEMBER"] | python | 3 | 0 | |
internal/ingress/controller/template/template.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package template
import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/url"
"os"
"os/exec"
"reflect"
"regexp"
"sort"
"strings"
text_template "text/template"
"time"
"github.com/pkg/errors"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/ingress-nginx/internal/ingress"
"k8s.io/ingress-nginx/internal/ingress/annotations/influxdb"
"k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit"
"k8s.io/ingress-nginx/internal/ingress/controller/config"
ing_net "k8s.io/ingress-nginx/internal/net"
)
const (
slash = "/"
nonIdempotent = "non_idempotent"
defBufferSize = 65535
defAuthSigninRedirectParam = "rd"
)
// TemplateWriter is the interface to render a template
type TemplateWriter interface {
Write(conf config.TemplateConfig) ([]byte, error)
}
// Template ...
type Template struct {
tmpl *text_template.Template
//fw watch.FileWatcher
bp *BufferPool
}
// NewTemplate returns a new Template instance or an
// error if the specified template file contains errors
func NewTemplate(file string) (*Template, error) {
data, err := ioutil.ReadFile(file)
if err != nil {
return nil, errors.Wrapf(err, "unexpected error reading template %v", file)
}
tmpl, err := text_template.New("nginx.tmpl").Funcs(funcMap).Parse(string(data))
if err != nil {
return nil, err
}
return &Template{
tmpl: tmpl,
bp: NewBufferPool(defBufferSize),
}, nil
}
// Write populates a buffer using a template with NGINX configuration
// and the servers and upstreams created by Ingress rules
func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) {
tmplBuf := t.bp.Get()
defer t.bp.Put(tmplBuf)
outCmdBuf := t.bp.Get()
defer t.bp.Put(outCmdBuf)
if klog.V(3).Enabled() {
b, err := json.Marshal(conf)
if err != nil {
klog.Errorf("unexpected error: %v", err)
}
klog.InfoS("NGINX", "configuration", string(b))
}
err := t.tmpl.Execute(tmplBuf, conf)
if err != nil {
return nil, err
}
	// squeeze multiple adjacent empty lines down to a single
	// blank line; this avoids the use of regular expressions
cmd := exec.Command("/ingress-controller/clean-nginx-conf.sh")
cmd.Stdin = tmplBuf
cmd.Stdout = outCmdBuf
if err := cmd.Run(); err != nil {
klog.Warningf("unexpected error cleaning template: %v", err)
return tmplBuf.Bytes(), nil
}
return outCmdBuf.Bytes(), nil
}
var (
funcMap = text_template.FuncMap{
"empty": func(input interface{}) bool {
check, ok := input.(string)
if ok {
return len(check) == 0
}
return true
},
"escapeLiteralDollar": escapeLiteralDollar,
"buildLuaSharedDictionaries": buildLuaSharedDictionaries,
"luaConfigurationRequestBodySize": luaConfigurationRequestBodySize,
"buildLocation": buildLocation,
"buildAuthLocation": buildAuthLocation,
"shouldApplyGlobalAuth": shouldApplyGlobalAuth,
"buildAuthResponseHeaders": buildAuthResponseHeaders,
"buildAuthProxySetHeaders": buildAuthProxySetHeaders,
"buildProxyPass": buildProxyPass,
"filterRateLimits": filterRateLimits,
"buildRateLimitZones": buildRateLimitZones,
"buildRateLimit": buildRateLimit,
"configForLua": configForLua,
"locationConfigForLua": locationConfigForLua,
"buildResolvers": buildResolvers,
"buildUpstreamName": buildUpstreamName,
"isLocationInLocationList": isLocationInLocationList,
"isLocationAllowed": isLocationAllowed,
"buildDenyVariable": buildDenyVariable,
"getenv": os.Getenv,
"contains": strings.Contains,
"hasPrefix": strings.HasPrefix,
"hasSuffix": strings.HasSuffix,
"trimSpace": strings.TrimSpace,
"toUpper": strings.ToUpper,
"toLower": strings.ToLower,
"formatIP": formatIP,
"quote": quote,
"buildNextUpstream": buildNextUpstream,
"getIngressInformation": getIngressInformation,
"serverConfig": func(all config.TemplateConfig, server *ingress.Server) interface{} {
return struct{ First, Second interface{} }{all, server}
},
"isValidByteSize": isValidByteSize,
"buildForwardedFor": buildForwardedFor,
"buildAuthSignURL": buildAuthSignURL,
"buildAuthSignURLLocation": buildAuthSignURLLocation,
"buildOpentracing": buildOpentracing,
"proxySetHeader": proxySetHeader,
"buildInfluxDB": buildInfluxDB,
"enforceRegexModifier": enforceRegexModifier,
"buildCustomErrorDeps": buildCustomErrorDeps,
"buildCustomErrorLocationsPerServer": buildCustomErrorLocationsPerServer,
"shouldLoadModSecurityModule": shouldLoadModSecurityModule,
"buildHTTPListener": buildHTTPListener,
"buildHTTPSListener": buildHTTPSListener,
"buildOpentracingForLocation": buildOpentracingForLocation,
"shouldLoadOpentracingModule": shouldLoadOpentracingModule,
"buildModSecurityForLocation": buildModSecurityForLocation,
"buildMirrorLocations": buildMirrorLocations,
"shouldLoadAuthDigestModule": shouldLoadAuthDigestModule,
"shouldLoadInfluxDBModule": shouldLoadInfluxDBModule,
"buildServerName": buildServerName,
}
)
// escapeLiteralDollar will replace the $ character with ${literal_dollar}
// which is made to work via the following configuration in the http section of
// the template:
// geo $literal_dollar {
// default "$";
// }
func escapeLiteralDollar(input interface{}) string {
inputStr, ok := input.(string)
if !ok {
return ""
}
return strings.Replace(inputStr, `$`, `${literal_dollar}`, -1)
}
// formatIP will wrap IPv6 addresses in [] and return IPv4 addresses
// without modification. If the input cannot be parsed as an IP address
// it is returned without modification.
func formatIP(input string) string {
ip := net.ParseIP(input)
if ip == nil {
return input
}
if v4 := ip.To4(); v4 != nil {
return input
}
return fmt.Sprintf("[%s]", input)
}
func quote(input interface{}) string {
var inputStr string
switch input := input.(type) {
case string:
inputStr = input
case fmt.Stringer:
inputStr = input.String()
case *string:
inputStr = *input
default:
inputStr = fmt.Sprintf("%v", input)
}
return fmt.Sprintf("%q", inputStr)
}
func buildLuaSharedDictionaries(c interface{}, s interface{}) string {
var out []string
cfg, ok := c.(config.Configuration)
if !ok {
klog.Errorf("expected a 'config.Configuration' type but %T was returned", c)
return ""
}
_, ok = s.([]*ingress.Server)
if !ok {
klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s)
return ""
}
for name, size := range cfg.LuaSharedDicts {
out = append(out, fmt.Sprintf("lua_shared_dict %s %dM", name, size))
}
sort.Strings(out)
return strings.Join(out, ";\n") + ";\n"
}
func luaConfigurationRequestBodySize(c interface{}) string {
cfg, ok := c.(config.Configuration)
if !ok {
klog.Errorf("expected a 'config.Configuration' type but %T was returned", c)
return "100" // just a default number
}
size := cfg.LuaSharedDicts["configuration_data"]
if size < cfg.LuaSharedDicts["certificate_data"] {
size = cfg.LuaSharedDicts["certificate_data"]
}
size = size + 1
return fmt.Sprintf("%d", size)
}
// configForLua returns some general configuration as Lua table represented as string
func configForLua(input interface{}) string {
all, ok := input.(config.TemplateConfig)
if !ok {
klog.Errorf("expected a 'config.TemplateConfig' type but %T was given", input)
return "{}"
}
return fmt.Sprintf(`{
use_forwarded_headers = %t,
use_proxy_protocol = %t,
is_ssl_passthrough_enabled = %t,
http_redirect_code = %v,
listen_ports = { ssl_proxy = "%v", https = "%v" },
hsts = %t,
hsts_max_age = %v,
hsts_include_subdomains = %t,
hsts_preload = %t,
}`,
all.Cfg.UseForwardedHeaders,
all.Cfg.UseProxyProtocol,
all.IsSSLPassthroughEnabled,
all.Cfg.HTTPRedirectCode,
all.ListenPorts.SSLProxy,
all.ListenPorts.HTTPS,
all.Cfg.HSTS,
all.Cfg.HSTSMaxAge,
all.Cfg.HSTSIncludeSubdomains,
all.Cfg.HSTSPreload,
)
}
// locationConfigForLua formats some location specific configuration into Lua table represented as string
func locationConfigForLua(l interface{}, a interface{}) string {
location, ok := l.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was given", l)
return "{}"
}
all, ok := a.(config.TemplateConfig)
if !ok {
klog.Errorf("expected a 'config.TemplateConfig' type but %T was given", a)
return "{}"
}
return fmt.Sprintf(`{
force_ssl_redirect = %t,
ssl_redirect = %t,
force_no_ssl_redirect = %t,
use_port_in_redirects = %t,
}`,
location.Rewrite.ForceSSLRedirect,
location.Rewrite.SSLRedirect,
isLocationInLocationList(l, all.Cfg.NoTLSRedirectLocations),
location.UsePortInRedirects,
)
}
// buildResolvers returns the resolvers reading the /etc/resolv.conf file
func buildResolvers(res interface{}, disableIpv6 interface{}) string {
// NGINX need IPV6 addresses to be surrounded by brackets
nss, ok := res.([]net.IP)
if !ok {
klog.Errorf("expected a '[]net.IP' type but %T was returned", res)
return ""
}
no6, ok := disableIpv6.(bool)
if !ok {
klog.Errorf("expected a 'bool' type but %T was returned", disableIpv6)
return ""
}
if len(nss) == 0 {
return ""
}
r := []string{"resolver"}
for _, ns := range nss {
if ing_net.IsIPV6(ns) {
if no6 {
continue
}
r = append(r, fmt.Sprintf("[%v]", ns))
} else {
r = append(r, fmt.Sprintf("%v", ns))
}
}
r = append(r, "valid=30s")
if no6 {
r = append(r, "ipv6=off")
}
return strings.Join(r, " ") + ";"
}
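// needsRewrite reports whether the location defines a rewrite target that
// differs from its path.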
func needsRewrite(location *ingress.Location) bool {
if len(location.Rewrite.Target) > 0 && location.Rewrite.Target != location.Path {
return true
}
return false
}
// enforceRegexModifier checks if the "rewrite-target" or "use-regex" annotation
// is used on any location path within a server
func enforceRegexModifier(input interface{}) bool {
locations, ok := input.([]*ingress.Location)
if !ok {
klog.Errorf("expected an '[]*ingress.Location' type but %T was returned", input)
return false
}
for _, location := range locations {
if needsRewrite(location) || location.Rewrite.UseRegex {
return true
}
}
return false
}
// buildLocation produces the location string, if the ingress has redirects
// (specified through the nginx.ingress.kubernetes.io/rewrite-target annotation)
func buildLocation(input interface{}, enforceRegex bool) string {
location, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return slash
}
path := location.Path
if enforceRegex {
return fmt.Sprintf(`~* "^%s"`, path)
}
if location.PathType != nil && *location.PathType == networkingv1beta1.PathTypeExact {
return fmt.Sprintf(`= %s`, path)
}
return path
}
func buildAuthLocation(input interface{}, globalExternalAuthURL string) string {
location, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return ""
}
if (location.ExternalAuth.URL == "") && (!shouldApplyGlobalAuth(input, globalExternalAuthURL)) {
return ""
}
str := base64.URLEncoding.EncodeToString([]byte(location.Path))
// removes "=" after encoding
str = strings.Replace(str, "=", "", -1)
pathType := "default"
if location.PathType != nil {
pathType = fmt.Sprintf("%v", *location.PathType)
}
return fmt.Sprintf("/_external-auth-%v-%v", str, pathType)
}
// shouldApplyGlobalAuth returns true only in case when ExternalAuth.URL is not set and
// GlobalExternalAuth is set and enabled
func shouldApplyGlobalAuth(input interface{}, globalExternalAuthURL string) bool {
location, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
}
if (location.ExternalAuth.URL == "") && (globalExternalAuthURL != "") && (location.EnableGlobalAuth) {
return true
}
return false
}
func buildAuthResponseHeaders(headers []string) []string {
res := []string{}
if len(headers) == 0 {
return res
}
for i, h := range headers {
hvar := strings.ToLower(h)
hvar = strings.NewReplacer("-", "_").Replace(hvar)
res = append(res, fmt.Sprintf("auth_request_set $authHeader%v $upstream_http_%v;", i, hvar))
res = append(res, fmt.Sprintf("proxy_set_header '%v' $authHeader%v;", h, i))
}
return res
}
func buildAuthProxySetHeaders(headers map[string]string) []string {
res := []string{}
if len(headers) == 0 {
return res
}
for name, value := range headers {
res = append(res, fmt.Sprintf("proxy_set_header '%v' '%v';", name, value))
}
sort.Strings(res)
return res
}
// buildProxyPass produces the proxy pass string, if the ingress has redirects
// (specified through the nginx.ingress.kubernetes.io/rewrite-target annotation)
// If the annotation nginx.ingress.kubernetes.io/add-base-url:"true" is specified it will
// add a base tag in the head of the response from the service
func buildProxyPass(host string, b interface{}, loc interface{}) string {
backends, ok := b.([]*ingress.Backend)
if !ok {
klog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b)
return ""
}
location, ok := loc.(*ingress.Location)
if !ok {
klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return ""
}
path := location.Path
proto := "http://"
proxyPass := "proxy_pass"
switch location.BackendProtocol {
case "HTTPS":
proto = "https://"
case "GRPC":
proto = "grpc://"
proxyPass = "grpc_pass"
case "GRPCS":
proto = "grpcs://"
proxyPass = "grpc_pass"
case "AJP":
proto = ""
proxyPass = "ajp_pass"
case "FCGI":
proto = ""
proxyPass = "fastcgi_pass"
}
upstreamName := "upstream_balancer"
for _, backend := range backends {
if backend.Name == location.Backend {
if backend.SSLPassthrough {
proto = "https://"
if location.BackendProtocol == "GRPCS" {
proto = "grpcs://"
}
}
break
}
}
// TODO: add support for custom protocols
if location.Backend == "upstream-default-backend" {
proto = "http://"
proxyPass = "proxy_pass"
}
// defProxyPass returns the default proxy_pass, just the name of the upstream
defProxyPass := fmt.Sprintf("%v %s%s;", proxyPass, proto, upstreamName)
// if the path in the ingress rule is equals to the target: no special rewrite
if path == location.Rewrite.Target {
return defProxyPass
}
if len(location.Rewrite.Target) > 0 {
var xForwardedPrefix string
if len(location.XForwardedPrefix) > 0 {
xForwardedPrefix = fmt.Sprintf("proxy_set_header X-Forwarded-Prefix \"%s\";\n", location.XForwardedPrefix)
}
return fmt.Sprintf(`
rewrite "(?i)%s" %s break;
%v%v %s%s;`, path, location.Rewrite.Target, xForwardedPrefix, proxyPass, proto, upstreamName)
}
// default proxy_pass
return defProxyPass
}
func filterRateLimits(input interface{}) []ratelimit.Config {
ratelimits := []ratelimit.Config{}
found := sets.String{}
servers, ok := input.([]*ingress.Server)
if !ok {
klog.Errorf("expected a '[]ratelimit.RateLimit' type but %T was returned", input)
return ratelimits
}
for _, server := range servers {
for _, loc := range server.Locations {
if loc.RateLimit.ID != "" && !found.Has(loc.RateLimit.ID) {
found.Insert(loc.RateLimit.ID)
ratelimits = append(ratelimits, loc.RateLimit)
}
}
}
return ratelimits
}
// buildRateLimitZones produces an array of limit_conn_zone in order to allow
// rate limiting of requests. Each Ingress rule could have up to three zones: one
// for connection limit by IP address, one for limiting requests per minute, and
// one for limiting requests per second.
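// For instance, a location with a connection limit would typically produce a
// zone of the form (id, name and size come from the rate limit annotation):
//   limit_conn_zone $limit_<id> zone=<name>:<size>m;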
func buildRateLimitZones(input interface{}) []string {
zones := sets.String{}
servers, ok := input.([]*ingress.Server)
if !ok {
klog.Errorf("expected a '[]*ingress.Server' type but %T was returned", input)
return zones.List()
}
for _, server := range servers {
for _, loc := range server.Locations {
if loc.RateLimit.Connections.Limit > 0 {
zone := fmt.Sprintf("limit_conn_zone $limit_%s zone=%v:%vm;",
loc.RateLimit.ID,
loc.RateLimit.Connections.Name,
loc.RateLimit.Connections.SharedSize)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
if loc.RateLimit.RPM.Limit > 0 {
zone := fmt.Sprintf("limit_req_zone $limit_%s zone=%v:%vm rate=%vr/m;",
loc.RateLimit.ID,
loc.RateLimit.RPM.Name,
loc.RateLimit.RPM.SharedSize,
loc.RateLimit.RPM.Limit)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
if loc.RateLimit.RPS.Limit > 0 {
zone := fmt.Sprintf("limit_req_zone $limit_%s zone=%v:%vm rate=%vr/s;",
loc.RateLimit.ID,
loc.RateLimit.RPS.Name,
loc.RateLimit.RPS.SharedSize,
loc.RateLimit.RPS.Limit)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
}
}
return zones.List()
}
// buildRateLimit produces an array of limit_req to be used inside the Path of
// Ingress rules. The order: connections by IP first, then RPS, and RPM last.
func buildRateLimit(input interface{}) []string {
limits := []string{}
loc, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return limits
}
if loc.RateLimit.Connections.Limit > 0 {
limit := fmt.Sprintf("limit_conn %v %v;",
loc.RateLimit.Connections.Name, loc.RateLimit.Connections.Limit)
limits = append(limits, limit)
}
if loc.RateLimit.RPS.Limit > 0 {
limit := fmt.Sprintf("limit_req zone=%v burst=%v nodelay;",
loc.RateLimit.RPS.Name, loc.RateLimit.RPS.Burst)
limits = append(limits, limit)
}
if loc.RateLimit.RPM.Limit > 0 {
limit := fmt.Sprintf("limit_req zone=%v burst=%v nodelay;",
loc.RateLimit.RPM.Name, loc.RateLimit.RPM.Burst)
limits = append(limits, limit)
}
if loc.RateLimit.LimitRateAfter > 0 {
limit := fmt.Sprintf("limit_rate_after %vk;",
loc.RateLimit.LimitRateAfter)
limits = append(limits, limit)
}
if loc.RateLimit.LimitRate > 0 {
limit := fmt.Sprintf("limit_rate %vk;",
loc.RateLimit.LimitRate)
limits = append(limits, limit)
}
return limits
}
func isLocationInLocationList(location interface{}, rawLocationList string) bool {
loc, ok := location.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", location)
return false
}
locationList := strings.Split(rawLocationList, ",")
for _, locationListItem := range locationList {
locationListItem = strings.Trim(locationListItem, " ")
if locationListItem == "" {
continue
}
if strings.HasPrefix(loc.Path, locationListItem) {
return true
}
}
return false
}
func isLocationAllowed(input interface{}) bool {
loc, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return false
}
return loc.Denied == nil
}
var (
denyPathSlugMap = map[string]string{}
)
// buildDenyVariable returns an nginx variable for a location in a
// server to be used in the whitelist check.
// A short random identifier is used to keep the variable name small and
// avoid issues with the size of the nginx variables hash bucket directive.
func buildDenyVariable(a interface{}) string {
l, ok := a.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", a)
return ""
}
if _, ok := denyPathSlugMap[l]; !ok {
denyPathSlugMap[l] = randomString()
}
return fmt.Sprintf("$deny_%v", denyPathSlugMap[l])
}
func buildUpstreamName(loc interface{}) string {
location, ok := loc.(*ingress.Location)
if !ok {
klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return ""
}
upstreamName := location.Backend
return upstreamName
}
func buildNextUpstream(i, r interface{}) string {
nextUpstream, ok := i.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", i)
return ""
}
retryNonIdempotent := r.(bool)
parts := strings.Split(nextUpstream, " ")
nextUpstreamCodes := make([]string, 0, len(parts))
for _, v := range parts {
if v != "" && v != nonIdempotent {
nextUpstreamCodes = append(nextUpstreamCodes, v)
}
if v == nonIdempotent {
retryNonIdempotent = true
}
}
if retryNonIdempotent {
nextUpstreamCodes = append(nextUpstreamCodes, nonIdempotent)
}
return strings.Join(nextUpstreamCodes, " ")
}
// Refer to http://nginx.org/en/docs/syntax.html
// Nginx differentiates between size and offset;
// offset directives additionally support gigabytes.
var nginxSizeRegex = regexp.MustCompile("^[0-9]+[kKmM]{0,1}$")
var nginxOffsetRegex = regexp.MustCompile("^[0-9]+[kKmMgG]{0,1}$")
// isValidByteSize validates size units valid in nginx
// http://nginx.org/en/docs/syntax.html
func isValidByteSize(input interface{}, isOffset bool) bool {
s, ok := input.(string)
if !ok {
klog.Errorf("expected an 'string' type but %T was returned", input)
return false
}
s = strings.TrimSpace(s)
if s == "" {
klog.V(2).Info("empty byte size, hence it will not be set")
return false
}
if isOffset {
return nginxOffsetRegex.MatchString(s)
}
return nginxSizeRegex.MatchString(s)
}
type ingressInformation struct {
Namespace string
Rule string
Service string
ServicePort string
Annotations map[string]string
}
func (info *ingressInformation) Equal(other *ingressInformation) bool {
if info.Namespace != other.Namespace {
return false
}
if info.Rule != other.Rule {
return false
}
if info.Service != other.Service {
return false
}
if info.ServicePort != other.ServicePort {
return false
}
if !reflect.DeepEqual(info.Annotations, other.Annotations) {
return false
}
return true
}
func getIngressInformation(i, h, p interface{}) *ingressInformation {
ing, ok := i.(*ingress.Ingress)
if !ok {
klog.Errorf("expected an '*ingress.Ingress' type but %T was returned", i)
return &ingressInformation{}
}
hostname, ok := h.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", h)
return &ingressInformation{}
}
path, ok := p.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", p)
return &ingressInformation{}
}
if ing == nil {
return &ingressInformation{}
}
info := &ingressInformation{
Namespace: ing.GetNamespace(),
Rule: ing.GetName(),
Annotations: ing.Annotations,
}
if ing.Spec.Backend != nil {
info.Service = ing.Spec.Backend.ServiceName
if ing.Spec.Backend.ServicePort.String() != "0" {
info.ServicePort = ing.Spec.Backend.ServicePort.String()
}
}
for _, rule := range ing.Spec.Rules {
if rule.HTTP == nil {
continue
}
if hostname != "_" && rule.Host == "" {
continue
}
if hostname != rule.Host {
continue
}
for _, rPath := range rule.HTTP.Paths {
if path == rPath.Path {
info.Service = rPath.Backend.ServiceName
if rPath.Backend.ServicePort.String() != "0" {
info.ServicePort = rPath.Backend.ServicePort.String()
}
return info
}
}
}
return info
}
func buildForwardedFor(input interface{}) string {
s, ok := input.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", input)
return ""
}
ffh := strings.Replace(s, "-", "_", -1)
ffh = strings.ToLower(ffh)
return fmt.Sprintf("$http_%v", ffh)
}
func buildAuthSignURL(authSignURL, authRedirectParam string) string {
u, _ := url.Parse(authSignURL)
q := u.Query()
if authRedirectParam == "" {
		authRedirectParam = defAuthSigninRedirectParam
}
if len(q) == 0 {
return fmt.Sprintf("%v?%v=$pass_access_scheme://$http_host$escaped_request_uri", authSignURL, authRedirectParam)
}
if q.Get(authRedirectParam) != "" {
return authSignURL
}
return fmt.Sprintf("%v&%v=$pass_access_scheme://$http_host$escaped_request_uri", authSignURL, authRedirectParam)
}
func buildAuthSignURLLocation(location, authSignURL string) string {
hasher := sha1.New()
hasher.Write([]byte(location))
hasher.Write([]byte(authSignURL))
return "@" + hex.EncodeToString(hasher.Sum(nil))
}
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func init() {
rand.Seed(time.Now().UnixNano())
}
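// randomString returns a 32 character random string built from letters.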
func randomString() string {
b := make([]rune, 32)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
func buildOpentracing(c interface{}, s interface{}) string {
cfg, ok := c.(config.Configuration)
if !ok {
klog.Errorf("expected a 'config.Configuration' type but %T was returned", c)
return ""
}
servers, ok := s.([]*ingress.Server)
if !ok {
klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s)
return ""
}
if !shouldLoadOpentracingModule(cfg, servers) {
return ""
}
buf := bytes.NewBufferString("")
if cfg.DatadogCollectorHost != "" {
buf.WriteString("opentracing_load_tracer /usr/local/lib64/libdd_opentracing.so /etc/nginx/opentracing.json;")
} else if cfg.ZipkinCollectorHost != "" {
buf.WriteString("opentracing_load_tracer /usr/local/lib/libzipkin_opentracing_plugin.so /etc/nginx/opentracing.json;")
} else if cfg.JaegerCollectorHost != "" {
buf.WriteString("opentracing_load_tracer /usr/local/lib/libjaegertracing_plugin.so /etc/nginx/opentracing.json;")
}
buf.WriteString("\r\n")
if cfg.OpentracingOperationName != "" {
buf.WriteString(fmt.Sprintf("opentracing_operation_name \"%s\";\n", cfg.OpentracingOperationName))
}
if cfg.OpentracingLocationOperationName != "" {
buf.WriteString(fmt.Sprintf("opentracing_location_operation_name \"%s\";\n", cfg.OpentracingLocationOperationName))
}
return buf.String()
}
// buildInfluxDB produces the single line configuration
// needed by the InfluxDB module to send request's metrics
// for the current resource
func buildInfluxDB(input interface{}) string {
cfg, ok := input.(influxdb.Config)
if !ok {
klog.Errorf("expected an 'influxdb.Config' type but %T was returned", input)
return ""
}
if !cfg.InfluxDBEnabled {
return ""
}
return fmt.Sprintf(
"influxdb server_name=%s host=%s port=%s measurement=%s enabled=true;",
cfg.InfluxDBServerName,
cfg.InfluxDBHost,
cfg.InfluxDBPort,
cfg.InfluxDBMeasurement,
)
}
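// proxySetHeader returns the header directive to use for a location:
// grpc_set_header for GRPC/GRPCS backends and proxy_set_header otherwise.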
func proxySetHeader(loc interface{}) string {
location, ok := loc.(*ingress.Location)
if !ok {
klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return "proxy_set_header"
}
if location.BackendProtocol == "GRPC" || location.BackendProtocol == "GRPCS" {
return "grpc_set_header"
}
return "proxy_set_header"
}
// buildCustomErrorDeps is a utility function returning a struct wrapper with
// the data required to build the 'CUSTOM_ERRORS' template
func buildCustomErrorDeps(upstreamName string, errorCodes []int, enableMetrics bool) interface{} {
return struct {
UpstreamName string
ErrorCodes []int
EnableMetrics bool
}{
UpstreamName: upstreamName,
ErrorCodes: errorCodes,
EnableMetrics: enableMetrics,
}
}
type errorLocation struct {
UpstreamName string
Codes []int
}
// buildCustomErrorLocationsPerServer is a utility function which will collect all
// custom error codes for all locations of a server block, deduplicates them,
// and returns a set which is unique by default-upstream and error code. It returns an array
// of errorLocations, each of which contain the upstream name and a list of
// error codes for that given upstream, so that sufficiently unique
// @custom error location blocks can be created in the template
func buildCustomErrorLocationsPerServer(input interface{}) []errorLocation {
server, ok := input.(*ingress.Server)
if !ok {
klog.Errorf("expected a '*ingress.Server' type but %T was returned", input)
return nil
}
codesMap := make(map[string]map[int]bool)
for _, loc := range server.Locations {
backendUpstream := loc.DefaultBackendUpstreamName
var dedupedCodes map[int]bool
if existingMap, ok := codesMap[backendUpstream]; ok {
dedupedCodes = existingMap
} else {
dedupedCodes = make(map[int]bool)
}
for _, code := range loc.CustomHTTPErrors {
dedupedCodes[code] = true
}
codesMap[backendUpstream] = dedupedCodes
}
errorLocations := []errorLocation{}
for upstream, dedupedCodes := range codesMap {
codesForUpstream := []int{}
for code := range dedupedCodes {
codesForUpstream = append(codesForUpstream, code)
}
sort.Ints(codesForUpstream)
errorLocations = append(errorLocations, errorLocation{
UpstreamName: upstream,
Codes: codesForUpstream,
})
}
sort.Slice(errorLocations, func(i, j int) bool {
return errorLocations[i].UpstreamName < errorLocations[j].UpstreamName
})
return errorLocations
}
func opentracingPropagateContext(location *ingress.Location) string {
if location == nil {
return ""
}
if location.BackendProtocol == "GRPC" || location.BackendProtocol == "GRPCS" {
return "opentracing_grpc_propagate_context;"
}
return "opentracing_propagate_context;"
}
// shouldLoadModSecurityModule determines whether or not the ModSecurity module needs to be loaded.
// First, it checks if `enable-modsecurity` is set in the ConfigMap. If it is not, it iterates over all locations to
// check if ModSecurity is enabled by the annotation `nginx.ingress.kubernetes.io/enable-modsecurity`.
func shouldLoadModSecurityModule(c interface{}, s interface{}) bool {
cfg, ok := c.(config.Configuration)
if !ok {
klog.Errorf("expected a 'config.Configuration' type but %T was returned", c)
return false
}
servers, ok := s.([]*ingress.Server)
if !ok {
klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s)
return false
}
// Determine if ModSecurity is enabled globally.
if cfg.EnableModsecurity {
return true
}
// If ModSecurity is not enabled globally, check if any location has it enabled via annotation.
for _, server := range servers {
for _, location := range server.Locations {
if location.ModSecurity.Enable {
return true
}
}
}
// Not enabled globally nor via annotation on a location, no need to load the module.
return false
}
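// buildHTTPListener builds the "listen" directives for the plain HTTP port of the
// given hostname, covering the configured IPv4 and, when enabled, IPv6 bind addresses.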
func buildHTTPListener(t interface{}, s interface{}) string {
var out []string
tc, ok := t.(config.TemplateConfig)
if !ok {
klog.Errorf("expected a 'config.TemplateConfig' type but %T was returned", t)
return ""
}
hostname, ok := s.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", s)
return ""
}
addrV4 := []string{""}
if len(tc.Cfg.BindAddressIpv4) > 0 {
addrV4 = tc.Cfg.BindAddressIpv4
}
co := commonListenOptions(tc, hostname)
out = append(out, httpListener(addrV4, co, tc)...)
if !tc.IsIPV6Enabled {
return strings.Join(out, "\n")
}
addrV6 := []string{"[::]"}
if len(tc.Cfg.BindAddressIpv6) > 0 {
addrV6 = tc.Cfg.BindAddressIpv6
}
out = append(out, httpListener(addrV6, co, tc)...)
return strings.Join(out, "\n")
}
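// buildHTTPSListener builds the "listen ... ssl" directives for the HTTPS port of the
// given hostname, covering the configured IPv4 and, when enabled, IPv6 bind addresses.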
func buildHTTPSListener(t interface{}, s interface{}) string {
var out []string
tc, ok := t.(config.TemplateConfig)
if !ok {
klog.Errorf("expected a 'config.TemplateConfig' type but %T was returned", t)
return ""
}
hostname, ok := s.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", s)
return ""
}
co := commonListenOptions(tc, hostname)
addrV4 := []string{""}
if len(tc.Cfg.BindAddressIpv4) > 0 {
addrV4 = tc.Cfg.BindAddressIpv4
}
out = append(out, httpsListener(addrV4, co, tc)...)
if !tc.IsIPV6Enabled {
return strings.Join(out, "\n")
}
addrV6 := []string{"[::]"}
if len(tc.Cfg.BindAddressIpv6) > 0 {
addrV6 = tc.Cfg.BindAddressIpv6
}
out = append(out, httpsListener(addrV6, co, tc)...)
return strings.Join(out, "\n")
}
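// commonListenOptions returns the listen options shared by every listener of a
// server block; default_server, reuseport and backlog are emitted only for the
// catch-all ("_") server because they are valid just once per port.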
func commonListenOptions(template config.TemplateConfig, hostname string) string {
var out []string
if template.Cfg.UseProxyProtocol {
out = append(out, "proxy_protocol")
}
if hostname != "_" {
return strings.Join(out, " ")
}
// setup options that are valid only once per port
out = append(out, "default_server")
if template.Cfg.ReusePort {
out = append(out, "reuseport")
}
out = append(out, fmt.Sprintf("backlog=%v", template.BacklogSize))
return strings.Join(out, " ")
}
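// httpListener renders one "listen" directive per bind address for the HTTP port.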
func httpListener(addresses []string, co string, tc config.TemplateConfig) []string {
out := make([]string, 0)
for _, address := range addresses {
lo := []string{"listen"}
if address == "" {
lo = append(lo, fmt.Sprintf("%v", tc.ListenPorts.HTTP))
} else {
lo = append(lo, fmt.Sprintf("%v:%v", address, tc.ListenPorts.HTTP))
}
lo = append(lo, co)
lo = append(lo, ";")
out = append(out, strings.Join(lo, " "))
}
return out
}
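// httpsListener renders one "listen ... ssl" directive per bind address, switching to
// the SSL passthrough proxy port (with proxy_protocol) when passthrough is enabled.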
func httpsListener(addresses []string, co string, tc config.TemplateConfig) []string {
out := make([]string, 0)
for _, address := range addresses {
lo := []string{"listen"}
if tc.IsSSLPassthroughEnabled {
if address == "" {
lo = append(lo, fmt.Sprintf("%v", tc.ListenPorts.SSLProxy))
} else {
lo = append(lo, fmt.Sprintf("%v:%v", address, tc.ListenPorts.SSLProxy))
}
if !strings.Contains(co, "proxy_protocol") {
lo = append(lo, "proxy_protocol")
}
} else {
if address == "" {
lo = append(lo, fmt.Sprintf("%v", tc.ListenPorts.HTTPS))
} else {
lo = append(lo, fmt.Sprintf("%v:%v", address, tc.ListenPorts.HTTPS))
}
}
lo = append(lo, co)
lo = append(lo, "ssl")
if tc.Cfg.UseHTTP2 {
lo = append(lo, "http2")
}
lo = append(lo, ";")
out = append(out, strings.Join(lo, " "))
}
return out
}
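// buildOpentracingForLocation returns the opentracing directives for a location,
// honoring the location-level annotation when it overrides the global setting.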
func buildOpentracingForLocation(isOTEnabled bool, location *ingress.Location) string {
isOTEnabledInLoc := location.Opentracing.Enabled
isOTSetInLoc := location.Opentracing.Set
if isOTEnabled {
if isOTSetInLoc && !isOTEnabledInLoc {
return "opentracing off;"
}
opc := opentracingPropagateContext(location)
if opc != "" {
opc = fmt.Sprintf("opentracing on;\n%v", opc)
}
return opc
}
if isOTSetInLoc && isOTEnabledInLoc {
opc := opentracingPropagateContext(location)
if opc != "" {
opc = fmt.Sprintf("opentracing on;\n%v", opc)
}
return opc
}
return ""
}
// shouldLoadOpentracingModule determines whether or not the Opentracing module needs to be loaded.
// First, it checks if `enable-opentracing` is set in the ConfigMap. If it is not, it iterates over all locations to
// check if Opentracing is enabled by the annotation `nginx.ingress.kubernetes.io/enable-opentracing`.
func shouldLoadOpentracingModule(c interface{}, s interface{}) bool {
cfg, ok := c.(config.Configuration)
if !ok {
klog.Errorf("expected a 'config.Configuration' type but %T was returned", c)
return false
}
servers, ok := s.([]*ingress.Server)
if !ok {
klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s)
return false
}
if cfg.EnableOpentracing {
return true
}
for _, server := range servers {
for _, location := range server.Locations {
if location.Opentracing.Enabled {
return true
}
}
}
return false
}
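// buildModSecurityForLocation returns the modsecurity directives for a location,
// enabling the module per location when it is not enabled globally and emitting any
// location-specific snippet, transaction ID and OWASP core-rules includes.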
func buildModSecurityForLocation(cfg config.Configuration, location *ingress.Location) string {
isMSEnabledInLoc := location.ModSecurity.Enable
isMSEnableSetInLoc := location.ModSecurity.EnableSet
isMSEnabled := cfg.EnableModsecurity
if !isMSEnabled && !isMSEnabledInLoc {
return ""
}
if isMSEnableSetInLoc && !isMSEnabledInLoc {
return "modsecurity off;"
}
var buffer bytes.Buffer
if !isMSEnabled {
buffer.WriteString(`modsecurity on;
`)
}
if location.ModSecurity.Snippet != "" {
buffer.WriteString(fmt.Sprintf(`modsecurity_rules '
%v
';
`, location.ModSecurity.Snippet))
}
if location.ModSecurity.TransactionID != "" {
buffer.WriteString(fmt.Sprintf(`modsecurity_transaction_id "%v";
`, location.ModSecurity.TransactionID))
}
if !isMSEnabled {
buffer.WriteString(`modsecurity_rules_file /etc/nginx/modsecurity/modsecurity.conf;
`)
}
if !cfg.EnableOWASPCoreRules && location.ModSecurity.OWASPRules {
buffer.WriteString(`modsecurity_rules_file /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf;
`)
}
return buffer.String()
}
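// buildMirrorLocations renders one internal mirror location block per unique
// mirror source configured across the given locations.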
func buildMirrorLocations(locs []*ingress.Location) string {
var buffer bytes.Buffer
mapped := sets.String{}
for _, loc := range locs {
if loc.Mirror.Source == "" || loc.Mirror.Target == "" {
continue
}
if mapped.Has(loc.Mirror.Source) {
continue
}
mapped.Insert(loc.Mirror.Source)
buffer.WriteString(fmt.Sprintf(`location = %v {
internal;
proxy_pass %v;
}
`, loc.Mirror.Source, loc.Mirror.Target))
}
return buffer.String()
}
// shouldLoadAuthDigestModule determines whether or not the ngx_http_auth_digest_module module needs to be loaded.
func shouldLoadAuthDigestModule(s interface{}) bool {
servers, ok := s.([]*ingress.Server)
if !ok {
klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s)
return false
}
for _, server := range servers {
for _, location := range server.Locations {
if !location.BasicDigestAuth.Secured {
continue
}
if location.BasicDigestAuth.Type == "digest" {
return true
}
}
}
return false
}
// shouldLoadInfluxDBModule determines whether or not the InfluxDB module needs to be loaded.
func shouldLoadInfluxDBModule(s interface{}) bool {
servers, ok := s.([]*ingress.Server)
if !ok {
klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s)
return false
}
for _, server := range servers {
for _, location := range server.Locations {
if location.InfluxDB.InfluxDBEnabled {
return true
}
}
}
return false
}
// buildServerName ensures wildcard hostnames are valid
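// For example (illustrative), "*.example.com" becomes the regular-expression
// server_name `~^(?<subdomain>[\w-]+)\.example\.com$`, which captures the
// wildcard label as $subdomain.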
func buildServerName(hostname string) string {
if !strings.HasPrefix(hostname, "*") {
return hostname
}
hostname = strings.Replace(hostname, "*.", "", 1)
parts := strings.Split(hostname, ".")
return `~^(?<subdomain>[\w-]+)\.` + strings.Join(parts, "\\.") + `$`
}
| []
| []
| []
| [] | [] | go | 0 | 0 | |
src/net/http/fs_test.go | // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net"
. "net/http"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strings"
"testing"
"time"
)
const (
testFile = "testdata/file"
testFileLen = 11
)
type wantRange struct {
start, end int64 // range [start,end)
}
var ServeFileRangeTests = []struct {
r string
code int
ranges []wantRange
}{
{r: "", code: StatusOK},
{r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}},
{r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
{r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
{r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
{r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
{r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
{r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
{r: "bytes=5-1000", code: StatusPartialContent, ranges: []wantRange{{5, testFileLen}}},
{r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
{r: "bytes=0-9", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}},
{r: "bytes=0-10", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
{r: "bytes=0-11", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
{r: "bytes=10-11", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
{r: "bytes=10-", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
{r: "bytes=11-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=11-12", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=12-12", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=11-100", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=12-100", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=100-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=100-1000", code: StatusRequestedRangeNotSatisfiable},
}
func TestServeFile(t *testing.T) {
setParallel(t)
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
c := ts.Client()
var err error
file, err := ioutil.ReadFile(testFile)
if err != nil {
t.Fatal("reading file:", err)
}
// set up the Request (re-used for all tests)
var req Request
req.Header = make(Header)
if req.URL, err = url.Parse(ts.URL); err != nil {
t.Fatal("ParseURL:", err)
}
req.Method = "GET"
// straight GET
_, body := getBody(t, "straight get", req, c)
if !bytes.Equal(body, file) {
t.Fatalf("body mismatch: got %q, want %q", body, file)
}
// Range tests
Cases:
for _, rt := range ServeFileRangeTests {
if rt.r != "" {
req.Header.Set("Range", rt.r)
}
resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req, c)
if resp.StatusCode != rt.code {
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
}
if rt.code == StatusRequestedRangeNotSatisfiable {
continue
}
wantContentRange := ""
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
}
cr := resp.Header.Get("Content-Range")
if cr != wantContentRange {
t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange)
}
ct := resp.Header.Get("Content-Type")
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
if strings.HasPrefix(ct, "multipart/byteranges") {
t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct)
}
}
if len(rt.ranges) > 1 {
typ, params, err := mime.ParseMediaType(ct)
if err != nil {
t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err)
continue
}
if typ != "multipart/byteranges" {
t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ)
continue
}
if params["boundary"] == "" {
t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct)
continue
}
if g, w := resp.ContentLength, int64(len(body)); g != w {
t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w)
continue
}
mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
for ri, rng := range rt.ranges {
part, err := mr.NextPart()
if err != nil {
t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err)
continue Cases
}
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w {
t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w)
}
body, err := ioutil.ReadAll(part)
if err != nil {
t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err)
continue Cases
}
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
}
_, err = mr.NextPart()
if err != io.EOF {
t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err)
}
}
}
}
func TestServeFile_DotDot(t *testing.T) {
tests := []struct {
req string
wantStatus int
}{
{"/testdata/file", 200},
{"/../file", 400},
{"/..", 400},
{"/../", 400},
{"/../foo", 400},
{"/..\\foo", 400},
{"/file/a", 200},
{"/file/a..", 200},
{"/file/a/..", 400},
{"/file/a\\..", 400},
}
for _, tt := range tests {
req, err := ReadRequest(bufio.NewReader(strings.NewReader("GET " + tt.req + " HTTP/1.1\r\nHost: foo\r\n\r\n")))
if err != nil {
t.Errorf("bad request %q: %v", tt.req, err)
continue
}
rec := httptest.NewRecorder()
ServeFile(rec, req, "testdata/file")
if rec.Code != tt.wantStatus {
t.Errorf("for request %q, status = %d; want %d", tt.req, rec.Code, tt.wantStatus)
}
}
}
var fsRedirectTestData = []struct {
original, redirect string
}{
{"/test/index.html", "/test/"},
{"/test/testdata", "/test/testdata/"},
{"/test/testdata/file/", "/test/testdata/file"},
}
func TestFSRedirect(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
defer ts.Close()
for _, data := range fsRedirectTestData {
res, err := Get(ts.URL + data.original)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if g, e := res.Request.URL.Path, data.redirect; g != e {
t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
}
}
}
type testFileSystem struct {
open func(name string) (File, error)
}
func (fs *testFileSystem) Open(name string) (File, error) {
return fs.open(name)
}
func TestFileServerCleans(t *testing.T) {
defer afterTest(t)
ch := make(chan string, 1)
fs := FileServer(&testFileSystem{func(name string) (File, error) {
ch <- name
return nil, errors.New("file does not exist")
}})
tests := []struct {
reqPath, openArg string
}{
{"/foo.txt", "/foo.txt"},
{"//foo.txt", "/foo.txt"},
{"/../foo.txt", "/foo.txt"},
}
req, _ := NewRequest("GET", "http://example.com", nil)
for n, test := range tests {
rec := httptest.NewRecorder()
req.URL.Path = test.reqPath
fs.ServeHTTP(rec, req)
if got := <-ch; got != test.openArg {
t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
}
}
}
func TestFileServerEscapesNames(t *testing.T) {
defer afterTest(t)
const dirListPrefix = "<pre>\n"
const dirListSuffix = "\n</pre>\n"
tests := []struct {
name, escaped string
}{
{`simple_name`, `<a href="simple_name">simple_name</a>`},
{`"'<>&`, `<a href="%22%27%3C%3E&">"'<>&</a>`},
{`?foo=bar#baz`, `<a href="%3Ffoo=bar%23baz">?foo=bar#baz</a>`},
{`<combo>?foo`, `<a href="%3Ccombo%3E%3Ffoo"><combo>?foo</a>`},
{`foo:bar`, `<a href="./foo:bar">foo:bar</a>`},
}
// We put each test file in its own directory in the fakeFS so we can look at it in isolation.
fs := make(fakeFS)
for i, test := range tests {
testFile := &fakeFileInfo{basename: test.name}
fs[fmt.Sprintf("/%d", i)] = &fakeFileInfo{
dir: true,
modtime: time.Unix(1000000000, 0).UTC(),
ents: []*fakeFileInfo{testFile},
}
fs[fmt.Sprintf("/%d/%s", i, test.name)] = testFile
}
ts := httptest.NewServer(FileServer(&fs))
defer ts.Close()
for i, test := range tests {
url := fmt.Sprintf("%s/%d", ts.URL, i)
res, err := Get(url)
if err != nil {
t.Fatalf("test %q: Get: %v", test.name, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("test %q: read Body: %v", test.name, err)
}
s := string(b)
if !strings.HasPrefix(s, dirListPrefix) || !strings.HasSuffix(s, dirListSuffix) {
t.Errorf("test %q: listing dir, full output is %q, want prefix %q and suffix %q", test.name, s, dirListPrefix, dirListSuffix)
}
if trimmed := strings.TrimSuffix(strings.TrimPrefix(s, dirListPrefix), dirListSuffix); trimmed != test.escaped {
t.Errorf("test %q: listing dir, filename escaped to %q, want %q", test.name, trimmed, test.escaped)
}
res.Body.Close()
}
}
func TestFileServerSortsNames(t *testing.T) {
defer afterTest(t)
const contents = "I am a fake file"
dirMod := time.Unix(123, 0).UTC()
fileMod := time.Unix(1000000000, 0).UTC()
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{
{
basename: "b",
modtime: fileMod,
contents: contents,
},
{
basename: "a",
modtime: fileMod,
contents: contents,
},
},
},
}
ts := httptest.NewServer(FileServer(&fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatalf("Get: %v", err)
}
defer res.Body.Close()
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("read Body: %v", err)
}
s := string(b)
if !strings.Contains(s, "<a href=\"a\">a</a>\n<a href=\"b\">b</a>") {
t.Errorf("output appears to be unsorted:\n%s", s)
}
}
func mustRemoveAll(dir string) {
err := os.RemoveAll(dir)
if err != nil {
panic(err)
}
}
func TestFileServerImplicitLeadingSlash(t *testing.T) {
defer afterTest(t)
tempDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("TempDir: %v", err)
}
defer mustRemoveAll(tempDir)
if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
defer ts.Close()
get := func(suffix string) string {
res, err := Get(ts.URL + suffix)
if err != nil {
t.Fatalf("Get %s: %v", suffix, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("ReadAll %s: %v", suffix, err)
}
res.Body.Close()
return string(b)
}
if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
t.Logf("expected a directory listing with foo.txt, got %q", s)
}
if s := get("/bar/foo.txt"); s != "Hello world" {
t.Logf("expected %q, got %q", "Hello world", s)
}
}
func TestDirJoin(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping test on windows")
}
wfi, err := os.Stat("/etc/hosts")
if err != nil {
t.Skip("skipping test; no /etc/hosts file")
}
test := func(d Dir, name string) {
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
gfi, err := f.Stat()
if err != nil {
t.Fatalf("stat of %s: %v", name, err)
}
if !os.SameFile(gfi, wfi) {
t.Errorf("%s got different file", name)
}
}
test(Dir("/etc/"), "/hosts")
test(Dir("/etc/"), "hosts")
test(Dir("/etc/"), "../../../../hosts")
test(Dir("/etc"), "/hosts")
test(Dir("/etc"), "hosts")
test(Dir("/etc"), "../../../../hosts")
// Not really directories, but since we use this trick in
// ServeFile, test it:
test(Dir("/etc/hosts"), "")
test(Dir("/etc/hosts"), "/")
test(Dir("/etc/hosts"), "../")
}
func TestEmptyDirOpenCWD(t *testing.T) {
test := func(d Dir) {
name := "fs_test.go"
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
}
test(Dir(""))
test(Dir("."))
test(Dir("./"))
}
func TestServeFileContentType(t *testing.T) {
defer afterTest(t)
const ctype = "icecream/chocolate"
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
switch r.FormValue("override") {
case "1":
w.Header().Set("Content-Type", ctype)
case "2":
// Explicitly inhibit sniffing.
w.Header()["Content-Type"] = []string{}
}
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
get := func(override string, want []string) {
resp, err := Get(ts.URL + "?override=" + override)
if err != nil {
t.Fatal(err)
}
if h := resp.Header["Content-Type"]; !reflect.DeepEqual(h, want) {
t.Errorf("Content-Type mismatch: got %v, want %v", h, want)
}
resp.Body.Close()
}
get("0", []string{"text/plain; charset=utf-8"})
get("1", []string{ctype})
get("2", nil)
}
func TestServeFileMimeType(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/style.css")
}))
defer ts.Close()
resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
want := "text/css; charset=utf-8"
if h := resp.Header.Get("Content-Type"); h != want {
t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
}
}
func TestServeFileFromCWD(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "fs_test.go")
}))
defer ts.Close()
r, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if r.StatusCode != 200 {
t.Fatalf("expected 200 OK, got %s", r.Status)
}
}
// Issue 13996
func TestServeDirWithoutTrailingSlash(t *testing.T) {
e := "/testdata/"
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, ".")
}))
defer ts.Close()
r, err := Get(ts.URL + "/testdata")
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if g := r.Request.URL.Path; g != e {
t.Errorf("got %s, want %s", g, e)
}
}
// Tests that ServeFile doesn't add a Content-Length if a Content-Encoding is
// specified.
func TestServeFileWithContentEncoding_h1(t *testing.T) { testServeFileWithContentEncoding(t, h1Mode) }
func TestServeFileWithContentEncoding_h2(t *testing.T) { testServeFileWithContentEncoding(t, h2Mode) }
func testServeFileWithContentEncoding(t *testing.T, h2 bool) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Encoding", "foo")
ServeFile(w, r, "testdata/file")
// Because the testdata is so small, it would fit in
// both the h1 and h2 Server's write buffers. For h1,
// sendfile is used, though, forcing a header flush at
// the io.Copy. http2 doesn't do a header flush so
		// buffers all 11 bytes and then adds its own
		// Content-Length. To prevent the Server from adding its own
		// Content-Length and to test ServeFile only, flush here.
w.(Flusher).Flush()
}))
defer cst.close()
resp, err := cst.c.Get(cst.ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if g, e := resp.ContentLength, int64(-1); g != e {
t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
}
}
func TestServeIndexHtml(t *testing.T) {
defer afterTest(t)
const want = "index.html says hello\n"
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
for _, path := range []string{"/testdata/", "/testdata/index.html"} {
res, err := Get(ts.URL + path)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if s := string(b); s != want {
t.Errorf("for path %q got %q, want %q", path, s, want)
}
res.Body.Close()
}
}
func TestFileServerZeroByte(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
res, err := Get(ts.URL + "/..\x00")
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if res.StatusCode == 200 {
t.Errorf("got status 200; want an error. Body is:\n%s", string(b))
}
}
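// fakeFileInfo is an in-memory os.FileInfo implementation used by fakeFS in these tests.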
type fakeFileInfo struct {
dir bool
basename string
modtime time.Time
ents []*fakeFileInfo
contents string
err error
}
func (f *fakeFileInfo) Name() string { return f.basename }
func (f *fakeFileInfo) Sys() interface{} { return nil }
func (f *fakeFileInfo) ModTime() time.Time { return f.modtime }
func (f *fakeFileInfo) IsDir() bool { return f.dir }
func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) }
func (f *fakeFileInfo) Mode() os.FileMode {
if f.dir {
return 0755 | os.ModeDir
}
return 0644
}
type fakeFile struct {
io.ReadSeeker
fi *fakeFileInfo
path string // as opened
entpos int
}
func (f *fakeFile) Close() error { return nil }
func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil }
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
if !f.fi.dir {
return nil, os.ErrInvalid
}
var fis []os.FileInfo
limit := f.entpos + count
if count <= 0 || limit > len(f.fi.ents) {
limit = len(f.fi.ents)
}
for ; f.entpos < limit; f.entpos++ {
fis = append(fis, f.fi.ents[f.entpos])
}
if len(fis) == 0 && count > 0 {
return fis, io.EOF
} else {
return fis, nil
}
}
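// fakeFS is an in-memory FileSystem keyed by cleaned path, serving fakeFile values.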
type fakeFS map[string]*fakeFileInfo
func (fs fakeFS) Open(name string) (File, error) {
name = path.Clean(name)
f, ok := fs[name]
if !ok {
return nil, os.ErrNotExist
}
if f.err != nil {
return nil, f.err
}
return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil
}
func TestDirectoryIfNotModified(t *testing.T) {
defer afterTest(t)
const indexContents = "I am a fake index.html file"
fileMod := time.Unix(1000000000, 0).UTC()
fileModStr := fileMod.Format(TimeFormat)
dirMod := time.Unix(123, 0).UTC()
indexFile := &fakeFileInfo{
basename: "index.html",
modtime: fileMod,
contents: indexContents,
}
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{indexFile},
},
"/index.html": indexFile,
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if string(b) != indexContents {
t.Fatalf("Got body %q; want %q", b, indexContents)
}
res.Body.Close()
lastMod := res.Header.Get("Last-Modified")
if lastMod != fileModStr {
t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr)
}
req, _ := NewRequest("GET", ts.URL, nil)
req.Header.Set("If-Modified-Since", lastMod)
c := ts.Client()
res, err = c.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 304 {
t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode)
}
res.Body.Close()
// Advance the index.html file's modtime, but not the directory's.
indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
res, err = c.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res)
}
res.Body.Close()
}
func mustStat(t *testing.T, fileName string) os.FileInfo {
fi, err := os.Stat(fileName)
if err != nil {
t.Fatal(err)
}
return fi
}
func TestServeContent(t *testing.T) {
defer afterTest(t)
type serveParam struct {
name string
modtime time.Time
content io.ReadSeeker
contentType string
etag string
}
servec := make(chan serveParam, 1)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
p := <-servec
if p.etag != "" {
w.Header().Set("ETag", p.etag)
}
if p.contentType != "" {
w.Header().Set("Content-Type", p.contentType)
}
ServeContent(w, r, p.name, p.modtime, p.content)
}))
defer ts.Close()
type testCase struct {
// One of file or content must be set:
file string
content io.ReadSeeker
modtime time.Time
serveETag string // optional
serveContentType string // optional
reqHeader map[string]string
wantLastMod string
wantContentType string
wantContentRange string
wantStatus int
}
htmlModTime := mustStat(t, "testdata/index.html").ModTime()
tests := map[string]testCase{
"no_last_modified": {
file: "testdata/style.css",
wantContentType: "text/css; charset=utf-8",
wantStatus: 200,
},
"with_last_modified": {
file: "testdata/index.html",
wantContentType: "text/html; charset=utf-8",
modtime: htmlModTime,
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
wantStatus: 200,
},
"not_modified_modtime": {
file: "testdata/style.css",
serveETag: `"foo"`, // Last-Modified sent only when no ETag
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_modtime_with_contenttype": {
file: "testdata/style.css",
serveContentType: "text/css", // explicit content type
serveETag: `"foo"`, // Last-Modified sent only when no ETag
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_etag": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"foo"`,
},
wantStatus: 304,
},
"not_modified_etag_no_seek": {
content: panicOnSeek{nil}, // should never be called
serveETag: `W/"foo"`, // If-None-Match uses weak ETag comparison
reqHeader: map[string]string{
"If-None-Match": `"baz", W/"foo"`,
},
wantStatus: 304,
},
"if_none_match_mismatch": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"Foo"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_good": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
},
"range_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"A"`,
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
},
"range_match_weak_etag": {
file: "testdata/style.css",
serveETag: `W/"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `W/"A"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_no_overlap": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=10-20",
},
wantStatus: StatusRequestedRangeNotSatisfiable,
wantContentType: "text/plain; charset=utf-8",
wantContentRange: "bytes */8",
},
// An If-Range resource for entity "A", but entity "B" is now current.
// The Range request should be ignored.
"range_no_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"B"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_with_modtime": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 0 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"range_with_modtime_mismatch": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 0 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:19 GMT",
},
wantStatus: StatusOK,
wantContentType: "text/css; charset=utf-8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"range_with_modtime_nanos": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 123 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"unix_zero_modtime": {
content: strings.NewReader("<html>foo"),
modtime: time.Unix(0, 0),
wantStatus: StatusOK,
wantContentType: "text/html; charset=utf-8",
},
"ifmatch_matches": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `"Z", "A"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"ifmatch_star": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `*`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"ifmatch_failed": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `"B"`,
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
},
"ifmatch_fails_on_weak_etag": {
file: "testdata/style.css",
serveETag: `W/"A"`,
reqHeader: map[string]string{
"If-Match": `W/"A"`,
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
},
"if_unmodified_since_true": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Unmodified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
},
"if_unmodified_since_false": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Unmodified-Since": htmlModTime.Add(-2 * time.Second).UTC().Format(TimeFormat),
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
},
}
for testName, tt := range tests {
var content io.ReadSeeker
if tt.file != "" {
f, err := os.Open(tt.file)
if err != nil {
t.Fatalf("test %q: %v", testName, err)
}
defer f.Close()
content = f
} else {
content = tt.content
}
for _, method := range []string{"GET", "HEAD"} {
			// restore content in case it was consumed by the previous method
			if content, ok := content.(*strings.Reader); ok {
				// Seek takes (offset, whence); rewind to the start of the reader.
				content.Seek(0, io.SeekStart)
}
servec <- serveParam{
name: filepath.Base(tt.file),
content: content,
modtime: tt.modtime,
etag: tt.serveETag,
contentType: tt.serveContentType,
}
req, err := NewRequest(method, ts.URL, nil)
if err != nil {
t.Fatal(err)
}
for k, v := range tt.reqHeader {
req.Header.Set(k, v)
}
c := ts.Client()
res, err := c.Do(req)
if err != nil {
t.Fatal(err)
}
io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
if res.StatusCode != tt.wantStatus {
t.Errorf("test %q using %q: got status = %d; want %d", testName, method, res.StatusCode, tt.wantStatus)
}
if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e {
t.Errorf("test %q using %q: got content-type = %q, want %q", testName, method, g, e)
}
if g, e := res.Header.Get("Content-Range"), tt.wantContentRange; g != e {
t.Errorf("test %q using %q: got content-range = %q, want %q", testName, method, g, e)
}
if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
t.Errorf("test %q using %q: got last-modified = %q, want %q", testName, method, g, e)
}
}
}
}
// Issue 12991
func TestServerFileStatError(t *testing.T) {
rec := httptest.NewRecorder()
r, _ := NewRequest("GET", "http://foo/", nil)
redirect := false
name := "file.txt"
fs := issue12991FS{}
ExportServeFile(rec, r, fs, name, redirect)
if body := rec.Body.String(); !strings.Contains(body, "403") || !strings.Contains(body, "Forbidden") {
t.Errorf("wanted 403 forbidden message; got: %s", body)
}
}
type issue12991FS struct{}
func (issue12991FS) Open(string) (File, error) { return issue12991File{}, nil }
type issue12991File struct{ File }
func (issue12991File) Stat() (os.FileInfo, error) { return nil, os.ErrPermission }
func (issue12991File) Close() error { return nil }
func TestServeContentErrorMessages(t *testing.T) {
defer afterTest(t)
fs := fakeFS{
"/500": &fakeFileInfo{
err: errors.New("random error"),
},
"/403": &fakeFileInfo{
err: &os.PathError{Err: os.ErrPermission},
},
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
c := ts.Client()
for _, code := range []int{403, 404, 500} {
res, err := c.Get(fmt.Sprintf("%s/%d", ts.URL, code))
if err != nil {
t.Errorf("Error fetching /%d: %v", code, err)
continue
}
if res.StatusCode != code {
t.Errorf("For /%d, status code = %d; want %d", code, res.StatusCode, code)
}
res.Body.Close()
}
}
// verifies that sendfile is being used on Linux
func TestLinuxSendfile(t *testing.T) {
setParallel(t)
defer afterTest(t)
if runtime.GOOS != "linux" {
t.Skip("skipping; linux-only test")
}
if _, err := exec.LookPath("strace"); err != nil {
t.Skip("skipping; strace not found in path")
}
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
lnf, err := ln.(*net.TCPListener).File()
if err != nil {
t.Fatal(err)
}
defer ln.Close()
syscalls := "sendfile,sendfile64"
switch runtime.GOARCH {
case "mips64", "mips64le", "s390x":
// strace on the above platforms doesn't support sendfile64
// and will error out if we specify that with `-e trace='.
syscalls = "sendfile"
}
// Attempt to run strace, and skip on failure - this test requires SYS_PTRACE.
if err := exec.Command("strace", "-f", "-q", "-e", "trace="+syscalls, os.Args[0], "-test.run=^$").Run(); err != nil {
t.Skipf("skipping; failed to run strace: %v", err)
}
var buf bytes.Buffer
child := exec.Command("strace", "-f", "-q", "-e", "trace="+syscalls, os.Args[0], "-test.run=TestLinuxSendfileChild")
child.ExtraFiles = append(child.ExtraFiles, lnf)
child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
child.Stdout = &buf
child.Stderr = &buf
if err := child.Start(); err != nil {
t.Skipf("skipping; failed to start straced child: %v", err)
}
res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
if err != nil {
t.Fatalf("http client error: %v", err)
}
_, err = io.Copy(ioutil.Discard, res.Body)
if err != nil {
t.Fatalf("client body read error: %v", err)
}
res.Body.Close()
// Force child to exit cleanly.
Post(fmt.Sprintf("http://%s/quit", ln.Addr()), "", nil)
child.Wait()
rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+`)
out := buf.String()
if !rx.MatchString(out) {
t.Errorf("no sendfile system call found in:\n%s", out)
}
}
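// getBody sends req with the given client and returns the response together with
// its fully read body, failing the test on any error.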
func getBody(t *testing.T, testName string, req Request, client *Client) (*Response, []byte) {
r, err := client.Do(&req)
if err != nil {
t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
}
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err)
}
return r, b
}
// TestLinuxSendfileChild isn't a real test. It's used as a helper process
// for TestLinuxSendfile.
func TestLinuxSendfileChild(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
fd3 := os.NewFile(3, "ephemeral-port-listener")
ln, err := net.FileListener(fd3)
if err != nil {
panic(err)
}
mux := NewServeMux()
mux.Handle("/", FileServer(Dir("testdata")))
mux.HandleFunc("/quit", func(ResponseWriter, *Request) {
os.Exit(0)
})
s := &Server{Handler: mux}
err = s.Serve(ln)
if err != nil {
panic(err)
}
}
// Issue 18984: tests that requests for paths beyond files return not-found errors
func TestFileServerNotDirError(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(FileServer(Dir("testdata")))
defer ts.Close()
res, err := Get(ts.URL + "/index.html/not-a-file")
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if res.StatusCode != 404 {
t.Errorf("StatusCode = %v; want 404", res.StatusCode)
}
test := func(name string, dir Dir) {
t.Run(name, func(t *testing.T) {
_, err = dir.Open("/index.html/not-a-file")
if err == nil {
t.Fatal("err == nil; want != nil")
}
if !os.IsNotExist(err) {
t.Errorf("err = %v; os.IsNotExist(err) = %v; want true", err, os.IsNotExist(err))
}
_, err = dir.Open("/index.html/not-a-dir/not-a-file")
if err == nil {
t.Fatal("err == nil; want != nil")
}
if !os.IsNotExist(err) {
t.Errorf("err = %v; os.IsNotExist(err) = %v; want true", err, os.IsNotExist(err))
}
})
}
absPath, err := filepath.Abs("testdata")
if err != nil {
t.Fatal("get abs path:", err)
}
test("RelativePath", Dir("testdata"))
test("AbsolutePath", Dir(absPath))
}
func TestFileServerCleanPath(t *testing.T) {
tests := []struct {
path string
wantCode int
wantOpen []string
}{
{"/", 200, []string{"/", "/index.html"}},
{"/dir", 301, []string{"/dir"}},
{"/dir/", 200, []string{"/dir", "/dir/index.html"}},
}
for _, tt := range tests {
var log []string
rr := httptest.NewRecorder()
req, _ := NewRequest("GET", "http://foo.localhost"+tt.path, nil)
FileServer(fileServerCleanPathDir{&log}).ServeHTTP(rr, req)
if !reflect.DeepEqual(log, tt.wantOpen) {
t.Logf("For %s: Opens = %q; want %q", tt.path, log, tt.wantOpen)
}
if rr.Code != tt.wantCode {
t.Logf("For %s: Response code = %d; want %d", tt.path, rr.Code, tt.wantCode)
}
}
}
type fileServerCleanPathDir struct {
log *[]string
}
func (d fileServerCleanPathDir) Open(path string) (File, error) {
*(d.log) = append(*(d.log), path)
if path == "/" || path == "/dir" || path == "/dir/" {
// Just return back something that's a directory.
return Dir(".").Open(".")
}
return nil, os.ErrNotExist
}
type panicOnSeek struct{ io.ReadSeeker }
func Test_scanETag(t *testing.T) {
tests := []struct {
in string
wantETag string
wantRemain string
}{
{`W/"etag-1"`, `W/"etag-1"`, ""},
{`"etag-2"`, `"etag-2"`, ""},
{`"etag-1", "etag-2"`, `"etag-1"`, `, "etag-2"`},
{"", "", ""},
{"W/", "", ""},
{`W/"truc`, "", ""},
{`w/"case-sensitive"`, "", ""},
{`"spaced etag"`, "", ""},
}
for _, test := range tests {
etag, remain := ExportScanETag(test.in)
if etag != test.wantETag || remain != test.wantRemain {
t.Errorf("scanETag(%q)=%q %q, want %q %q", test.in, etag, remain, test.wantETag, test.wantRemain)
}
}
}
| [
"\"GO_WANT_HELPER_PROCESS\""
]
| []
| [
"GO_WANT_HELPER_PROCESS"
]
| [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
src/main/java/org/java2uml/java2umlapi/config/ServerConfig.java | package org.java2uml.java2umlapi.config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.web.server.ConfigurableWebServerFactory;
import org.springframework.boot.web.server.WebServerFactoryCustomizer;
import org.springframework.stereotype.Component;
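/**
 * Configures the embedded web server to listen on the port given by the PORT
 * environment variable, falling back to 8080 when it is absent or unreadable.
 */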
@Component
public class ServerConfig implements WebServerFactoryCustomizer<ConfigurableWebServerFactory> {
private final Logger logger = LoggerFactory.getLogger(ServerConfig.class);
@Override
public void customize(ConfigurableWebServerFactory factory) {
String portStr = null;
try {
portStr = System.getenv("PORT");
if (portStr == null) {
logger.info("$PORT not defined in system environment.");
logger.info("Setting port to 8080");
portStr = "8080";
}
} catch (SecurityException e) {
logger.warn("Unable to get system variable $PORT");
portStr = "8080";
} finally {
factory.setPort(Integer.parseInt(portStr == null ? "8080" : portStr));
}
}
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | java | 1 | 0 | |
tests/unit/test_yaml.py | import yaml
import os
import unittest
import textwrap
from contextlib import ExitStack
from unittest import mock
from unittest.mock import MagicMock, mock_open
import mimesis
from deploy2ecscli.yaml import setup_loader
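# Exercises the custom YAML tags (!Join, !Split, !Ref and !Sub) registered by setup_loader,
# resolving references from both os.environ and explicitly passed parameters.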
class TestSetupLoader(unittest.TestCase):
def test_join(self):
expect = 'value1,value2,value3'
template = """
value: !Join
- ','
- - value1
- value2
- value3
"""
filename = mimesis.File().file_name()
with mock.patch('builtins.open', mock_open(read_data=template)):
with open(filename) as file:
actual = yaml.load(file, Loader=setup_loader())
actual = actual['value']
self.assertEqual(expect, actual)
def test_join_with_sub(self):
token = mimesis.Cryptographic.token_hex()
expect = 'region=xxxxx;token=' + token
template = """
value: !Join
- ';'
- - region=xxxxx
- !Sub token=${TOKEN}
"""
filename = mimesis.File().file_name()
with ExitStack() as stack:
stack.enter_context(
mock.patch.dict(os.environ, {'TOKEN': token}))
stack.enter_context(
mock.patch('builtins.open', mock_open(read_data=template)))
with open(filename) as file:
actual = yaml.load(file, Loader=setup_loader())
actual = actual['value']
self.assertEqual(expect, actual)
def test_split(self):
expect = ['value1', 'value2', 'value3']
template = """
values: !Split
- ','
- value1, value2, value3
"""
filename = mimesis.File().file_name()
with mock.patch('builtins.open', mock_open(read_data=template)):
with open(filename) as file:
actual = yaml.load(file, Loader=setup_loader())
actual = actual['values']
self.assertEqual(expect, actual)
def test_ref_should_exist_env(self):
expect = mimesis.Cryptographic.token_hex()
template = """
value: !Ref REFERENCE_KEY
"""
filename = mimesis.File().file_name()
with ExitStack() as stack:
stack.enter_context(
mock.patch.dict(os.environ, {'REFERENCE_KEY': expect}))
stack.enter_context(
mock.patch('builtins.open', mock_open(read_data=template)))
with open(filename) as file:
actual = yaml.load(file, Loader=setup_loader())
actual = actual['value']
self.assertEqual(expect, actual)
def test_ref_should_exist_args(self):
expect = mimesis.Cryptographic.token_hex()
params = {
'REFERENCE_KEY': expect
}
template = """
value: !Ref REFERENCE_KEY
"""
filename = mimesis.File().file_name()
with ExitStack() as stack:
stack.enter_context(
mock.patch('builtins.open', mock_open(read_data=template)))
with open(filename) as file:
actual = yaml.load(file, Loader=setup_loader(params))
actual = actual['value']
self.assertEqual(expect, actual)
    def test_ref_should_not_exist(self):
template = """
value: !Ref REFERENCE_KEY
"""
filename = mimesis.File().file_name()
with ExitStack() as stack:
stack.enter_context(
mock.patch.dict(os.environ, {}))
stack.enter_context(
mock.patch('builtins.open', mock_open(read_data=template)))
with open(filename) as file:
actual = yaml.load(file, Loader=setup_loader())
actual = actual['value']
self.assertIsNone(actual)
def test_sub_should_exist_args(self):
params = {
'REFERENCE_KEY': mimesis.Cryptographic.token_hex()
}
template = """
value: !Sub token is ${REFERENCE_KEY}
"""
expect = 'token is {0}'.format(params['REFERENCE_KEY'])
filename = mimesis.File().file_name()
with ExitStack() as stack:
stack.enter_context(
mock.patch('builtins.open', mock_open(read_data=template)))
with open(filename) as file:
actual = yaml.load(file, Loader=setup_loader(params))
actual = actual['value']
self.assertEqual(expect, actual)
def test_sub_should_multiline(self):
params = {
'USER_NAME': mimesis.Person().username(),
'TOKEN': mimesis.Cryptographic.token_hex()
}
template = """
value: !Sub |
user_name is ${USER_NAME}
token is ${TOKEN}
"""
expect = """
user_name is {0}
token is {1}
"""
expect = expect.format(
params['USER_NAME'],
params['TOKEN'])
expect = textwrap.dedent(expect).strip()
expect = expect + '\n'
filename = mimesis.File().file_name()
with ExitStack() as stack:
stack.enter_context(
mock.patch('builtins.open', mock_open(read_data=template)))
with open(filename) as file:
actual = yaml.load(file, Loader=setup_loader(params))
actual = actual['value']
self.assertEqual(expect, actual)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test_abiquo_inventory.py | from __future__ import annotations
import configparser
import os
import unittest
from unittest import TestCase
from unittest.mock import patch
from abiquo.client import Abiquo
from requests import Response
from abiquo_inventory import InventoryGenerator, InventoryGeneratorParameters, ConfigProvider
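# ApiResponseCallable replaces requests.Session.request during these tests and returns
# canned JSON fixtures keyed by the requested Abiquo API path.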
class ApiResponseCallable(object):
def load_file_content(self, path):
with open(path, encoding='utf-8-sig') as json_file:
return json_file.read()
def __call__(self, method: str, url: str, **kwargs):
response = Response()
response.status_code = 200
data = None
if url.endswith('/api/cloud/virtualdatacenters/8/action/virtualmachines') or url.endswith(
'/api/cloud/virtualmachines'):
data = self.load_file_content("fixtures/test_full_response/virtualmachines.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/233/network/nics'):
data = self.load_file_content("fixtures/test_full_response/233/nics.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/233/storage/disks'):
data = self.load_file_content("fixtures/test_full_response/233/disks.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/233/storage/volumes'):
data = self.load_file_content("fixtures/test_full_response/233/volumes.json")
if url.endswith('/api/admin/enterprises/1/datacenterrepositories/2/virtualmachinetemplates/106'):
data = self.load_file_content("fixtures/test_full_response/233/virtualmachinetemplate.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/233/tags'):
data = self.load_file_content("fixtures/test_full_response/233/tags.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/233/metadata'):
data = self.load_file_content("fixtures/test_full_response/233/metadata.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/234/network/nics'):
data = self.load_file_content("fixtures/test_full_response/234/nics.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/234/storage/disks'):
data = self.load_file_content("fixtures/test_full_response/234/disks.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/234/storage/volumes'):
data = self.load_file_content("fixtures/test_full_response/234/volumes.json")
if url.endswith('/api/admin/enterprises/1/datacenterrepositories/2/virtualmachinetemplates/106'):
data = self.load_file_content("fixtures/test_full_response/234/virtualmachinetemplate.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/234/tags'):
data = self.load_file_content("fixtures/test_full_response/234/tags.json")
if url.endswith('/api/cloud/virtualdatacenters/8/virtualappliances/15/virtualmachines/234/metadata'):
data = self.load_file_content("fixtures/test_full_response/234/metadata.json")
if data is None:
raise Exception('Invalid path: ' + url)
b = bytearray()
b.extend(data.encode())
response._content = b
return response
class InventoryGeneratorCase(unittest.TestCase):
def setUp(self) -> None:
self.mock_get_patcher = patch('requests.sessions.Session.request', new_callable=ApiResponseCallable)
self.mock_get = self.mock_get_patcher.start()
def tearDown(self) -> None:
self.mock_get_patcher.stop()
def test_it_returns_two_vms_full_response(self):
url = 'https://localhost/api'
api_user = ''
api_pass = ''
api = Abiquo(
url=url,
auth=(api_user, api_pass),
verify=False
)
parameters = InventoryGeneratorParameters(
default_net_iface='nic0',
vdc_id="8",
deployed_only=True,
public_ip_only=False,
get_metadata=True
)
generator = InventoryGenerator(parameters, api)
generated_inventory = generator.generate()
expectedInventory = {
'_meta': {
'hostvars': {
'abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain': {
'ansible_host': '10.60.13.203',
'ansible_user': ''
},
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain': {
'ansible_host': '10.60.13.202',
'ansible_user': ''}
}
},
'ABQ_6feae9be-0c2b-48c6-9501-462ef8941b12': [
'abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain'],
'template_ubuntu1804': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'vapp_wordpress': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'vdc_AWX-XAAS': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'vdc_AWX-XAAS_vapp_wordpress': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'hwprof_test': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'network_External_support': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'dstier_Default_Tier': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain',
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'tag_type_wordpressdb': ['abq-6feae9be-0c2b-48c6-9501-462ef8941b12.localdomain'],
'ABQ_b93e3155-4e4a-445e-8523-e19834cf57dc': [
'abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain'],
'tag_type_wordpressvm': ['abq-b93e3155-4e4a-445e-8523-e19834cf57dc.localdomain']}
self.maxDiff = None
self.assertEqual(expectedInventory, generated_inventory)
class TestConfigProvider(TestCase):
def setUp(self) -> None:
config_parser = configparser.ConfigParser()
self.default_api_user_value = "user"
self.default_ssl_verify_value = False
config_parser.read_dict({
"auth": {
"apiuser": self.default_api_user_value,
"apipass": "pass"
},
"api": {
"ssl_verify": self.default_ssl_verify_value
}
})
self.config_provider = ConfigProvider(config_parser)
os.environ.clear()
def test_find_value_returns_env_value_if_present(self):
os.environ['APIUSER'] = 'admin'
value = self.config_provider.find_value('APIUSER', 'auth', 'apiuser')
self.assertEqual('admin', value)
def test_find_value_returns_none_if_value_is_missing(self):
value = self.config_provider.find_value('APIUSER', 'auth', 'fake')
self.assertEqual(None, value)
def test_find_value_returns_config_value_if_value_is_present(self):
value = self.config_provider.find_value('APIUSER', 'auth', 'apiuser')
self.assertEqual(self.default_api_user_value, value)
def test_find_boolean_value_returns_env_value_if_present(self):
os.environ['SSL_VERIFY'] = "true"
value = self.config_provider.find_boolean_value('SSL_VERIFY', 'api', 'ssl_verify')
self.assertEqual(True, value)
def test_find_boolean_value_returns_none_if_value_is_missing(self):
value = self.config_provider.find_boolean_value('SSL_VERIFY', 'api', 'ssl_verify')
self.assertEqual(self.default_ssl_verify_value, value)
def test_find_boolean_value_returns_config_value_if_value_is_present(self):
value = self.config_provider.find_boolean_value('SSL_VERIFY', 'api', 'ssl_verify')
self.assertEqual(False, value)
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"APIUSER",
"SSL_VERIFY"
]
| [] | ["APIUSER", "SSL_VERIFY"] | python | 2 | 0 | |
website/app.py | import os
import requests
import pymongo
import redis
import json
import binascii
import datetime
import time
import logging
import paypalrestsdk
from math import floor
import re
from functools import wraps
from requests_oauthlib import OAuth2Session
from flask import Flask, session, request, url_for, render_template, redirect, \
jsonify, flash, abort, Response
from itsdangerous import JSONWebSignatureSerializer
app = Flask(__name__)
app.config['SECRET_KEY'] = os.environ.get("SECRET_KEY",
"qdaopdsjDJ9u&çed&ndlnad&pjéà&jdndqld"
)
REDIS_URL = os.environ.get('REDIS_URL')
OAUTH2_CLIENT_ID = os.environ['OAUTH2_CLIENT_ID']
OAUTH2_CLIENT_SECRET = os.environ['OAUTH2_CLIENT_SECRET']
OAUTH2_REDIRECT_URI = os.environ.get('OAUTH2_REDIRECT_URI',
'http://localhost:5000/confirm_login')
API_BASE_URL = os.environ.get('API_BASE_URL', 'https://discordapp.com/api')
AUTHORIZATION_BASE_URL = API_BASE_URL + '/oauth2/authorize'
AVATAR_BASE_URL = "https://cdn.discordapp.com/avatars/"
ICON_BASE_URL = "https://cdn.discordapp.com/icons/"
CREATE_DM_URL = API_BASE_URL + '/users/@me/channels'
CREATE_MESSAGE_URL = API_BASE_URL + '/channels/{}/messages'
DEFAULT_AVATAR = "https://discordapp.com/assets/"\
"1cbd08c76f8af6dddce02c5138971129.png"
DOMAIN = os.environ.get('VIRTUAL_HOST', 'localhost:5000')
TOKEN_URL = API_BASE_URL + '/oauth2/token'
MEE6_TOKEN = os.getenv('MEE6_TOKEN')
MONGO_URL = os.environ.get('MONGO_URL')
FLASK_DEBUG = os.getenv('FLASK_DEBUG')
db = redis.Redis.from_url(REDIS_URL, decode_responses=True)
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
mongo = pymongo.MongoClient(MONGO_URL)
def strip(arg):
if type(arg) == list:
return [strip(e) for e in arg]
return arg.strip()
"""
JINJA2 Filters
"""
@app.template_filter('avatar')
def avatar(user):
if user.get('avatar'):
return AVATAR_BASE_URL + user['id'] + '/' + user['avatar'] + '.jpg'
else:
return DEFAULT_AVATAR
"""
Discord DATA logic
"""
def get_user(token):
# If it's an api_token, go fetch the discord_token
user_id = None
if token.get('api_key'):
user_id = token['user_id']
discord_token_str = db.get('user:{}:discord_token'.format(
token['user_id']
))
if not discord_token_str:
return None
token = json.loads(discord_token_str)
discord = make_session(token=token)
if user_id:
ttl = db.ttl('user:{}'.format(user_id))
if not ttl or ttl == -1:
db.delete('user:{}'.format(user_id))
cached_user = db.get('user:{}'.format(user_id))
if cached_user:
user = json.loads(cached_user)
points = db.get('user:'+user['id']+':points') or 0
user['points'] = int(points)
return user
try:
req = discord.get(API_BASE_URL + '/users/@me')
except Exception:
return None
if req.status_code != 200:
abort(req.status_code)
user = req.json()
# Saving that to the session for easy template access
session['user'] = user
# Saving that to the db
db.sadd('users', user['id'])
db.set('user:{}'.format(user['id']), json.dumps(user))
db.expire('user:{}'.format(user['id']), 30)
points = db.get('user:'+user['id']+':points') or 0
user['points'] = int(points)
return user
def get_user_guilds(token):
# If it's an api_token, go fetch the discord_token
if token.get('api_key'):
user_id = token['user_id']
discord_token_str = db.get('user:{}:discord_token'.format(
token['user_id']
))
token = json.loads(discord_token_str)
else:
user_id = get_user(token)['id']
discord = make_session(token=token)
ttl = db.ttl('user:{}:guilds'.format(user_id))
if not ttl or ttl == -1:
db.delete('user:{}:guilds'.format(user_id))
cached_guilds = db.get('user:{}:guilds'.format(user_id))
if cached_guilds:
return json.loads(cached_guilds)
req = discord.get(API_BASE_URL + '/users/@me/guilds')
if req.status_code != 200:
abort(req.status_code)
guilds = req.json()
# Saving that to the db
db.set('user:{}:guilds'.format(user_id), json.dumps(guilds))
db.expire('user:{}:guilds'.format(user_id), 30)
return guilds
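# A guild counts as "managed" by the user when they own it or hold the
# Manage Server permission, i.e. bit 5 (value 0x20) of the Discord
# permissions bitfield tested below.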
def get_user_managed_servers(user, guilds):
return list(
filter(
lambda g: (g['owner'] is True) or
bool((int(g['permissions']) >> 5) & 1),
guilds)
)
"""
CSRF Security
"""
@app.before_request
def csrf_protect():
if request.method == "POST":
token = session.pop('_csrf_token', None)
if not token or token != request.form.get('_csrf_token'):
abort(403)
def generate_csrf_token():
if '_csrf_token' not in session:
        session['_csrf_token'] = binascii.hexlify(os.urandom(15)).decode()
return session['_csrf_token']
app.jinja_env.globals['csrf_token'] = generate_csrf_token
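# Templates are expected to embed the token in every POST form as a hidden
# field, e.g. <input type="hidden" name="_csrf_token" value="{{ csrf_token() }}">,
# so that csrf_protect() can match it against the copy kept in the session.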
"""
AUTH logic
"""
def token_updater(discord_token):
user = get_user(discord_token)
# Save the new discord_token
db.set('user:{}:discord_token'.format(user['id']),
json.dumps(discord_token))
def make_session(token=None, state=None, scope=None):
return OAuth2Session(
client_id=OAUTH2_CLIENT_ID,
token=token,
state=state,
scope=scope,
redirect_uri=OAUTH2_REDIRECT_URI,
auto_refresh_kwargs={
'client_id': OAUTH2_CLIENT_ID,
'client_secret': OAUTH2_CLIENT_SECRET,
},
auto_refresh_url=TOKEN_URL,
token_updater=token_updater
)
@app.route('/login')
def login():
scope = ['identify', 'guilds']
discord = make_session(scope=scope)
authorization_url, state = discord.authorization_url(
AUTHORIZATION_BASE_URL,
access_type="offline"
)
session['oauth2_state'] = state
return redirect(authorization_url)
@app.route('/confirm_login')
def confirm_login():
# Check for state and for 0 errors
state = session.get('oauth2_state')
if not state or request.values.get('error'):
return redirect(url_for('index'))
# Fetch token
discord = make_session(state=state)
discord_token = discord.fetch_token(
TOKEN_URL,
client_secret=OAUTH2_CLIENT_SECRET,
authorization_response=request.url)
if not discord_token:
return redirect(url_for('index'))
# Fetch the user
user = get_user(discord_token)
if not user:
return redirect(url_for('logout'))
# Generate api_key from user_id
serializer = JSONWebSignatureSerializer(app.config['SECRET_KEY'])
    # dumps() returns bytes; decode to a plain str before storing/comparing it
    api_key = serializer.dumps({'user_id': user['id']}).decode()
# Store api_key
db.set('user:{}:api_key'.format(user['id']), api_key)
# Store token
db.set('user:{}:discord_token'.format(user['id']),
json.dumps(discord_token))
# Store api_token in client session
api_token = {
'api_key': api_key,
'user_id': user['id']
}
session.permanent = True
session['api_token'] = api_token
return redirect(url_for('select_server'))
def require_auth(f):
@wraps(f)
def wrapper(*args, **kwargs):
# Does the user have an api_token?
api_token = session.get('api_token')
if api_token is None:
return redirect(url_for('login'))
        # Is their api_key still present in the db?
user_api_key = db.get('user:{}:api_key'.format(api_token['user_id']))
if user_api_key != api_token['api_key']:
return redirect(url_for('logout'))
return f(*args, **kwargs)
return wrapper
@app.route('/logout')
def logout():
session.clear()
return redirect(url_for('index'))
@app.route('/recovery')
@require_auth
def recovery():
return render_template('recovery.html')
@app.route('/recovery-confirm', methods=['POST'])
@require_auth
def recovery_confirm():
email = request.form.get('email')
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
if not email:
flash('Please enter an email', 'warning')
return redirect(url_for('recovery'))
email = email.lower()
amount = db.get('recover:'+email)
if not amount:
        flash('Sorry, we didn\'t find any of your contributions, or we have '
              'already given you back your points. If you think this is a '
              'mistake, please join our support Discord server and send a '
              'private message to Jackson', 'warning')
return redirect(url_for('recovery'))
amount = int(amount)
points = amount*100
db.set('user:'+user['id']+':points', user['points']+points)
db.delete('recover:'+email)
flash('We gave you back your '+str(points)+' points. You can now purchase back the '
'Music potion.', 'success')
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
return redirect(url_for('recovery'))
"""
DISCORD RELATED PARSERS
"""
def typeahead_members(_members):
members = []
for m in _members:
user = {
'username': m['user']['username']+'#'+m['user']['discriminator'],
'name': m['user']['username'],
}
if m['user']['avatar']:
user['image'] = 'https://cdn.discordapp.com/'\
'avatars/{}/{}.jpg'.format(
m['user']['id'],
m['user']['avatar']
)
else:
user['image'] = url_for('static', filename='img/no_logo.png')
members.append(user)
return members
def get_mention_parser(server_id, members=None, guild=None):
_members = members
if members is None:
_members = get_guild_members(server_id)
__members = {}
for member in _members:
key = '<@{}>'.format(member['user']['id'])
__members[key] = '@{}#{}'.format(member['user']['username'],
member['user']['discriminator'])
member_pattern = r'(<@[0-9]*>)'
def members_repl(k):
key = k.groups()[0]
val = __members.get(key)
if val:
return val
return key
channels = get_guild_channels(server_id)
__channels = {'<#{}>'.format(c['id']): '#'+c['name'] for c in channels}
channel_pattern = r'(<#[0-9]*>)'
def channels_repl(k):
key = k.groups()[0]
val = __channels.get(key)
if val:
return val
return key
guild = guild or get_guild(server_id)
emojis = guild['emojis']
__emojis = {'<:'+e['name']+':'+e['id']+'>': ':'+e['name']+':' for e in emojis}
emoji_pattern = r'(<:[A-Za-z0-9_-]*:[0-9]*>)'
def emoji_repl(k):
key = k.groups()[0]
val = __emojis.get(key)
if val:
return val
return key
roles = [role for role in guild['roles'] if role['mentionable']]
__roles = {'<@&'+r['id']+'>': '@'+r['name'] for r in roles}
role_pattern = r'(<@&[0-9]*>)'
def role_repl(k):
key = k.groups()[0]
val = __roles.get(key)
if val:
return val
return key
def func(string):
string = re.sub(member_pattern, members_repl, string)
string = re.sub(channel_pattern, channels_repl, string)
string = re.sub(emoji_pattern, emoji_repl, string)
string = re.sub(role_pattern, role_repl, string)
return string
return func
def get_mention_decoder(server_id, members=None):
_members = members
if members is None:
_members = get_guild_members(server_id)
members = {}
for member in _members:
key = member['user']['username']+'#'+member['user']['discriminator']
members[key] = "<@{}>".format(member['user']['id'])
member_pattern = r'@(((?!@).)*?#[0-9]{4})'
def members_repl(k):
key = k.groups()[0]
val = members.get(key)
if val:
return val
return '@'+key
channels = get_guild_channels(server_id)
__channels = {c['name']: '<#{}>'.format(c['id']) for c in channels}
channel_pattern = r'#(((?!(#| )).)*)'
def channels_repl(k):
key = k.groups()[0]
val = __channels.get(key)
if val:
return val
return '#'+key
guild = get_guild(server_id)
emojis = guild['emojis']
__emojis = {e['name']: '<:'+e['name']+':'+e['id']+'>' for e in emojis}
emoji_pattern = r':([A-Za-z0-9_-]*):'
def emoji_repl(k):
key = k.groups()[0]
val = __emojis.get(key)
if val:
return val
return ':' + key + ':'
roles = [role for role in guild['roles'] if role['mentionable']]
__roles = {r['name']: '<@&'+r['id']+'>' for r in roles}
role_pattern = r'@(((?!(@| |#)).)*)'
def role_repl(k):
key = k.groups()[0]
val = __roles.get(key)
if val:
return val
return '@' + key
def func(string):
string = re.sub(member_pattern, members_repl, string)
string = re.sub(channel_pattern, channels_repl, string)
string = re.sub(emoji_pattern, emoji_repl, string)
string = re.sub(role_pattern, role_repl, string)
return string
return func
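# Round-trip example: for a member "User#1234" with id 42, the decoder maps
# the readable form "@User#1234" to the raw mention "<@42>" that Discord
# understands, while get_mention_parser() above performs the inverse so the
# dashboard forms can display "@User#1234", "#channel", ":emoji:" and "@role".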
"""
STATIC pages
"""
@app.route('/')
def index():
return render_template('index.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/thanks')
@require_auth
def thanks():
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
return render_template('thanks.html',
points=user['points'])
@app.route('/debug_token')
def debug_token():
if not session.get('api_token'):
return jsonify({'error': 'no api_token'})
token = db.get('user:{}:discord_token'.format(
session['api_token']['user_id']
))
return token
MSG = """
Hi `{username}` :wave: :smile: ! Thanks for adding me to your discord server `{guild_name}` :tada: :smile: .
In order for me to work in your server, you have to enable **plugins** in your **server dashboard**
-> :satellite_orbital: <https://ctfegame.com/dashboard/{guild_id}>.
---------
:warning: **The Commands Plugin** will let you add and manage custom commands in your server
:alarm_clock: **The Timers Plugin** will let you send messages at specific time intervals
:first_place: **The Levels Plugin** will let your server members gain XP and LEVELS by participating in the chat
:hammer_pick: **The Moderator Plugin** will give you some cool moderator commands like !clear, !mute or !slowmode
:musical_note: **The Music Plugin** will let you listen to any music you want with your server members
:mag_right: **The Search Plugin** has some nice search commands like !youtube, !imgur or !urban
:movie_camera: **The Twitch Plugin** will let you notify your server members whenever your favourite twitch streamers are live
:notebook: **The Reddit Plugin** will let you notify your server members whenever a post is sent to your favourite subreddits
:wave: **The Welcome Plugin** will let you welcome members that have joined your server
---------
To enable those plugins, go to your server dashboard here ->
:satellite_orbital: <https://ctfegame.com/dashboard/{guild_id}>
If you need any more help, feel free to join our **support server**. (Click on Support at the top of our website)
"""
def send_join_announce(guild_id, user):
guild = get_guild(guild_id)
msg = MSG.format(username=user['username'],
guild_name=guild['name'],
guild_id=guild_id)
r = requests.post(CREATE_DM_URL,
headers={'Authorization': 'Bot '+MEE6_TOKEN},
json={'recipient_id': user['id']})
if r.status_code >= 300:
return
channel_id = r.json()['id']
requests.post(CREATE_MESSAGE_URL.format(channel_id),
headers={'Authorization': 'Bot '+MEE6_TOKEN},
json={'content': msg})
@app.route('/servers')
@require_auth
def select_server():
guild_id = request.args.get('guild_id')
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
if guild_id:
send_join_announce(guild_id, user)
return redirect(url_for('dashboard', server_id=int(guild_id),
force=1))
guilds = get_user_guilds(session['api_token'])
user_servers = sorted(
get_user_managed_servers(user, guilds),
key=lambda s: s['name'].lower()
)
return render_template('select-server.html',
user=user, user_servers=user_servers)
def get_invite_link(server_id):
url = "https://discordapp.com/oauth2/authorize?&client_id={}"\
"&scope=bot&permissions={}&guild_id={}&response_type=code"\
"&redirect_uri=http://{}/servers".format(OAUTH2_CLIENT_ID,
'66321471',
server_id,
DOMAIN)
return url
def server_check(f):
@wraps(f)
def wrapper(*args, **kwargs):
if request.args.get('force'):
return f(*args, **kwargs)
server_id = kwargs.get('server_id')
if not db.sismember('servers', server_id):
url = get_invite_link(server_id)
return redirect(url)
return f(*args, **kwargs)
return wrapper
ADMINS = ['296084893459283968']
def require_bot_admin(f):
@wraps(f)
def wrapper(*args, **kwargs):
server_id = kwargs.get('server_id')
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
guilds = get_user_guilds(session['api_token'])
user_servers = get_user_managed_servers(user, guilds)
if user['id'] not in ADMINS and str(server_id) not in map(lambda g: g['id'], user_servers):
return redirect(url_for('select_server'))
return f(*args, **kwargs)
return wrapper
def my_dash(f):
# tfw when elixir's |>...
return require_auth(require_bot_admin(server_check(f)))
def plugin_method(f):
return my_dash(f)
def plugin_page(plugin_name, buff=None):
def decorator(f):
@require_auth
@require_bot_admin
@server_check
@wraps(f)
def wrapper(server_id):
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
if buff:
not_buff = db.get('buffs:'+str(server_id)+':'+buff) is None
if not_buff:
db.srem('plugins:{}'.format(server_id), plugin_name)
db.srem('plugin.{}.guilds'.format(plugin_name), server_id)
return redirect(url_for('shop', server_id=server_id))
disable = request.args.get('disable')
if disable:
db.srem('plugins:{}'.format(server_id), plugin_name)
db.srem('plugin.{}.guilds'.format(plugin_name), server_id)
return redirect(url_for('dashboard', server_id=server_id))
db.sadd('plugins:{}'.format(server_id), plugin_name)
db.sadd('plugin.{}.guilds'.format(plugin_name), server_id)
server = get_guild(server_id)
enabled_plugins = db.smembers('plugins:{}'.format(server_id))
ignored = db.get('user:{}:ignored'.format(user['id']))
notification = not ignored
return render_template(
f.__name__.replace('_', '-') + '.html',
server=server,
enabled_plugins=enabled_plugins,
notification=notification,
**f(server_id)
)
return wrapper
return decorator
@app.route('/dashboard/<int:server_id>')
@my_dash
def dashboard(server_id):
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
guild = get_guild(server_id)
if guild is None:
return redirect(get_invite_link(server_id))
enabled_plugins = db.smembers('plugins:{}'.format(server_id))
ignored = db.get('user:{}:ignored'.format(user['id']))
notification = not ignored
buffs_base = 'buffs:'+guild['id']+':'
music_buff = {'name': 'music',
'active': db.get(buffs_base+'music')
is not None,
'remaining': db.ttl(buffs_base+'music')}
guild['buffs'] = [music_buff]
return render_template('dashboard.html',
server=guild,
enabled_plugins=enabled_plugins,
notification=notification)
@app.route('/dashboard/<int:server_id>/member-list')
@my_dash
def member_list(server_id):
import io
import csv
members = get_guild_members(server_id)
if request.args.get('csv'):
output = io.StringIO()
writer = csv.writer(output)
writer.writerow(["username", "discriminator"])
for m in members:
writer.writerow([m['user']['username'],
m['user']['discriminator']])
return Response(output.getvalue(),
mimetype="text/csv",
headers={"Content-disposition": "attachement; file"
"name=guild_{}.csv".format(server_id)})
else:
return jsonify({"members": members})
@app.route('/dashboard/notification/<int:server_id>')
@my_dash
def notification(server_id):
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
ignored = db.get('user:{}:ignored'.format(user['id']))
if ignored:
db.delete('user:{}:ignored'.format(user['id']))
else:
db.set('user:{}:ignored'.format(user['id']), '1')
return redirect(url_for('dashboard', server_id=server_id))
def get_guild(server_id):
headers = {'Authorization': 'Bot '+MEE6_TOKEN}
r = requests.get(API_BASE_URL+'/guilds/{}'.format(server_id),
headers=headers)
if r.status_code == 200:
return r.json()
return None
def get_guild_members(server_id):
headers = {'Authorization': 'Bot '+MEE6_TOKEN}
members = []
ttl = db.ttl('guild:{}:members'.format(server_id))
if not ttl or ttl == -1:
db.delete('guild:{}:members'.format(server_id))
cached_members = db.get('guild:{}:members'.format(server_id))
if cached_members:
return json.loads(cached_members)
# Quick fix for huge guilds
# preventing a timeout from the app
MAX_MEMBERS = 3000
while True:
params = {'limit': 1000}
if len(members):
params['after'] = members[-1]['user']['id']
url = API_BASE_URL + '/guilds/{}/members'.format(server_id)
r = requests.get(url,
params=params,
headers=headers)
if r.status_code == 200:
chunk = r.json()
members += chunk
if chunk == [] or len(members) >= MAX_MEMBERS:
break
else:
break
db.set('guild:{}:members'.format(server_id), json.dumps(members))
db.expire('guild:{}:members'.format(server_id), 300)
return members
def get_guild_channels(server_id, voice=True, text=True):
headers = {'Authorization': 'Bot '+MEE6_TOKEN}
r = requests.get(API_BASE_URL+'/guilds/{}/channels'.format(server_id),
headers=headers)
if r.status_code == 200:
channels = r.json()
if not voice:
channels = list(filter(lambda c: c['type'] != 'voice',
channels))
if not text:
channels = list(filter(lambda c: c['type'] != 'text', channels))
return channels
return None
"""
Shop
"""
BUFFS = {'music30': {'name': 'music30',
'buff_name': 'music',
'fancy_name': 'Music Plugin',
'description': 'Enables the music plugin in your server'
' for 30 days.',
'price': 500,
'duration': 3600*24*30},
'musicinfinite': {'name': 'musicinfinite',
'buff_name': 'music',
'fancy_name': 'Infinite Music Plugin',
                                    'description': 'Enables the music plugin in your'
' server for life!',
'price': 2500,
'duration': -1}}
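# A purchased buff is stored under 'buffs:<server_id>:<buff_name>' with a TTL
# equal to its duration; a duration of -1 (infinite) sets the key without any
# expiry, which is what the plugin_page(buff=...) check relies on.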
@app.route('/dashboard/<int:server_id>/shop')
@my_dash
def shop(server_id):
last_buys = db.lrange('shop:buys', 0, 10) or []
last_buys = list(map(json.loads, last_buys))
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
is_earlybacker = user['id'] in db.smembers('early_backers')
is_elligible = is_earlybacker and user['id'] not in db.smembers('eb_served')
guilds = get_user_guilds(session['api_token'])
server = list(filter(lambda g: g['id'] == str(server_id), guilds))[0]
enabled_plugins = db.smembers('plugins:{}'.format(server_id))
ignored = db.get('user:{}:ignored'.format(user['id']))
notification = not ignored
return render_template('shop.html',
last_buys=last_buys,
server=server,
is_elligible=is_elligible,
enabled_plugins=enabled_plugins,
notification=notification)
@app.route('/dashboard/<int:server_id>/buy')
@plugin_method
def buy(server_id):
item = request.args.get('item')
if not item or item not in BUFFS.keys():
return redirect(url_for('index'))
item = BUFFS[item]
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
# EARLY BACKER PROMO
eb_promo = request.args.get('eb_promo')
if eb_promo:
is_earlybacker = user['id'] in db.smembers('early_backers')
is_elligible = is_earlybacker and user['id'] \
not in db.smembers('eb_served')
if not is_elligible:
return redirect(url_for('index'))
item_key = 'buffs:'+str(server_id)+':music'
db.set(item_key, "1")
db.sadd('eb_served', user['id'])
flash('Thanks for your purchase!', 'success')
return redirect(url_for('shop', server_id=server_id))
if user['points'] < item['price']:
flash('Sorry you don\'t have enough points to buy that :(', 'warning')
return redirect(url_for('shop', server_id=server_id))
item_key = 'buffs:'+str(server_id)+':'+BUFFS[item['name']]['buff_name']
item_buff = db.get(item_key)
if not item_buff:
db.set(item_key, "1")
if item['duration'] != -1:
db.expire(item_key, item['duration'])
else:
item_buff_ttl = db.ttl(item_key)
if not item_buff_ttl:
flash('You already have that buff!', 'warning')
return redirect(url_for('shop', server_id=server_id))
else:
db.set(item_key, "1")
if item['duration'] != -1:
db.expire(item_key, item_buff_ttl+item['duration'])
db.set('user:'+user['id']+':points', user['points']-item['price'])
purchase = {"item": item,
"buyer": {"id": user['id'],
"name": user['username'],
"discriminator": user['discriminator']},
"guild_id": str(server_id),
"timestamp": time.time()}
db.lpush("shop:buys", json.dumps(purchase))
db.lpush("shop:buys:{}".format(server_id), json.dumps(purchase))
flash('Thanks for your purchase!', 'success')
return redirect(url_for('shop', server_id=server_id))
@app.route('/donate')
@require_auth
def donate():
return render_template('donate.html')
@app.route('/checkout', methods=['POST'])
@require_auth
def checkout():
donation_amount = request.form.get('amount')
if not donation_amount:
return redirect(url_for('index'))
donation_amount = "{0:.2f}".format(float(donation_amount))
payer = {"payment_method": "paypal"}
items = [{"name": "Mee6 Contribution",
"price": donation_amount,
"currency": "EUR",
"quantity": "1"}]
amount = {"total": donation_amount,
"currency": "EUR"}
description = "Contribute to the Mee6 Bot project!"
redirect_urls = {"return_url": "http://" + DOMAIN +
url_for('checkout_confirm') + "?success=true",
"cancel_url": "http://" + DOMAIN + url_for('index')}
payment = paypalrestsdk.Payment({"intent": "sale",
"payer": payer,
"redirect_urls": redirect_urls,
"transactions": [{"item_list": {"items":
items},
"amount": amount,
"description":
description}]})
if payment.create():
for link in payment.links:
if link['method'] == "REDIRECT":
return redirect(link["href"])
return redirect(url_for('index'))
@app.route("/checkout-confirm")
@require_auth
def checkout_confirm():
if not request.args.get('success'):
return redirect(url_for('index'))
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
payment = paypalrestsdk.Payment.find(request.args.get('paymentId'))
if payment.execute({"payer_id": request.args.get('PayerID')}):
amount = float(payment.transactions[0]["amount"]["total"])
points = int(amount * 100)
new_points = user['points'] + points
db.set('user:'+user['id']+':points', new_points)
return redirect(url_for('thanks'))
else:
return redirect(url_for('index'))
"""
Command Plugin
"""
@app.route('/dashboard/<int:server_id>/commands')
@plugin_page('Commands')
def plugin_commands(server_id):
commands = []
commands_names = db.smembers('Commands.{}:commands'.format(server_id))
_members = get_guild_members(server_id)
guild = get_guild(server_id)
mention_parser = get_mention_parser(server_id, _members, guild)
members = typeahead_members(_members)
for cmd in commands_names:
message = db.get('Commands.{}:command:{}'.format(server_id, cmd))
message = mention_parser(message)
command = {
'name': cmd,
'message': message
}
commands.append(command)
commands = sorted(commands, key=lambda k: k['name'])
return {
'guild_roles': guild['roles'],
'guild_members': members,
'commands': commands
}
@app.route('/dashboard/<int:server_id>/commands/add', methods=['POST'])
@plugin_method
def add_command(server_id):
cmd_name = request.form.get('cmd_name', '')
cmd_message = request.form.get('cmd_message', '')
guild = get_guild(server_id)
mention_decoder = get_mention_decoder(server_id)
cmd_message = mention_decoder(cmd_message)
edit = cmd_name in db.smembers('Commands.{}:commands'.format(server_id))
cb = url_for('plugin_commands', server_id=server_id)
if len(cmd_name) == 0 or len(cmd_name) > 15:
        flash('A command name should be between 1 and 15 characters long !',
              'danger')
elif not edit and not re.match("^[A-Za-z0-9_-]*$", cmd_name):
flash('A command name should only contain '
'letters from a to z, numbers, _ or -', 'danger')
elif len(cmd_message) == 0 or len(cmd_message) > 2000:
        flash('A command message should be between '
              '1 and 2000 characters long !', 'danger')
else:
if not edit:
cmd_name = '!'+cmd_name
db.sadd('Commands.{}:commands'.format(server_id), cmd_name)
db.set('Commands.{}:command:{}'.format(server_id, cmd_name),
cmd_message)
if edit:
flash('Command {} edited !'.format(cmd_name), 'success')
else:
flash('Command {} added !'.format(cmd_name), 'success')
return redirect(cb)
@app.route('/dashboard/<int:server_id>/commands/<string:command>/delete')
@plugin_method
def delete_command(server_id, command):
db.srem('Commands.{}:commands'.format(server_id), command)
db.delete('Commands.{}:command:{}'.format(server_id, command))
flash('Command {} deleted !'.format(command), 'success')
return redirect(url_for('plugin_commands', server_id=server_id))
"""
Timers Plugin
"""
@app.route('/dashboard/<int:server_id>/timers')
@plugin_page('Timers')
def plugin_timers(server_id):
_members = get_guild_members(server_id)
guild = get_guild(server_id)
guild_channels = get_guild_channels(server_id, voice=False)
mention_parser = get_mention_parser(server_id, _members, guild)
members = typeahead_members(_members)
config = timers.get_config(server_id)
ts = []
for timer in config['timers']:
ts.append(timer)
ts[-1]['message'] = mention_parser(ts[-1]['message'])
ts[-1]['interval'] //= 60
return {
'guild_roles': guild['roles'],
'guild_members': members,
'guild_channels': guild_channels,
'timers': ts,
}
@app.route('/dashboard/<int:server_id>/timers/add', methods=['post'])
@plugin_method
def add_timer(server_id):
interval = request.form.get('interval', '')
message = request.form.get('message', '')
channel = request.form.get('channel', '')
guild = get_guild(server_id)
mention_decoder = get_mention_decoder(server_id)
message = mention_decoder(message)
config = timers.get_config(server_id)
cb = url_for('plugin_timers', server_id=server_id)
if len(config['timers']) >= 5:
flash('You cannot have more than 5 timers running', 'danger')
return redirect(cb)
try:
interval = int(interval)
except ValueError as e:
flash('The interval should be an integer number', 'danger')
return redirect(cb)
if interval <= 0:
flash('The interval should be a positive number', 'danger')
return redirect(cb)
if len(message) > 2000:
flash('The message should not be longer than 2000 characters', 'danger')
return redirect(cb)
if len(message) == 0:
flash('The message should not be empty', 'danger')
return redirect(cb)
t = {'channel': channel, 'interval': interval * 60,
'message': message}
config['timers'].append(t)
timers.patch_config(server_id, config)
flash('Timer added!', 'success')
return redirect(cb)
@app.route('/dashboard/<int:server_id>/timers/<int:timer_index>/update', methods=['post'])
@plugin_method
def update_timer(server_id, timer_index):
interval = request.form.get('interval', '')
message = request.form.get('message', '')
channel = request.form.get('channel', '')
mention_decoder = get_mention_decoder(server_id)
message = mention_decoder(message)
config = timers.get_config(server_id)
cb = url_for('plugin_timers', server_id=server_id)
try:
interval = int(interval)
except ValueError as e:
flash('The interval should be an integer number', 'danger')
return redirect(cb)
if interval <= 0:
flash('The interval should be a positive number', 'danger')
return redirect(cb)
if len(message) > 2000:
flash('The message should not be longer than 2000 characters', 'danger')
return redirect(cb)
if len(message) == 0:
flash('The message should not be empty', 'danger')
return redirect(cb)
t = {'channel': channel, 'interval': interval * 60,
'message': message}
config['timers'][timer_index-1] = t
timers.patch_config(server_id, config)
flash('Timer modified!', 'success')
return redirect(cb)
@app.route('/dashboard/<int:server_id>/timers/<int:timer_index>/delete')
@plugin_method
def delete_timer(server_id, timer_index):
config = timers.get_config(server_id)
del config['timers'][timer_index - 1]
timers.patch_config(server_id, config)
flash('Timer deleted!', 'success')
return redirect(url_for('plugin_timers', server_id=server_id))
"""
Help Plugin
"""
@app.route('/dashboard/<int:server_id>/help')
@plugin_page('Help')
def plugin_help(server_id):
if db.get('Help.{}:whisp'.format(server_id)):
whisp = "1"
else:
whisp = None
return {
"whisp": whisp
}
@app.route('/dashboard/<int:server_id>/update_help', methods=['POST'])
@plugin_method
def update_help(server_id):
whisp = request.form.get('whisp')
db.delete('Help.{}:whisp'.format(server_id))
if whisp:
db.set('Help.{}:whisp'.format(server_id), "1")
flash('Plugin updated!', 'success')
return redirect(url_for('plugin_help', server_id=server_id))
"""
Levels Plugin
"""
@app.route('/dashboard/<int:server_id>/levels')
@plugin_page('Levels')
def plugin_levels(server_id):
initial_announcement = 'GG {player}, '\
'you just advanced to **level {level}** !'
announcement_enabled = db.get('Levels.{}:announcement_enabled'.format(
server_id))
whisp = db.get('Levels.{}:whisp'.format(server_id))
announcement = db.get('Levels.{}:announcement'.format(server_id))
if announcement is None:
db.set('Levels.{}:announcement'.format(server_id), initial_announcement)
db.set('Levels.{}:announcement_enabled'.format(server_id), '1')
announcement_enabled = '1'
announcement = db.get('Levels.{}:announcement'.format(server_id))
db_banned_roles = db.smembers('Levels.{}:banned_roles'.format(server_id))\
or []
guild = get_guild(server_id)
guild_roles = list(filter(lambda r: not r['managed'], guild['roles']))
banned_roles = list(filter(
lambda r: r['name'] in db_banned_roles or r['id'] in db_banned_roles,
guild_roles
))
reward_roles = list(map(
lambda r: {'name': r['name'],
'id': r['id'],
'color': hex(r['color']).split('0x')[1],
'level': int(db.get('Levels.{}:reward:{}'.format(
server_id,
r['id'])) or 0)
},
guild_roles
))
cooldown = db.get('Levels.{}:cooldown'.format(server_id)) or 0
return {
'announcement': announcement,
'announcement_enabled': announcement_enabled,
'banned_roles': banned_roles,
'guild_roles': guild_roles,
'reward_roles': reward_roles,
'cooldown': cooldown,
'whisp': whisp
}
@app.route('/dashboard/<int:server_id>/levels/update', methods=['POST'])
@plugin_method
def update_levels(server_id):
banned_roles = request.form.get('banned_roles').split(',')
announcement = request.form.get('announcement')
enable = request.form.get('enable')
whisp = request.form.get('whisp')
cooldown = request.form.get('cooldown')
for k, v in request.form.items():
if k.startswith('rolereward_'):
db.set('Levels.{}:reward:{}'.format(
server_id,
k.split('_')[1]),
v)
try:
cooldown = int(cooldown)
except ValueError:
flash('The cooldown that you provided isn\'t an integer!', 'warning')
return redirect(url_for('plugin_levels', server_id=server_id))
if announcement == '' or len(announcement) > 2000:
        flash('The level up announcement'
              ' cannot be empty or have 2000+ characters.', 'warning')
else:
db.set('Levels.{}:announcement'.format(server_id), announcement)
db.set('Levels.{}:cooldown'.format(server_id), cooldown)
db.delete('Levels.{}:banned_roles'.format(server_id))
if len(banned_roles) > 0:
db.sadd('Levels.{}:banned_roles'.format(server_id), *banned_roles)
if enable:
db.set('Levels.{}:announcement_enabled'.format(server_id), '1')
else:
db.delete('Levels.{}:announcement_enabled'.format(server_id))
if whisp:
db.set('Levels.{}:whisp'.format(server_id), '1')
else:
db.delete('Levels.{}:whisp'.format(server_id))
    flash('Settings updated ;) !', 'success')
    return redirect(url_for('plugin_levels', server_id=server_id))
def get_level_xp(n):
return 5*(n**2)+50*n+100
def get_level_from_xp(xp):
remaining_xp = int(xp)
level = 0
while remaining_xp >= get_level_xp(level):
remaining_xp -= get_level_xp(level)
level += 1
return level
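# Worked example: level 0 costs 100 XP and level 1 costs 155 XP, so a player
# with 300 total XP is level 2, with 45 XP towards level 3 (which costs 220 XP).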
@app.route('/levels/<int:server_id>')
def levels(server_id):
is_admin = False
num = int(request.args.get('limit', 100))
if session.get('api_token'):
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
user_servers = get_user_managed_servers(
user,
get_user_guilds(session['api_token'])
)
is_admin = str(server_id) in list(map(lambda s: s['id'], user_servers))
server_check = str(server_id) in db.smembers('servers')
if not server_check:
return redirect(url_for('index'))
plugin_check = 'Levels' in db.smembers('plugins:{}'.format(server_id))
if not plugin_check:
return redirect(url_for('index'))
server = {
'id': server_id,
'icon': db.get('server:{}:icon'.format(server_id)),
'name': db.get('server:{}:name'.format(server_id))
}
guild = get_guild(server_id) or {}
roles = guild.get('roles', [])
from collections import defaultdict
reward_roles = defaultdict(list)
reward_levels = []
for role in roles:
level = int(db.get('Levels.{}:reward:{}'.format(
server_id,
role['id'])) or 0)
if level == 0:
continue
reward_levels.append(level)
role['color'] = hex(role['color']).split('0x')[1]
reward_roles[level].append(
role
)
reward_levels = list(sorted(set(reward_levels)))
_players = db.sort('Levels.{}:players'.format(server_id),
by='Levels.{}:player:*:xp'.format(server_id),
get=[
'Levels.{}:player:*:xp'.format(server_id),
'Levels.{}:player:*:name'.format(server_id),
'Levels.{}:player:*:avatar'.format(server_id),
'Levels.{}:player:*:discriminator'.format(server_id),
'#'
],
start=0,
num=num,
desc=True)
players = []
for i in range(0, len(_players), 5):
if not _players[i]:
continue
total_xp = int(_players[i])
lvl = get_level_from_xp(total_xp)
lvl_xp = get_level_xp(lvl)
x = 0
for l in range(0, lvl):
x += get_level_xp(l)
remaining_xp = int(total_xp - x)
player = {
'total_xp': int(_players[i]),
'xp': remaining_xp,
'lvl_xp': lvl_xp,
'lvl': lvl,
'xp_percent': floor(100*(remaining_xp)/lvl_xp),
'name': _players[i+1],
'avatar': _players[i+2],
'discriminator': _players[i+3],
'id': _players[i+4]
}
players.append(player)
json_format = request.args.get('json')
if json_format:
return jsonify({'server': server,
'reward_roles': reward_roles,
'players': players})
return render_template(
'levels.html',
small_title="Leaderboard",
is_admin=is_admin,
players=players,
server=server,
reward_roles=reward_roles,
reward_levels=reward_levels,
title="{} leaderboard - Mee6 bot".format(server['name'])
)
@app.route('/levels/reset/<int:server_id>/<int:player_id>')
@plugin_method
def reset_player(server_id, player_id):
csrf = session.pop('_csrf_token', None)
if not csrf or csrf != request.args.get('csrf'):
abort(403)
db.delete('Levels.{}:player:{}:xp'.format(server_id, player_id))
db.delete('Levels.{}:player:{}:lvl'.format(server_id, player_id))
db.srem('Levels.{}:players'.format(server_id), player_id)
## Send update to webhook
embed = MessageEmbed()
embed.title = 'XP reset {}'.format(server_id)
user = get_user(session['api_token'])
for k, v in user.items():
if type(v) == str:
embed.add_field('resetter_user_' + k, v, True)
embed.description = '**PLAYER {} XP GOT RESET**'.format(player_id)
embed.color = 0x008cba
message = 'XP RESET'
try:
send_message('346460166184763393', message, embed=embed)
finally:
return redirect(url_for('levels', server_id=server_id))
@app.route('/levels/reset_all/<int:server_id>')
@plugin_method
def reset_all_players(server_id):
csrf = session.pop('_csrf_token', None)
if not csrf or csrf != request.args.get('csrf'):
abort(403)
for player_id in db.smembers('Levels.{}:players'.format(server_id)):
db.delete('Levels.{}:player:{}:xp'.format(server_id, player_id))
db.delete('Levels.{}:player:{}:lvl'.format(server_id, player_id))
db.srem('Levels.{}:players'.format(server_id), player_id)
## Send update to webhook
embed = MessageEmbed()
embed.title = 'XP reset ALL {}'.format(server_id)
user = get_user(session['api_token'])
for k, v in user.items():
if type(v) == str:
embed.add_field('resetter_user_' + k, v, True)
embed.description = '**ALL PLAYERS XP GOT RESET**'
embed.color = 0x008cba
message = 'XP RESET ALL'
try:
send_message('346460166184763393', message, embed=embed)
finally:
return redirect(url_for('levels', server_id=server_id))
"""
Welcome Plugin
"""
@app.route('/dashboard/<int:server_id>/welcome')
@plugin_page('Welcome')
def plugin_welcome(server_id):
_members = get_guild_members(server_id)
db_welcome_roles = db.smembers('Welcome.{}:welcome_roles'.format(server_id))
guild = get_guild(server_id)
guild_roles = guild['roles']
welcome_roles = filter(
lambda r: r['id'] in db_welcome_roles,
guild_roles
)
mention_parser = get_mention_parser(server_id, _members, guild)
members = typeahead_members(_members)
initial_welcome = '{user}, Welcome to **{server}**!'\
' Have a great time here :wink: !'
initial_gb = '**{user}** just left **{server}**. Bye bye **{user}**...'
welcome_message = db.get('Welcome.{}:welcome_message'.format(server_id))
private = db.get('Welcome.{}:private'.format(server_id)) or None
gb_message = db.get('Welcome.{}:gb_message'.format(server_id))
db_welcome_channel = db.get('Welcome.{}:channel_name'.format(server_id))
guild_channels = get_guild_channels(server_id, voice=False)
gb_enabled = db.get('Welcome.{}:gb_disabled'.format(server_id)) \
is None
welcome_channel = None
for channel in guild_channels:
if channel['name'] == db_welcome_channel or \
channel['id'] == db_welcome_channel:
welcome_channel = channel
break
if welcome_message is None:
db.set('Welcome.{}:welcome_message'.format(server_id), initial_welcome)
welcome_message = initial_welcome
if gb_message is None:
db.set('Welcome.{}:gb_message'.format(server_id), initial_gb)
gb_message = initial_gb
welcome_message = mention_parser(welcome_message)
gb_message = mention_parser(gb_message)
return {
'guild_members': members,
'guild_roles': guild_roles,
'welcome_roles': welcome_roles,
'welcome_message': welcome_message,
'private': private,
'gb_message': gb_message,
'guild_channels': guild_channels,
'gb_enabled': gb_enabled,
'welcome_channel': welcome_channel
}
@app.route('/dashboard/<int:server_id>/welcome/update', methods=['POST'])
@plugin_method
def update_welcome(server_id):
mention_decoder = get_mention_decoder(server_id)
welcome_message = request.form.get('welcome_message')
welcome_message = mention_decoder(welcome_message)
private = request.form.get('private')
gb_message = request.form.get('gb_message')
gb_message = mention_decoder(gb_message)
gb_enabled = request.form.get('gb_enabled')
channel = request.form.get('channel')
welcome_roles = request.form.get('welcome_roles', '').split(',')
db.delete('Welcome.{}:welcome_roles'.format(server_id))
for role in welcome_roles:
db.sadd('Welcome.{}:welcome_roles'.format(server_id), role)
if gb_enabled:
db.delete('Welcome.{}:gb_disabled'.format(server_id))
else:
db.set('Welcome.{}:gb_disabled'.format(server_id), "1")
if private:
db.set('Welcome.{}:private'.format(server_id), "1")
else:
db.delete('Welcome.{}:private'.format(server_id))
if welcome_message == '' or len(welcome_message) > 2000:
flash('The welcome message cannot be empty or have 2000+ characters.',
'warning')
else:
if gb_message == '' or len(gb_message) > 2000:
            flash('The goodbye message cannot be empty'
' or have 2000+ characters.', 'warning')
else:
db.set('Welcome.{}:welcome_message'.format(server_id),
welcome_message)
db.set('Welcome.{}:gb_message'.format(server_id), gb_message)
db.set('Welcome.{}:channel_name'.format(server_id), channel)
flash('Settings updated ;) !', 'success')
return redirect(url_for('plugin_welcome', server_id=server_id))
"""
Search
"""
SEARCH_COMMANDS = [#{"name": 'google',
# "description": "Search for anything on Google"},
{"name": 'youtube',
"description": "Search for videos on Youtube"},
{"name": 'urban',
"description": "Search for slang words on Urban"
" Dictionnary "},
#{"name": 'gimg',
# "description": "Search for images on Google Image"},
{"name": 'pokemon',
"description": "Search for your favorite pokémons"},
{"name": 'twitch',
"description": "Search for your favorite twitch streamers"},
{"name": 'imgur',
"description": "Search for the dankest memes images on"
" imgur"},
#{"name": 'wiki',
# "description": "Get smarter thanks to wikipedia"},
{"name": 'manga',
"description": "Search for your favorite mango from "
"MyAnimeList"},
{"name": 'anime',
"description": "Search for your favorite animu from "
"MyAnimeList"}]
@app.route('/dashboard/<int:server_id>/search')
@plugin_page('Search')
def plugin_search(server_id):
enabled_commands = [cmd['name'] for cmd in SEARCH_COMMANDS
if db.get("Search.{}:{}".format(server_id,
cmd['name']))]
return {"enabled_commands": enabled_commands,
"commands": SEARCH_COMMANDS}
@app.route('/dashboard/<int:server_id>/search/edit', methods=['POST'])
@plugin_method
def search_edit(server_id):
pipe = db.pipeline()
for cmd in SEARCH_COMMANDS:
pipe.delete("Search.{}:{}".format(server_id, cmd['name']))
for cmd in SEARCH_COMMANDS:
if request.form.get(cmd['name']):
pipe.set("Search.{}:{}".format(server_id, cmd['name']), 1)
result = pipe.execute()
if result:
flash("Search plugin settings updated! ;)", "success")
else:
flash("An error occured :( ...", "warning")
return redirect(url_for("plugin_search", server_id=server_id))
"""
Git Plugin
"""
@app.route('/dashboard/<int:server_id>/git')
@plugin_page('Git')
def plugin_git(server_id):
return {}
"""
Streamers Plugin
"""
@app.route('/dashboard/<int:server_id>/streamers')
@plugin_page('Streamers')
def plugin_streamers(server_id):
config = streamers.get_config(server_id)
twitch_streamers = ','.join(config.get('twitch_streamers'))
hitbox_streamers = ','.join(config.get('hitbox_streamers'))
guild_channels = get_guild_channels(server_id, voice=False)
return {
'announcement_channel': config['announcement_channel'],
'guild_channels': guild_channels,
'announcement_msg': config['announcement_message'],
'streamers': twitch_streamers,
'hitbox_streamers': hitbox_streamers
}
@app.route('/dashboard/<int:server_id>/update_streamers', methods=['POST'])
@plugin_method
def update_streamers(server_id):
announcement_channel = request.form.get('announcement_channel')
announcement_msg = request.form.get('announcement_msg')
if announcement_msg == "":
flash('The announcement message should not be empty!', 'warning')
return redirect(url_for('plugin_streamers', server_id=server_id))
twitch_streamers = strip(request.form.get('streamers').split(','))
hitbox_streamers = strip(request.form.get('hitbox_streamers').split(','))
new_config = {'announcement_channel': announcement_channel,
'announcement_message': announcement_msg,
'twitch_streamers': twitch_streamers,
'hitbox_streamers': hitbox_streamers}
streamers.patch_config(server_id, new_config)
    flash('Configuration updated successfully!',
          'success')
return redirect(url_for('plugin_streamers', server_id=server_id))
"""
Reddit Plugin
"""
@app.route('/dashboard/<int:server_id>/reddit')
@plugin_page('Reddit')
def plugin_reddit(server_id):
guild_channels = get_guild_channels(server_id, voice=False)
config = reddit.get_config(server_id)
subs = ','.join(config['subreddits'])
display_channel = config['announcement_channel']
return {
'subs': subs,
'display_channel': display_channel,
'guild_channels': guild_channels,
}
@app.route('/dashboard/<int:server_id>/update_reddit', methods=['POST'])
@plugin_method
def update_reddit(server_id):
display_channel = request.form.get('display_channel')
subs = strip(request.form.get('subs').split(','))
config_patch = {'announcement_channel': display_channel,
'subreddits': subs}
reddit.patch_config(server_id, config_patch)
    flash('Configuration updated successfully!', 'success')
return redirect(url_for('plugin_reddit', server_id=server_id))
"""
Moderator Plugin
"""
@app.route('/dashboard/<int:server_id>/moderator')
@plugin_page('Moderator')
def plugin_moderator(server_id):
db_moderator_roles = db.smembers('Moderator.{}:roles'.format(server_id))\
or []
guild = get_guild(server_id)
guild_roles = guild['roles']
moderator_roles = list(filter(
lambda r: r['name'] in db_moderator_roles or
r['id'] in db_moderator_roles,
guild_roles
))
clear = db.get('Moderator.{}:clear'.format(server_id))
banned_words = db.get('Moderator.{}:banned_words'.format(server_id))
slowmode = db.get('Moderator.{}:slowmode'.format(server_id))
mute = db.get('Moderator.{}:mute'.format(server_id))
return {
'moderator_roles': moderator_roles,
'guild_roles': guild_roles,
'clear': clear,
'banned_words': banned_words or '',
'slowmode': slowmode,
'mute': mute
}
@app.route('/dashboard/<int:server_id>/update_moderator', methods=['POST'])
@plugin_method
def update_moderator(server_id):
moderator_roles = request.form.get('moderator_roles').split(',')
banned_words = strip(request.form.get('banned_words').split(','))
banned_words = ','.join(banned_words)
db.delete('Moderator.{}:roles'.format(server_id))
for role in moderator_roles:
if role != "":
db.sadd('Moderator.{}:roles'.format(server_id), role)
db.delete('Moderator.{}:clear'.format(server_id))
db.delete('Moderator.{}:slowmode'.format(server_id))
db.delete('Moderator.{}:mute'.format(server_id))
db.set('Moderator.{}:banned_words'.format(server_id), banned_words)
clear = request.form.get('clear')
slowmode = request.form.get('slowmode')
mute = request.form.get('mute')
if clear:
db.set('Moderator.{}:clear'.format(server_id), '1')
if slowmode:
db.set('Moderator.{}:slowmode'.format(server_id), '1')
if mute:
db.set('Moderator.{}:mute'.format(server_id), '1')
flash('Configuration updated ;)!', 'success')
return redirect(url_for('plugin_moderator', server_id=server_id))
"""
Music Plugin
"""
@app.route('/dashboard/<int:server_id>/music')
@plugin_page('Music', buff="music")
def plugin_music(server_id):
db_allowed_roles = db.smembers('Music.{}:allowed_roles'.format(server_id))\
or []
db_requesters_roles = db.smembers(
'Music.{}:requesters_roles'.format(server_id)
) or []
guild = get_guild(server_id)
guild_roles = guild['roles']
allowed_roles = filter(
lambda r: r['name'] in db_allowed_roles or r['id'] in db_allowed_roles,
guild_roles
)
requesters_roles = filter(
lambda r: r['id'] in db_requesters_roles,
guild_roles
)
return {
'guild_roles': guild_roles,
'allowed_roles': list(allowed_roles),
'requesters_roles': list(requesters_roles)
}
@app.route('/dashboard/<int:server_id>/update_music', methods=['POST'])
@plugin_method
def update_music(server_id):
allowed_roles = request.form.get('allowed_roles', '').split(',')
requesters_roles = request.form.get('requesters_roles', '').split(',')
db.delete('Music.{}:allowed_roles'.format(server_id))
db.delete('Music.{}:requesters_roles'.format(server_id))
for role in allowed_roles:
db.sadd('Music.{}:allowed_roles'.format(server_id), role)
for role in requesters_roles:
db.sadd('Music.{}:requesters_roles'.format(server_id), role)
flash('Configuration updated ;)!', 'success')
return redirect(url_for('plugin_music', server_id=server_id))
@app.route('/request_playlist/<int:server_id>')
def request_playlist(server_id):
if 'Music' not in db.smembers('plugins:{}'.format(server_id)):
return redirect(url_for('index'))
playlist = db.lrange('Music.{}:request_queue'.format(server_id), 0, -1)
playlist = list(map(lambda v: json.loads(v), playlist))
is_admin = False
if session.get('api_token'):
user = get_user(session['api_token'])
if not user:
return redirect(url_for('logout'))
user_servers = get_user_managed_servers(
user,
get_user_guilds(session['api_token'])
)
is_admin = str(server_id) in list(map(lambda s: s['id'], user_servers))
server = {
'id': server_id,
'icon': db.get('server:{}:icon'.format(server_id)),
'name': db.get('server:{}:name'.format(server_id))
}
return render_template('request-playlist.html', playlist=playlist,
server=server, is_admin=is_admin)
@app.route('/delete_request/<int:server_id>/<int:pos>')
@plugin_method
def delete_request(server_id, pos):
playlist = db.lrange('Music.{}:request_queue'.format(server_id), 0, -1)
if pos < len(playlist):
del playlist[pos]
db.delete('Music.{}:request_queue'.format(server_id))
for vid in playlist:
db.rpush('Music.{}:request_queue'.format(server_id), vid)
return redirect(url_for('request_playlist', server_id=server_id))
@app.before_first_request
def setup_logging():
# In production mode, add log handler to sys.stderr.
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
if __name__ == '__main__':
app.debug = True
from os import path
extra_dirs = ['templates',]
extra_files = extra_dirs[:]
for extra_dir in extra_dirs:
for dirname, dirs, files in os.walk(extra_dir):
for filename in files:
filename = path.join(dirname, filename)
if path.isfile(filename):
extra_files.append(filename)
app.run(extra_files=extra_files)
| []
| []
| [
"API_BASE_URL",
"OAUTH2_CLIENT_SECRET",
"OAUTHLIB_INSECURE_TRANSPORT",
"MEE6_TOKEN",
"OAUTH2_REDIRECT_URI",
"VIRTUAL_HOST",
"SECRET_KEY",
"OAUTH2_CLIENT_ID",
"MONGO_URL",
"FLASK_DEBUG",
"REDIS_URL"
]
| [] | ["API_BASE_URL", "OAUTH2_CLIENT_SECRET", "OAUTHLIB_INSECURE_TRANSPORT", "MEE6_TOKEN", "OAUTH2_REDIRECT_URI", "VIRTUAL_HOST", "SECRET_KEY", "OAUTH2_CLIENT_ID", "MONGO_URL", "FLASK_DEBUG", "REDIS_URL"] | python | 11 | 0 | |
uno/uno/asgi.py | import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uno.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
libs/utils/env.py | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import json
import logging
import os
import re
import shutil
import sys
import time
import unittest
import devlib
from devlib.utils.misc import memoized
from devlib import Platform, TargetError
from trappy.stats.Topology import Topology
from wlgen import RTA
from energy import EnergyMeter
from energy_model import EnergyModel
from conf import JsonConf
from platforms.juno_energy import juno_energy
from platforms.hikey_energy import hikey_energy
from platforms.pixel_energy import pixel_energy
USERNAME_DEFAULT = 'root'
PASSWORD_DEFAULT = ''
WORKING_DIR_DEFAULT = '/data/local/schedtest'
FTRACE_EVENTS_DEFAULT = ['sched:*']
FTRACE_BUFSIZE_DEFAULT = 10240
OUT_PREFIX = 'results'
LATEST_LINK = 'results_latest'
basepath = os.path.dirname(os.path.realpath(__file__))
basepath = basepath.replace('/libs/utils', '')
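# Minimal PATH lookup (similar in spirit to shutil.which): return the full
# path of `file` if it exists in one of the $PATH directories, otherwise None.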
def os_which(file):
for path in os.environ["PATH"].split(os.pathsep):
if os.path.exists(os.path.join(path, file)):
return os.path.join(path, file)
return None
class ShareState(object):
__shared_state = {}
def __init__(self):
self.__dict__ = self.__shared_state
class TestEnv(ShareState):
"""
Represents the environment configuring LISA, the target, and the test setup
The test environment is defined by:
- a target configuration (target_conf) defining which HW platform we
want to use to run the experiments
- a test configuration (test_conf) defining which SW setups we need on
that HW target
- a folder to collect the experiments results, which can be specified
using the test_conf::results_dir option and is by default wiped from
all the previous contents (if wipe=True)
:param target_conf:
Configuration defining the target to run experiments on. May be
- A dict defining the values directly
- A path to a JSON file containing the configuration
- ``None``, in which case $LISA_HOME/target.config is used.
You need to provide the information needed to connect to the
target. For SSH targets that means "host", "username" and
either "password" or "keyfile". All other fields are optional if
the relevant features aren't needed. Has the following keys:
**host**
Target IP or MAC address for SSH access
**username**
For SSH access
**keyfile**
Path to SSH key (alternative to password)
**password**
SSH password (alternative to keyfile)
**device**
Target Android device ID if using ADB
**port**
            Port for Android connection; the default port is 5555
**ANDROID_HOME**
Path to Android SDK. Defaults to ``$ANDROID_HOME`` from the
environment.
**rtapp-calib**
Calibration values for RT-App. If unspecified, LISA will
calibrate RT-App on the target. A message will be logged with
a value that can be copied here to avoid having to re-run
calibration on subsequent tests.
**tftp**
Directory path containing kernels and DTB images for the
target. LISA does *not* manage this TFTP server, it must be
provided externally. Optional.
:param test_conf: Configuration of software for target experiments. Takes
the same form as target_conf. Fields are:
**modules**
Devlib modules to be enabled. Default is []
**exclude_modules**
Devlib modules to be disabled. Default is [].
**tools**
List of tools (available under ./tools/$ARCH/) to install on
the target. Names, not paths (e.g. ['ftrace']). Default is [].
**ping_time**, **reboot_time**
Override parameters to :meth:`reboot` method
**__features__**
List of test environment features to enable. Options are:
"no-kernel"
do not deploy kernel/dtb images
"no-reboot"
do not force reboot the target at each configuration change
"debug"
enable debugging messages
**ftrace**
Configuration for ftrace. Dictionary with keys:
events
events to enable.
functions
functions to enable in the function tracer. Optional.
buffsize
Size of buffer. Default is 10240.
**systrace**
Configuration for systrace. Dictionary with keys:
categories:
                override the list of enabled categories
extra_categories:
append to the default list of categories
extra_events:
                additional ftrace events to manually enable while running systrace
buffsize:
Size of ftrace buffer that systrace uses
**results_dir**
location of results of the experiments
:param wipe: set true to cleanup all previous content from the output
folder
:type wipe: bool
:param force_new: Create a new TestEnv object even if there is one available
for this session. By default, TestEnv only creates one
object per session, use this to override this behaviour.
:type force_new: bool
"""
_initialized = False
def __init__(self, target_conf=None, test_conf=None, wipe=True,
force_new=False):
super(TestEnv, self).__init__()
if self._initialized and not force_new:
return
self.conf = {}
self.test_conf = {}
self.target = None
self.ftrace = None
self.workdir = WORKING_DIR_DEFAULT
self.__installed_tools = set()
self.__modules = []
self.__connection_settings = None
self._calib = None
# Keep track of target IP and MAC address
self.ip = None
self.mac = None
# Keep track of last installed kernel
self.kernel = None
self.dtb = None
# Energy meter configuration
self.emeter = None
# The platform descriptor to be saved into the results folder
self.platform = {}
# Keep track of android support
self.LISA_HOME = os.environ.get('LISA_HOME', '/vagrant')
self.ANDROID_HOME = os.environ.get('ANDROID_HOME', None)
self.CATAPULT_HOME = os.environ.get('CATAPULT_HOME',
os.path.join(self.LISA_HOME, 'tools', 'catapult'))
# Setup logging
self._log = logging.getLogger('TestEnv')
# Compute base installation path
self._log.info('Using base path: %s', basepath)
# Setup target configuration
if isinstance(target_conf, dict):
self._log.info('Loading custom (inline) target configuration')
self.conf = target_conf
elif isinstance(target_conf, str):
self._log.info('Loading custom (file) target configuration')
self.conf = self.loadTargetConfig(target_conf)
elif target_conf is None:
self._log.info('Loading default (file) target configuration')
self.conf = self.loadTargetConfig()
self._log.debug('Target configuration %s', self.conf)
# Setup test configuration
if test_conf:
if isinstance(test_conf, dict):
self._log.info('Loading custom (inline) test configuration')
self.test_conf = test_conf
elif isinstance(test_conf, str):
self._log.info('Loading custom (file) test configuration')
self.test_conf = self.loadTargetConfig(test_conf)
else:
raise ValueError('test_conf must be either a dictionary or a filepath')
self._log.debug('Test configuration %s', self.conf)
# Setup target working directory
if 'workdir' in self.conf:
self.workdir = self.conf['workdir']
# Initialize binary tools to deploy
test_conf_tools = self.test_conf.get('tools', [])
target_conf_tools = self.conf.get('tools', [])
self.__tools = list(set(test_conf_tools + target_conf_tools))
# Initialize ftrace events
# test configuration override target one
if 'ftrace' in self.test_conf:
self.conf['ftrace'] = self.test_conf['ftrace']
if self.conf.get('ftrace'):
self.__tools.append('trace-cmd')
# Initialize features
if '__features__' not in self.conf:
self.conf['__features__'] = []
self._init()
# Initialize FTrace events collection
self._init_ftrace(True)
# Initialize RT-App calibration values
self.calibration()
# Initialize local results folder
# test configuration overrides target one
self.res_dir = (self.test_conf.get('results_dir') or
self.conf.get('results_dir'))
if self.res_dir and not os.path.isabs(self.res_dir):
self.res_dir = os.path.join(basepath, 'results', self.res_dir)
else:
self.res_dir = os.path.join(basepath, OUT_PREFIX)
self.res_dir = datetime.datetime.now()\
.strftime(self.res_dir + '/%Y%m%d_%H%M%S')
if wipe and os.path.exists(self.res_dir):
self._log.warning('Wipe previous contents of the results folder:')
self._log.warning(' %s', self.res_dir)
shutil.rmtree(self.res_dir, ignore_errors=True)
if not os.path.exists(self.res_dir):
os.makedirs(self.res_dir)
res_lnk = os.path.join(basepath, LATEST_LINK)
if os.path.islink(res_lnk):
os.remove(res_lnk)
os.symlink(self.res_dir, res_lnk)
# Initialize energy probe instrument
self._init_energy(True)
self._log.info('Set results folder to:')
self._log.info(' %s', self.res_dir)
self._log.info('Experiment results available also in:')
self._log.info(' %s', res_lnk)
self._initialized = True
def loadTargetConfig(self, filepath='target.config'):
"""
Load the target configuration from the specified file.
:param filepath: Path of the target configuration file. Relative to the
root folder of the test suite.
:type filepath: str
"""
# Loading default target configuration
conf_file = os.path.join(basepath, filepath)
self._log.info('Loading target configuration [%s]...', conf_file)
conf = JsonConf(conf_file)
conf.load()
return conf.json
def _init(self, force = False):
# Initialize target
self._init_target(force)
# Initialize target Topology for behavior analysis
CLUSTERS = []
# Build topology for a big.LITTLE system
if self.target.big_core and \
(self.target.abi == 'arm64' or self.target.abi == 'armeabi'):
# Populate cluster for a big.LITTLE platform
if self.target.big_core:
# Load cluster of LITTLE cores
CLUSTERS.append(
[i for i,t in enumerate(self.target.core_names)
if t == self.target.little_core])
# Load cluster of big cores
CLUSTERS.append(
[i for i,t in enumerate(self.target.core_names)
if t == self.target.big_core])
# Build topology for an SMP system
elif not self.target.big_core or \
self.target.abi == 'x86_64':
for c in set(self.target.core_clusters):
CLUSTERS.append(
[i for i,v in enumerate(self.target.core_clusters)
if v == c])
self.topology = Topology(clusters=CLUSTERS)
self._log.info('Topology:')
self._log.info(' %s', CLUSTERS)
# Initialize the platform descriptor
self._init_platform()
def _init_target(self, force = False):
if not force and self.target is not None:
return self.target
self.__connection_settings = {}
# Configure username
if 'username' in self.conf:
self.__connection_settings['username'] = self.conf['username']
else:
self.__connection_settings['username'] = USERNAME_DEFAULT
# Configure password or SSH keyfile
if 'keyfile' in self.conf:
self.__connection_settings['keyfile'] = self.conf['keyfile']
elif 'password' in self.conf:
self.__connection_settings['password'] = self.conf['password']
else:
self.__connection_settings['password'] = PASSWORD_DEFAULT
# Configure port
if 'port' in self.conf:
self.__connection_settings['port'] = self.conf['port']
# Configure the host IP/MAC address
if 'host' in self.conf:
try:
if ':' in self.conf['host']:
(self.mac, self.ip) = self.resolv_host(self.conf['host'])
else:
self.ip = self.conf['host']
self.__connection_settings['host'] = self.ip
except KeyError:
raise ValueError('Config error: missing [host] parameter')
try:
platform_type = self.conf['platform']
except KeyError:
raise ValueError('Config error: missing [platform] parameter')
if platform_type.lower() == 'android':
self.ANDROID_HOME = self.conf.get('ANDROID_HOME',
self.ANDROID_HOME)
if self.ANDROID_HOME:
self._adb = os.path.join(self.ANDROID_HOME,
'platform-tools', 'adb')
self._fastboot = os.path.join(self.ANDROID_HOME,
'platform-tools', 'fastboot')
os.environ['ANDROID_HOME'] = self.ANDROID_HOME
os.environ['CATAPULT_HOME'] = self.CATAPULT_HOME
else:
self._log.info('Android SDK not found as ANDROID_HOME not defined, using PATH for platform tools')
self._adb = os_which('adb')
self._fastboot = os_which('fastboot')
if self._adb:
self._log.info('Using adb from ' + self._adb)
if self._fastboot:
self._log.info('Using fastboot from ' + self._fastboot)
self._log.info('External tools using:')
self._log.info(' ANDROID_HOME: %s', self.ANDROID_HOME)
self._log.info(' CATAPULT_HOME: %s', self.CATAPULT_HOME)
if not os.path.exists(self._adb):
raise RuntimeError('\nADB binary not found\n\t{}\ndoes not exist!\n\n'
'Please configure ANDROID_HOME to point to '
'a valid Android SDK installation folder.'\
.format(self._adb))
########################################################################
# Board configuration
########################################################################
# Setup board default if not specified by configuration
self.nrg_model = None
platform = None
self.__modules = []
if 'board' not in self.conf:
self.conf['board'] = 'UNKNOWN'
# Initialize TC2 board
if self.conf['board'].upper() == 'TC2':
platform = devlib.platform.arm.TC2()
self.__modules = ['bl', 'hwmon', 'cpufreq']
# Initialize JUNO board
elif self.conf['board'].upper() in ('JUNO', 'JUNO2'):
platform = devlib.platform.arm.Juno()
self.nrg_model = juno_energy
self.__modules = ['bl', 'hwmon', 'cpufreq']
# Initialize OAK board
elif self.conf['board'].upper() == 'OAK':
platform = Platform(model='MT8173')
self.__modules = ['bl', 'cpufreq']
# Initialize HiKey board
elif self.conf['board'].upper() == 'HIKEY':
self.nrg_model = hikey_energy
self.__modules = [ "cpufreq", "cpuidle" ]
platform = Platform(model='hikey')
# Initialize Pixel phone
elif self.conf['board'].upper() == 'PIXEL':
self.nrg_model = pixel_energy
self.__modules = ['bl', 'cpufreq']
platform = Platform(model='pixel')
elif self.conf['board'] != 'UNKNOWN':
# Initialize from platform descriptor (if available)
board = self._load_board(self.conf['board'])
if board:
core_names=board['cores']
platform = Platform(
model=self.conf['board'],
core_names=core_names,
core_clusters = self._get_clusters(core_names),
big_core=board.get('big_core', None)
)
self.__modules=board.get('modules', [])
########################################################################
# Modules configuration
########################################################################
modules = set(self.__modules)
# Refine modules list based on target.conf
modules.update(self.conf.get('modules', []))
# Merge tests specific modules
modules.update(self.test_conf.get('modules', []))
remove_modules = set(self.conf.get('exclude_modules', []) +
self.test_conf.get('exclude_modules', []))
modules.difference_update(remove_modules)
self.__modules = list(modules)
self._log.info('Devlib modules to load: %s', self.__modules)
########################################################################
# Devlib target setup (based on target.config::platform)
########################################################################
# If the target is Android, we only need the device (when one is specified)
if platform_type.lower() == 'android':
self.__connection_settings = None
device = 'DEFAULT'
if 'device' in self.conf:
device = self.conf['device']
self.__connection_settings = {'device' : device}
elif 'host' in self.conf:
host = self.conf['host']
port = '5555'
if 'port' in self.conf:
port = str(self.conf['port'])
device = '{}:{}'.format(host, port)
self.__connection_settings = {'device' : device}
self._log.info('Connecting Android target [%s]', device)
else:
self._log.info('Connecting %s target:', platform_type)
for key in self.__connection_settings:
self._log.info('%10s : %s', key,
self.__connection_settings[key])
self._log.info('Connection settings:')
self._log.info(' %s', self.__connection_settings)
if platform_type.lower() == 'linux':
self._log.debug('Setup LINUX target...')
if "host" not in self.__connection_settings:
raise ValueError('Missing "host" param in Linux target conf')
self.target = devlib.LinuxTarget(
platform = platform,
connection_settings = self.__connection_settings,
load_default_modules = False,
modules = self.__modules)
elif platform_type.lower() == 'android':
self._log.debug('Setup ANDROID target...')
self.target = devlib.AndroidTarget(
platform = platform,
connection_settings = self.__connection_settings,
load_default_modules = False,
modules = self.__modules)
elif platform_type.lower() == 'host':
self._log.debug('Setup HOST target...')
self.target = devlib.LocalLinuxTarget(
platform = platform,
load_default_modules = False,
modules = self.__modules)
else:
raise ValueError('Config error: not supported [platform] type {}'\
.format(platform_type))
self._log.debug('Checking target connection...')
self._log.debug('Target info:')
self._log.debug(' ABI: %s', self.target.abi)
self._log.debug(' CPUs: %s', self.target.cpuinfo)
self._log.debug(' Clusters: %s', self.target.core_clusters)
self._log.info('Initializing target workdir:')
self._log.info(' %s', self.target.working_directory)
self.target.setup()
self.install_tools(self.__tools)
# Verify that all the required modules have been initialized
for module in self.__modules:
self._log.debug('Check for module [%s]...', module)
if not hasattr(self.target, module):
self._log.warning('Unable to initialize [%s] module', module)
self._log.error('Fix your target kernel configuration or '
'disable module from configuration')
raise RuntimeError('Failed to initialize [{}] module, '
'update your kernel or test configurations'.format(module))
if not self.nrg_model:
try:
self._log.info('Attempting to read energy model from target')
self.nrg_model = EnergyModel.from_target(self.target)
except (TargetError, RuntimeError, ValueError) as e:
self._log.error("Couldn't read target energy model: %s", e)
def install_tools(self, tools):
"""
Install tools additional to those specified in the test config 'tools'
field
:param tools: The list of names of tools to install
:type tools: list(str)
"""
tools = set(tools)
# Add tools dependencies
if 'rt-app' in tools:
tools.update(['taskset', 'trace-cmd', 'perf', 'cgroup_run_into.sh'])
# Remove duplicates and already-installed tools
tools.difference_update(self.__installed_tools)
tools_to_install = []
for tool in tools:
binary = '{}/tools/scripts/{}'.format(basepath, tool)
if not os.path.isfile(binary):
binary = '{}/tools/{}/{}'\
.format(basepath, self.target.abi, tool)
tools_to_install.append(binary)
for tool_to_install in tools_to_install:
self.target.install(tool_to_install)
self.__installed_tools.update(tools)
def ftrace_conf(self, conf):
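# Force re-initialization of the FTrace collector with the given configuration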
self._init_ftrace(True, conf)
def _init_ftrace(self, force=False, conf=None):
if not force and self.ftrace is not None:
return self.ftrace
if conf is None and 'ftrace' not in self.conf:
return None
if conf is not None:
ftrace = conf
else:
ftrace = self.conf['ftrace']
events = FTRACE_EVENTS_DEFAULT
if 'events' in ftrace:
events = ftrace['events']
functions = None
if 'functions' in ftrace:
functions = ftrace['functions']
buffsize = FTRACE_BUFSIZE_DEFAULT
if 'buffsize' in ftrace:
buffsize = ftrace['buffsize']
self.ftrace = devlib.FtraceCollector(
self.target,
events = events,
functions = functions,
buffer_size = buffsize,
autoreport = False,
autoview = False
)
if events:
self._log.info('Enabled tracepoints:')
for event in events:
self._log.info(' %s', event)
if functions:
self._log.info('Kernel functions profiled:')
for function in functions:
self._log.info(' %s', function)
return self.ftrace
def _init_energy(self, force):
# Initialize energy probe to board default
self.emeter = EnergyMeter.getInstance(self.target, self.conf, force,
self.res_dir)
def _init_platform_bl(self):
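# Build the platform descriptor for big.LITTLE targets: cluster CPU lists and frequencies from the devlib 'bl' module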
self.platform = {
'clusters' : {
'little' : self.target.bl.littles,
'big' : self.target.bl.bigs
},
'freqs' : {
'little' : self.target.bl.list_littles_frequencies(),
'big' : self.target.bl.list_bigs_frequencies()
}
}
self.platform['cpus_count'] = \
len(self.platform['clusters']['little']) + \
len(self.platform['clusters']['big'])
def _init_platform_smp(self):
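# Build the platform descriptor for SMP targets: group CPUs by core_clusters and read frequencies via cpufreq when available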
self.platform = {
'clusters' : {},
'freqs' : {}
}
for cpu_id,node_id in enumerate(self.target.core_clusters):
if node_id not in self.platform['clusters']:
self.platform['clusters'][node_id] = []
self.platform['clusters'][node_id].append(cpu_id)
if 'cpufreq' in self.target.modules:
# Try loading frequencies using the cpufreq module
for cluster_id in self.platform['clusters']:
core_id = self.platform['clusters'][cluster_id][0]
self.platform['freqs'][cluster_id] = \
self.target.cpufreq.list_frequencies(core_id)
else:
self._log.warning('Unable to identify cluster frequencies')
# TODO: get the performance boundaries in case of intel_pstate driver
self.platform['cpus_count'] = len(self.target.core_clusters)
def _load_em(self, board):
em_path = os.path.join(basepath,
'libs/utils/platforms', board.lower() + '.json')
self._log.debug('Trying to load default EM from %s', em_path)
if not os.path.exists(em_path):
return None
self._log.info('Loading default EM:')
self._log.info(' %s', em_path)
board = JsonConf(em_path)
board.load()
if 'nrg_model' not in board.json:
return None
return board.json['nrg_model']
def _load_board(self, board):
board_path = os.path.join(basepath,
'libs/utils/platforms', board.lower() + '.json')
self._log.debug('Trying to load board descriptor from %s', board_path)
if not os.path.exists(board_path):
return None
self._log.info('Loading board:')
self._log.info(' %s', board_path)
board = JsonConf(board_path)
board.load()
if 'board' not in board.json:
return None
return board.json['board']
def _get_clusters(self, core_names):
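# Assign a cluster id to each core: cores sharing the same name get the same id, in order of first appearance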
idx = 0
clusters = []
ids_map = { core_names[0] : 0 }
for name in core_names:
idx = ids_map.get(name, idx+1)
ids_map[name] = idx
clusters.append(idx)
return clusters
def _init_platform(self):
if 'bl' in self.target.modules:
self._init_platform_bl()
else:
self._init_platform_smp()
# Adding energy model information
if 'nrg_model' in self.conf:
self.platform['nrg_model'] = self.conf['nrg_model']
# Try to load the default energy model (if available)
else:
self.platform['nrg_model'] = self._load_em(self.conf['board'])
# Adding topology information
self.platform['topology'] = self.topology.get_level("cluster")
# Adding kernel build information
kver = self.target.kernel_version
self.platform['kernel'] = {t: getattr(kver, t, None)
for t in [
'release', 'version',
'version_number', 'major', 'minor',
'rc', 'sha1', 'parts'
]
}
self.platform['abi'] = self.target.abi
self.platform['os'] = self.target.os
self._log.debug('Platform descriptor initialized\n%s', self.platform)
# self.platform_dump('./')
def platform_dump(self, dest_dir, dest_file='platform.json'):
plt_file = os.path.join(dest_dir, dest_file)
self._log.debug('Dump platform descriptor in [%s]', plt_file)
with open(plt_file, 'w') as ofile:
json.dump(self.platform, ofile, sort_keys=True, indent=4)
return (self.platform, plt_file)
def calibration(self, force=False):
"""
Get rt-app calibration. Run calibration on target if necessary.
:param force: Always run calibration on target, even if we have not
installed rt-app or have already run calibration.
:returns: A dict with calibration results, which can be passed as the
``calibration`` parameter to :class:`RTA`, or ``None`` if
force=False and we have not installed rt-app.
"""
if not force and self._calib:
return self._calib
required = force or 'rt-app' in self.__installed_tools
if not required:
self._log.debug('No RT-App workloads, skipping calibration')
return
if not force and 'rtapp-calib' in self.conf:
self._log.warning('Using configuration provided RTApp calibration')
self._calib = {
int(key): int(value)
for key, value in self.conf['rtapp-calib'].items()
}
else:
self._log.info('Calibrating RTApp...')
self._calib = RTA.calibrate(self.target)
self._log.info('Using RT-App calibration values:')
self._log.info(' %s',
"{" + ", ".join('"%r": %r' % (key, self._calib[key])
for key in sorted(self._calib)) + "}")
return self._calib
def resolv_host(self, host=None):
"""
Resolve a host name or IP address to a MAC address
.. TODO Is my networking terminology correct here?
:param host: IP address or host name to resolve. If None, use 'host'
value from target_config.
:type host: str
"""
if host is None:
host = self.conf['host']
# Refresh ARP for local network IPs
self._log.debug('Collecting all Bcast address')
output = os.popen(r'ifconfig').read().split('\n')
for line in output:
match = IFCFG_BCAST_RE.search(line)
if not match:
continue
baddr = match.group(1)
try:
cmd = r'nmap -T4 -sP {}/24 &>/dev/null'.format(baddr.strip())
self._log.debug(cmd)
os.popen(cmd)
except RuntimeError:
self._log.warning('Nmap not available, try IP lookup using broadcast ping')
cmd = r'ping -b -c1 {} &>/dev/null'.format(baddr)
self._log.debug(cmd)
os.popen(cmd)
return self.parse_arp_cache(host)
def parse_arp_cache(self, host):
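# Parse 'arp -n' output to resolve the counterpart of the given MAC or IP address; returns (mac, ip)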
output = os.popen(r'arp -n')
if ':' in host:
# Assuming this is a MAC address
# TODO add a suitable check on MAC address format
# Query ARP for the specified HW address
ARP_RE = re.compile(
r'([^ ]*).*({}|{})'.format(host.lower(), host.upper())
)
macaddr = host
ipaddr = None
for line in output:
match = ARP_RE.search(line)
if not match:
continue
ipaddr = match.group(1)
break
else:
# Assuming this is an IP address
# TODO add a suitable check on IP address format
# Query ARP for the specified IP address
ARP_RE = re.compile(
r'{}.*ether *([0-9a-fA-F:]*)'.format(host)
)
macaddr = None
ipaddr = host
for line in output:
match = ARP_RE.search(line)
if not match:
continue
macaddr = match.group(1)
break
else:
# When the target is accessed via WiFi, there is no MAC address
# reported by arp. In these cases we can know only the IP
# of the remote target.
macaddr = 'UNKNOWN'
if not ipaddr or not macaddr:
raise ValueError('Unable to lookup for target IP/MAC address')
self._log.info('Target (%s) at IP address: %s', macaddr, ipaddr)
return (macaddr, ipaddr)
def reboot(self, reboot_time=120, ping_time=15):
"""
Reboot target.
:param reboot_time: Time to wait for the target to become available after
reboot before declaring failure.
:param ping_time: Period between attempts to ping the target while
waiting for reboot.
"""
# Send remote target a reboot command
if self._feature('no-reboot'):
self._log.warning('Reboot disabled by conf features')
else:
if 'reboot_time' in self.conf:
reboot_time = int(self.conf['reboot_time'])
if 'ping_time' in self.conf:
ping_time = int(self.conf['ping_time'])
# Before rebooting make sure to have IP and MAC addresses
# of the target
(self.mac, self.ip) = self.parse_arp_cache(self.ip)
self.target.execute('sleep 2 && reboot -f &', as_root=True)
# Wait for the target to complete the reboot
self._log.info('Waiting up to %s[s] for target [%s] to reboot...',
reboot_time, self.ip)
ping_cmd = "ping -c 1 {} >/dev/null".format(self.ip)
elapsed = 0
start = time.time()
while elapsed <= reboot_time:
time.sleep(ping_time)
self._log.debug('Trying to connect to [%s] target...', self.ip)
if os.system(ping_cmd) == 0:
break
elapsed = time.time() - start
if elapsed > reboot_time:
if self.mac:
self._log.warning('target [%s] not responding to PINGs, '
'trying to resolve MAC address...',
self.ip)
(self.mac, self.ip) = self.resolv_host(self.mac)
else:
self._log.warning('target [%s] not responding to PINGs, '
'trying to continue...',
self.ip)
# Force re-initialization of all the devlib modules
force = True
# Reset the connection to the target
self._init(force)
# Initialize FTrace events collection
self._init_ftrace(force)
# Initialize energy probe instrument
self._init_energy(force)
def install_kernel(self, tc, reboot=False):
"""
Deploy kernel and DTB via TFTP, optionally rebooting
:param tc: Dictionary containing optional keys 'kernel' and 'dtb'. Values
are paths to the binaries to deploy.
:type tc: dict
:param reboot: Reboot the target after deployment
:type reboot: bool
"""
# Default initialize the kernel/dtb settings
tc.setdefault('kernel', None)
tc.setdefault('dtb', None)
if self.kernel == tc['kernel'] and self.dtb == tc['dtb']:
return
self._log.info('Install kernel [%s] on target...', tc['kernel'])
# Install kernel/dtb via TFTP
if self._feature('no-kernel'):
self._log.warning('Kernel deploy disabled by conf features')
elif 'tftp' in self.conf:
self._log.info('Deploy kernel via TFTP...')
# Deploy kernel in TFTP folder (mandatory)
if 'kernel' not in tc or not tc['kernel']:
raise ValueError('Missing "kernel" parameter in conf [KernelSetup]: {}'.format(tc))
self.tftp_deploy(tc['kernel'])
# Deploy DTB in TFTP folder (if provided)
if 'dtb' not in tc or not tc['dtb']:
self._log.debug('DTB not provided, using existing one')
self._log.debug('Current conf:\n%s', tc)
self._log.warning('Using pre-installed DTB')
else:
self.tftp_deploy(tc['dtb'])
else:
raise ValueError('Kernel installation method not supported')
# Keep track of last installed kernel
self.kernel = tc['kernel']
if 'dtb' in tc:
self.dtb = tc['dtb']
if not reboot:
return
# Reboot target
self._log.info('Rebooting target...')
self.reboot()
def tftp_deploy(self, src):
"""
.. TODO
"""
tftp = self.conf['tftp']
dst = tftp['folder']
if 'kernel' in src:
dst = os.path.join(dst, tftp['kernel'])
elif 'dtb' in src:
dst = os.path.join(dst, tftp['dtb'])
else:
dst = os.path.join(dst, os.path.basename(src))
cmd = 'cp {} {} && sync'.format(src, dst)
self._log.info('Deploy %s into %s', src, dst)
result = os.system(cmd)
if result != 0:
self._log.error('Failed to deploy image: %s', src)
raise ValueError('copy error')
def _feature(self, feature):
return feature in self.conf['__features__']
IFCFG_BCAST_RE = re.compile(
r'Bcast:(.*) '
)
# vim :set tabstop=4 shiftwidth=4 expandtab
| [] | [] | ["CATAPULT_HOME", "LISA_HOME", "ANDROID_HOME", "PATH"] | [] | ["CATAPULT_HOME", "LISA_HOME", "ANDROID_HOME", "PATH"] | python | 4 | 0 | |
gltrader/trader.py | import os
import sys
import json
from .bittrex import Bittrex
from .market import Market
from .notification import *
from .fakeapi import FakeAPI
from jsmin import jsmin
import threading
from pprint import pprint as pp
class Trader(object):
"""
The object which makes the tick API read calls and dispatches the data to the markets
"""
#raw API data from each tick
data = None
#dict of each market object keyed by abbr of crypto used by bittrex
markets = {}
#list of notifications that should be displayed
notifications = {}
#config is false until parsed from file
config = False
def __init__(self):
"""
sets parameters and parses config dictionary from file
"""
#open file
with open(os.environ['GLTRADER_CONFIG'] ) as config_file:
#parse and reduce json so it can have comments
minified = jsmin(config_file.read())
#convert to json
self.config = json.loads(minified)
#create new instance of API wrapper object and set as property of trader object so it can be accessed
self.api = Bittrex( self.config["exchange"]["bittrex"]["key"],
self.config["exchange"]["bittrex"]["secret"], api_version="v2.0" )
#mocked API that can be used for some basic testing of live trades (cannot replace whole API)
self.fapi = FakeAPI()
def getData(self):
"""
Makes API call to get data, returns empty if not successful
:returns: List[Dict] if succesful, or None
"""
#get data via get_balances to limit individual calls and make sure data used down the line is synchronous
response = self.api.get_balances()
#if API responds
if response["success"]:
return response["result"]
#allow execution to continue with failed tick without errors, but don't actually do anything
else:
pp(response)
Alert("Tick missed: "+response.get(message, "Tick failed, no message"))
return None
def refreshMarkets(self):
"""
Checks for data and calls "getMarkets"
"""
#Get data then send to markets ---Factored out in case more things need to get done on each tick at this level in the future
self.data = self.getData()
if (self.data is not None):
self.getMarkets()
def getMarkets(self):
"""
For each list entry in the returned data, start a thread and create (if it doesn't exist) or refresh the appropriate market.
Join the threads so the next tick cannot be called before execution is finished and the UI doesn't update before they finish.
"""
if self.data is not None:
threads=[]
# for each raw data dict in array
for marketdata in self.data:
# get name of market from dict
name = marketdata["Currency"]["Currency"]
#if market is already in the dict
if name in self.markets:
#set new thread to call "update" method on market
t = threading.Thread(target=self.markets[name].update, args=[marketdata])
threads.append(t)
t.start()
# threading.Thread(target=self.printVars).start()
else:
#add market and set thread to update
self.markets[name] = Market(name, marketdata)
t = threading.Thread(target=self.markets[name].update, args=[marketdata])
threads.append(t)
t.start()
#join all the threads back so UI update won't start before they finish
for t in threads:
t.join()
else:
pass
def getNotifications(self):
"""
:returns: Dictionary[Notification] A dictionary with the notifications
"""
#keeping notifications run in trader in case it is necessary in the future
return self.notifications
def printVars(self):
"""
For debugging purposes
"""
for var, obj in locals().items():
print( var, sys.getsizeof(obj))
def dump(self):
"""
Return api data from markets --- used for tests
"""
pp(self.api.get_balances())
def dumplist(self):
"""
Return api data from markets --- used for tests
"""
response = self.api.get_balances()
return response["result"]
| [] | [] | ["GLTRADER_CONFIG"] | [] | ["GLTRADER_CONFIG"] | python | 1 | 0 | |
main.go | package main
import (
"bytes"
"image"
_ "image/jpeg"
"io/ioutil"
"log"
"os"
)
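// main copies newly cached Windows Spotlight lock-screen assets (landscape JPEGs only) into the user's Pictures folder, skipping files already copied.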
func main() {
destination := os.Getenv("USERPROFILE")
destination += "\\Pictures\\自带壁纸"
log.Println("Destination: " + destination)
if !exists(destination) {
_ = os.Mkdir(destination, 0664)
}
path := os.Getenv("LocalAppData")
path += "\\Packages\\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\\LocalState\\Assets"
log.Println("Assets Path: " + path)
files, err := ioutil.ReadDir(path)
if err != nil {
log.Println("ReadDir err: " + err.Error())
return
}
for _, f := range files {
dst := destination + "\\" + f.Name() + ".jpg"
if exists(dst) {
continue
}
src := path + "\\" + f.Name()
input, err := ioutil.ReadFile(src)
if err != nil {
log.Println("ReadFile err: " + err.Error())
continue
}
img, _, err := image.DecodeConfig(bytes.NewReader(input))
if err != nil || img.Height > img.Width {
continue
}
err = ioutil.WriteFile(dst, input, 0664)
if err != nil {
log.Println("CopyFile err: " + err.Error())
} else {
log.Println("CopyFile: " + f.Name() + "to " + dst)
}
}
}
func exists(path string) bool {
_, err := os.Lstat(path)
return err == nil || os.IsExist(err)
}
| ["\"USERPROFILE\"", "\"LocalAppData\""] | [] | ["LocalAppData", "USERPROFILE"] | [] | ["LocalAppData", "USERPROFILE"] | go | 2 | 0 | |
datawinners/dcs_web/local_settings_example_dcs.py | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import os
from datawinners.settings import PROJECT_DIR, TEMPLATE_DIRS
SITE_ID = 1
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'mangrove', # Or path to database file if using sqlite3.
'USER': os.getenv("USER"), # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
API_KEYS = {
'localhost:8000': 'AIzaSyChwOoz0ZXqQS6EAVcdngeb_17KMLW3eTM'
}
ROOT_URLCONF = 'datawinners.dcs_app.urls'
GOOGLE_MAPS_ENABLED = False
GOOGLE_ANALYTICS_ENABLED = False
TRIAL_REGISTRATION_ENABLED = True
HNI_SUPPORT_EMAIL_ID = '[email protected]'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
DEFAULT_FROM_EMAIL = 'Team Garner<[email protected]>'
EMAIL_HOST_PASSWORD = '@ngul@rr0cks'
EMAIL_PORT = 587
COUCHDBMAIN_USERNAME = 'admin'
COUCHDBMAIN_PASSWORD = 'admin'
COUCHDBMAIN_CREDENTIALS = (COUCHDBMAIN_USERNAME,COUCHDBMAIN_PASSWORD)
COUCHDBFEED_USERNAME = 'admin'
COUCHDBFEED_PASSWORD = 'admin'
COUCHDBFEED_CREDENTIALS = (COUCHDBFEED_USERNAME,COUCHDBFEED_PASSWORD)
HNI_BLOG_FEED = 'http://datawinners.wordpress.com/feed/'
VUMI_API_URL = "http://localhost:7000"
CRS_ORG_ID = 'TVZ184210'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'datawinners.accountmanagement',
'datawinners.sms',
'datawinners.activitylog',
'registration',
'django.contrib.admin',
'compressor',
'datawinners',
'datawinners.main',
'datawinners.project',
'datawinners.dashboard',
'datawinners.location',
'datawinners.entity',
'datawinners.submission',
'datawinners.xforms',
'datawinners.dataextraction',
'django_extensions',
'django.contrib.flatpages',
'south',
'datawinners.home',
'datawinners.countrytotrialnumbermapping',
'django_nose',
'django_digest',
'datawinners.custom_reports.crs',
'debug_toolbar',
'rest_framework.authtoken',
)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'datawinners.middleware.exception_middleware.ExceptionMiddleware',
'urlmiddleware.URLMiddleware',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': lambda x: False,
'HIDE_DJANGO_SQL': True,
'TAG': 'p',
'ENABLE_STACKTRACES' : False,
}
FEEDS_ENABLED=True
MAX_FEED_ENTRIES=10000
LIMIT_TRIAL_ORG_SUBMISSION_COUNT = 30
LIMIT_TRIAL_ORG_MESSAGE_COUNT = 10
NEAR_SUBMISSION_LIMIT_TRIGGER = 20
NEAR_SMS_LIMIT_TRIGGER = 5
VUMI_API_URL = "http://localhost:2020"
DEBUG_BROWSER="firefox" # firefox | chrome | phantom | ie | htmlunit
BRAND = "dcs"
BRAND_FEATURES = {
'DW_BUILDER_PROJECT_TO_XLSFORMS':True
}
LOCALE_PATHS = (
os.path.join(PROJECT_DIR, 'locale_dcs'),
os.path.join(PROJECT_DIR, 'locale')
)
TEMPLATE_DIRS = (os.path.join(PROJECT_DIR, 'templates_dcs'),) + TEMPLATE_DIRS
BRAND_URL_CONF = ('datawinners.dcs_app.urls',
'datawinners.dcs_web.urls')
| [] | [] | ["USER"] | [] | ["USER"] | python | 1 | 0 | |
backend/src/gloader/gini_nw.py | # file: gini_nw.py
import os
import xml.dom.minidom
from gini_components import *
class GINI_NW:
switches = []
vm = []
vmb = []
vr = []
vwr = []
def __init__(self, docDOM):
"Initialize the GINI_NW class"
self.getSwitches(docDOM.getElementsByTagName("vs"))
self.getVMs(docDOM.getElementsByTagName('vm'))
self.getVMBs(docDOM.getElementsByTagName("vmb"))
self.getVRs(docDOM.getElementsByTagName("vr"))
self.getVWRs(docDOM.getElementsByTagName("vwr"))
def getSwitches(self, elements):
"get the switch configuration"
for switch in elements:
newSwitch = Switch(switch.getAttribute("name"))
for para in switch.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "port"):
newSwitch.port = self.getTextPart(para)
if (para.tagName.lower() == "remote"):
newSwitch.remote = self.getTextPart(para)
if (para.tagName.lower() == "hub"):
newSwitch.hub = True
self.switches.append(newSwitch)
return True
def getVMs(self, elements):
"get virtual machine configurations"
for vm in elements:
newVM = VM(vm.getAttribute("name"))
for para in vm.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "filesystem"):
newVM.fileSystem = FileSystem()
newVM.fileSystem.type = para.getAttribute("type")
newVM.fileSystem.name = os.environ["GINI_HOME"] + "/" + self.getTextPart(para)
if (para.tagName.lower() == "mem"):
newVM.mem = self.getTextPart(para)
if (para.tagName.lower() == "kernel"):
newVM.kernel = self.getTextPart(para)
if (para.tagName.lower() == "boot"):
newVM.boot = self.getBoot(para)
if (para.tagName.lower() == "if"):
newIF = self.getVMIF(para, len(newVM.interfaces))
newVM.addInterface(newIF)
self.vm.append(newVM)
return True
def getVMBs(self, elements):
"get wireless virtual machine configurations"
for vmb in elements:
newVMB = VMB(vmb.getAttribute("name"))
for para in vmb.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "filesystem"):
newVMB.fileSystem = FileSystem()
newVMB.fileSystem.type = para.getAttribute("type")
newVMB.fileSystem.name = os.environ["GINI_HOME"] + "/" + self.getTextPart(para)
if (para.tagName.lower() == "mem"):
newVMB.mem = self.getTextPart(para)
if (para.tagName.lower() == "kernel"):
newVMB.kernel = self.getTextPart(para)
if (para.tagName.lower() == "boot"):
newVMB.boot = self.getBoot(para)
if (para.tagName.lower() == "if"):
newIF = self.getVMIF(para, len(newVMB.interfaces))
newVMB.addInterface(newIF)
self.vmb.append(newVMB)
return True
def getVRs(self, elements):
"Get router specification"
for router in elements:
newVR = VR(router.getAttribute("name"))
for para in router.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "cli"):
newVR.cli = True
if (para.tagName.lower() == "netif"):
newIF = self.getVRIF(para, len(newVR.netIF)+1)
newVR.addNetIF(newIF)
self.vr.append(newVR)
return True
def getVWRs(self, elements):
"Get wireless router specification"
for router in elements:
newVWR = VWR(router.getAttribute("name"))
for para in router.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "cli"):
newVWR.cli = True
if (para.tagName.lower() == "netif"):
newIF = self.getVRIF(para, len(newVWR.netIF))
newVWR.addNetIF(newIF)
if (para.tagName.lower() == "netif_wireless"):
newWIF = self.getVWRIF(para, len(newVWR.netIFWireless))
newVWR.addWirelessIF(newWIF)
self.vwr.append(newVWR)
return True
def getTextPart(self,elem):
"Extract the text within the element"
for textPart in elem.childNodes:
if (textPart.nodeType == textPart.TEXT_NODE):
remoteName = textPart.nodeValue.strip()
if (remoteName):
return remoteName
return ""
def getBoot(self, elem):
"get boot elememnt in VM specification"
for part in elem.childNodes:
if (part.nodeType == part.ELEMENT_NODE and
part.tagName.lower() == "con0"):
return self.getTextPart(part)
return ""
def getVMIF(self, elem, count):
"get VM network interface specification"
ifName = "eth%d" % count
myIF = VMInterface(ifName)
for para in elem.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "target"):
myIF.target = self.getTextPart(para)
if (para.tagName.lower() == "mac"):
myIF.mac = self.getTextPart(para)
if (para.tagName.lower() == "ip"):
myIF.ip = self.getTextPart(para)
if (para.tagName.lower() == "route"):
newRoute = self.getVMRoute(para)
myIF.addRoute(newRoute)
return myIF
def getVMRoute(self, elem):
"Extract VM route entries"
newRoute = VMRoute()
newRoute.type = elem.getAttribute("type")
newRoute.netmask = elem.getAttribute("netmask")
newRoute.gw = elem.getAttribute("gw")
newRoute.dest = self.getTextPart(elem)
return newRoute
def getVRIF(self, elem, index):
"get virtual router network interface"
ifName = "eth%d" % index
myIF = VRInterface(ifName)
for para in elem.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "target"):
myIF.target = self.getTextPart(para)
if (para.tagName.lower() == "nic"):
myIF.nic = self.getTextPart(para)
if (para.tagName.lower() == "ip"):
myIF.ip = self.getTextPart(para)
if (para.tagName.lower() == "network"):
myIF.network = self.getTextPart(para)
if (para.tagName.lower() == "gw"):
myIF.gw = self.getTextPart(para)
if (para.tagName.lower() == "mtu"):
myIF.mtu = self.getTextPart(para)
if (para.tagName.lower() == "rtentry"):
newRoute = self.getVRRoute(para)
myIF.addRoute(newRoute)
return myIF
def getVWRIF(self, elem, index):
"get virtual wireless router network interface"
ifName = "eth%d" % index
myWIF = VWRInterface(ifName)
for para in elem.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "nic"):
myWIF.nic = self.getTextPart(para)
if (para.tagName.lower() == "ip"):
myWIF.ip = self.getTextPart(para)
if (para.tagName.lower() == "network"):
myWIF.network = self.getTextPart(para)
if (para.tagName.lower() == "rtentry"):
newRoute = self.getVRRoute(para)
myWIF.addRoute(newRoute)
if (para.tagName.lower() == "wireless_card"):
newWcard = self.getWcard(para)
myWIF.wireless_card = newWcard
if (para.tagName.lower() == "energy"):
newEnergy = self.getEnergy(para)
myWIF.energy = newEnergy
if (para.tagName.lower() == "mac_layer"):
newMlayer = self.getMlayer(para)
myWIF.mac_layer = newMlayer
if (para.tagName.lower() == "antenna"):
newAntenna = self.getAntenna(para)
myWIF.antenna = newAntenna
if (para.tagName.lower() == "mobility"):
newMobility = self.getMobility(para)
myWIF.mobility = newMobility
return myWIF
def getWcard(self, elem):
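# Extract wireless card parameters (type, frequency, bandwidth, power levels, module) from the element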
newWcard = WirelessCard()
for para in elem.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "w_type"):
newWcard.wType = self.getTextPart(para)
if (para.tagName.lower() == "freq"):
newWcard.freq = self.getTextPart(para)
if (para.tagName.lower() == "bandwidth"):
newWcard.bandwidth = self.getTextPart(para)
if (para.tagName.lower() == "pt"):
newWcard.pt = self.getTextPart(para)
if (para.tagName.lower() == "pt_c"):
newWcard.ptC = self.getTextPart(para)
if (para.tagName.lower() == "pr_c"):
newWcard.prC = self.getTextPart(para)
if (para.tagName.lower() == "p_idle"):
newWcard.pIdle = self.getTextPart(para)
if (para.tagName.lower() == "p_sleep"):
newWcard.pSleep = self.getTextPart(para)
if (para.tagName.lower() == "p_off"):
newWcard.pOff = self.getTextPart(para)
if (para.tagName.lower() == "rx"):
newWcard.rx = self.getTextPart(para)
if (para.tagName.lower() == "cs"):
newWcard.cs = self.getTextPart(para)
if (para.tagName.lower() == "cp"):
newWcard.cp = self.getTextPart(para)
if (para.tagName.lower() == "module"):
newWcard.module = self.getTextPart(para)
return newWcard
def getEnergy(self, elem):
newEnergy = Energy()
for para in elem.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "power"):
newEnergy.power = self.getTextPart(para)
if (para.tagName.lower() == "psm"):
newEnergy.psm = self.getTextPart(para)
if (para.tagName.lower() == "energy_amount"):
newEnergy.energyAmount = self.getTextPart(para)
return newEnergy
def getMlayer(self, elem):
newMlayer = MacLayer()
for para in elem.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "mac_type"):
newMlayer.macType = self.getTextPart(para)
if (para.tagName.lower() == "trans"):
newMlayer.trans = self.getTextPart(para)
return newMlayer
def getAntenna(self, elem):
newAntenna = Antenna()
for para in elem.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "a_type"):
newAntenna.aType = self.getTextPart(para)
if (para.tagName.lower() == "ant_h"):
newAntenna.ant_h = self.getTextPart(para)
if (para.tagName.lower() == "ant_g"):
newAntenna.ant_g = self.getTextPart(para)
if (para.tagName.lower() == "ant_l"):
newAntenna.ant_l = self.getTextPart(para)
if (para.tagName.lower() == "jam"):
newAntenna.jam = self.getTextPart(para)
return newAntenna
def getMobility(self, elem):
newMobility = Mobility()
for para in elem.childNodes:
if (para.nodeType == para.ELEMENT_NODE):
if (para.tagName.lower() == "m_type"):
newMobility.mType = self.getTextPart(para)
if (para.tagName.lower() == "ran_max"):
newMobility.ranMax = self.getTextPart(para)
if (para.tagName.lower() == "ran_min"):
newMobility.ranMin = self.getTextPart(para)
return newMobility
def getVRRoute(self, elem):
"Extract VR route entries"
newRoute = VRRoute()
newRoute.netmask = elem.getAttribute("netmask")
newRoute.nexthop = elem.getAttribute("nexthop")
newRoute.dest = self.getTextPart(elem)
return newRoute
| [] | [] | ["GINI_HOME"] | [] | ["GINI_HOME"] | python | 1 | 0 | |
mark-RRD/edit.py | # -*- coding: utf-8 -*-
import argparse
import hashlib
import json
import os
import re
import time
os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
from pywikibot.data.api import Request
from config import config_page_name # pylint: disable=E0611,W0614
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.set_defaults(debug=False)
args = parser.parse_args()
os.environ['TZ'] = 'UTC'
site = pywikibot.Site()
site.login()
config_page = pywikibot.Page(site, config_page_name)
cfg = config_page.text
cfg = json.loads(cfg)
if not cfg['enable']:
print('disabled\n')
exit()
rrdpage = pywikibot.Page(site, cfg['rrd_page'])
text = rrdpage.text
rndstr = hashlib.md5(str(time.time()).encode()).hexdigest()
text = re.sub(r'({{Revdel)', rndstr + r'\1', text)
text = text.split(rndstr)
newtext = text[0]
remaincnt = 0
for secid in range(1, len(text)):
sectext = text[secid].strip()
m = re.search(r'\|\s*article\s*=\s*(.+?)\s*\|', sectext)
if m:
title = m.group(1)
if args.debug:
print(title)
if re.search(r'\|\s*status\s*=\s*((新申請)?<!--(不要修改本参数|不要修改本參數)-->)?\s*\|', sectext):
flag = 0
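# RevisionDelete bitmask: bit 1 = revision text hidden, bit 2 = edit summary hidden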
if re.search(r'\|\s*set\s*=.*([編编][輯辑]|[刪删]除)?[內内]容', sectext):
flag |= 1
if args.debug:
print('\tcontent')
if re.search(r'\|\s*set\s*=.*([編编][輯辑])?摘要', sectext):
flag |= 2
if args.debug:
print('\tsummary')
if flag != 0:
ids = re.findall(r'\|id\d+\s*=\s*(\d+)', sectext)
if ids:
data = Request(site=site, parameters={
'action': 'query',
'list': 'logevents',
'leaction': 'delete/revision',
'lelimit': '10',
'letitle': title
}).submit()
deleted = 0
admins = {}
for logevent in data['query']['logevents']:
logid = str(logevent['logid'])
admin = logevent['user']
if args.debug:
print('\t', logevent)
if (logevent['params']['type'] == 'revision'
and logevent['params']['new']['bitmask'] & flag == flag):
for rvid in logevent['params']['ids']:
rvid = str(rvid)
if rvid in ids:
deleted += 1
if admin not in admins:
admins[admin] = {}
if logid not in admins[admin]:
admins[admin][logid] = 0
admins[admin][logid] += 1
if deleted == len(ids):
break
for admin in admins:
logids = []
delcnt = 0
for logid in admins[admin]:
if logid not in sectext:
logids.append(logid)
delcnt += admins[admin][logid]
if logids:
if deleted == len(ids) and len(admins) == 1:
sectext += '\n' + cfg['comment_delete_all'].format(
admin, '<!-- ' + ','.join(logids) + ' -->')
else:
sectext += '\n' + cfg['comment_delete_partial'].format(
admin, delcnt, '<!-- ' + ','.join(logids) + ' -->')
if deleted == len(ids):
sectext = re.sub(
r'(\|\s*status\s*=).*', r'\1 +', sectext)
else:
remaincnt += 1
if args.debug:
print('\tdeleted {}/{} in {}'.format(deleted, len(ids), admins))
else:
if args.debug:
print('\tcannot get ids')
remaincnt += 1
else:
if args.debug:
print('\tcannot detect type')
remaincnt += 1
else:
if args.debug:
print('\tdone')
else:
if args.debug:
print('cannot get article')
newtext += sectext + '\n\n'
if re.sub(r'\s', '', rrdpage.text) == re.sub(r'\s', '', newtext):
if args.debug:
print('nothing changed')
exit()
pywikibot.showDiff(rrdpage.text, newtext)
rrdpage.text = newtext
summary = cfg['summary'].format(remaincnt)
rrdpage.save(summary=summary, minor=True)
| [] | [] | ["TZ", "PYWIKIBOT_DIR"] | [] | ["TZ", "PYWIKIBOT_DIR"] | python | 2 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tbc_site.settings.dev")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
utils/docker.go | package utils
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/term"
"github.com/insanebrain/dbp/model"
"github.com/sirupsen/logrus"
"io/ioutil"
"os"
"path"
"strings"
)
const defaultDockerAPIVersion = "v1.39"
const DockerAuthUrl = "https://index.docker.io/v1/"
type AuthConfig struct {
AuthConfigs map[string]types.AuthConfig `json:"auths,omitempty"`
HttpHeaders struct {
UserAgent string `json:"User-Agent,omitempty"`
}
}
func (authConfig *AuthConfig) GetAuthConfigs() map[string]types.AuthConfig {
authConfigs := map[string]types.AuthConfig{}
for hostname, config := range authConfig.AuthConfigs {
data, err := base64.StdEncoding.DecodeString(config.Auth)
if err != nil {
logrus.Debug("cannot decode base64 string from .docker/config.json")
}
usernamePassword := strings.SplitN(string(data), ":", 2)
if len(usernamePassword) != 2 {
logrus.Debug("base64 string length is more than 2")
}
authConfigs[hostname] = types.AuthConfig{
Username: usernamePassword[0],
Password: usernamePassword[1],
Auth: config.Auth,
ServerAddress: hostname,
}
}
return authConfigs
}
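// getDockerClient returns a Docker API client built from the environment and pinned to defaultDockerAPIVersion.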
func getDockerClient() *client.Client {
cli, _ := client.NewClientWithOpts(client.FromEnv, client.WithVersion(defaultDockerAPIVersion))
return cli
}
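// GetAuthConfig loads registry auth entries from the user's $HOME/.docker/config.json.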
func GetAuthConfig() (AuthConfig, error) {
authConfig := AuthConfig{}
configFile, err := ioutil.ReadFile(path.Join(os.Getenv("HOME"), ".docker", "config.json"))
if err != nil {
return authConfig, err
}
err = json.Unmarshal(configFile, &authConfig)
if err != nil {
return authConfig, err
}
return authConfig, nil
}
// Build builds the image described by imageData using the native Docker API
func Build(imageData *model.ImageData) error {
imageDir := imageData.Dir
tags := imageData.GetTags()
dockerBuildContext, err := archive.TarWithOptions(imageDir, &archive.TarOptions{})
defer dockerBuildContext.Close()
if err != nil {
return err
}
authConfig, _ := GetAuthConfig()
cli := getDockerClient()
args := map[string]*string{
}
options := types.ImageBuildOptions{
SuppressOutput: false,
Remove: true,
ForceRemove: true,
PullParent: false,
Tags: tags,
BuildArgs: args,
AuthConfigs: authConfig.GetAuthConfigs(),
}
buildResponse, err := cli.ImageBuild(context.Background(), dockerBuildContext, options)
if err != nil {
return err
}
defer buildResponse.Body.Close()
termFd, isTerm := term.GetFdInfo(os.Stderr)
return jsonmessage.DisplayJSONMessagesStream(buildResponse.Body, os.Stderr, termFd, isTerm, nil)
}
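// Push pushes the given image tag to its registry, using credentials resolved from the local Docker config.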
func Push(tag string) error {
cli := getDockerClient()
authConfig, _ := GetAuthConfig()
authConfigs := authConfig.GetAuthConfigs()
ref, err := reference.ParseNormalizedNamed(tag)
if err != nil {
return err
}
authKey := reference.Domain(ref)
if reference.Domain(ref) == DockerDomain {
authKey = DockerAuthUrl
}
if _, ok := authConfigs[authKey]; !ok {
return errors.New(fmt.Sprintf("unable to find docker credential of %s.\n did you forget to docker login ?", reference.Domain(ref)))
}
buf, err := json.Marshal(authConfigs[authKey])
if err != nil {
return err
}
options := types.ImagePushOptions{
RegistryAuth: base64.URLEncoding.EncodeToString(buf),
All: false,
}
pushResponse, err := cli.ImagePush(context.Background(), tag, options)
if err != nil {
return err
}
defer pushResponse.Close()
termFd, isTerm := term.GetFdInfo(os.Stderr)
return jsonmessage.DisplayJSONMessagesStream(pushResponse, os.Stderr, termFd, isTerm, nil)
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
src/common/dao/pgsql.go | // Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"fmt"
"net/url"
"os"
"strconv"
"github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/lib/log"
migrate "github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres" // import pgsql driver for migrator
_ "github.com/golang-migrate/migrate/v4/source/file" // import local file driver for migrator
_ "github.com/lib/pq" // register pgsql driver
)
const defaultMigrationPath = "migrations/postgresql/"
type pgsql struct {
host string
port string
usr string
pwd string
database string
sslmode string
maxIdleConns int
maxOpenConns int
}
// Name returns the name of PostgreSQL
func (p *pgsql) Name() string {
return "PostgreSQL"
}
// String ...
func (p *pgsql) String() string {
return fmt.Sprintf("type-%s host-%s port-%s database-%s sslmode-%q",
p.Name(), p.host, p.port, p.database, p.sslmode)
}
// NewPGSQL returns an instance of postgres
func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string, maxIdleConns int, maxOpenConns int) Database {
if len(sslmode) == 0 {
sslmode = "disable"
}
return &pgsql{
host: host,
port: port,
usr: usr,
pwd: pwd,
database: database,
sslmode: sslmode,
maxIdleConns: maxIdleConns,
maxOpenConns: maxOpenConns,
}
}
// Register registers pgSQL to orm with the info wrapped by the instance.
func (p *pgsql) Register(alias ...string) error {
if err := utils.TestTCPConn(fmt.Sprintf("%s:%s", p.host, p.port), 60, 2); err != nil {
return err
}
if err := orm.RegisterDriver("postgres", orm.DRPostgres); err != nil {
return err
}
an := "default"
if len(alias) != 0 {
an = alias[0]
}
info := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
p.host, p.port, p.usr, p.pwd, p.database, p.sslmode)
if err := orm.RegisterDataBase(an, "postgres", info, p.maxIdleConns, p.maxOpenConns); err != nil {
return err
}
// Due to the issues of beego v1.12.1 and v1.12.2, we set the max open conns ourselves.
// See https://github.com/goharbor/harbor/issues/12403
// and https://github.com/astaxie/beego/issues/4059 for more info.
db, _ := orm.GetDB(an)
db.SetMaxOpenConns(p.maxOpenConns)
return nil
}
// UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts.
func (p *pgsql) UpgradeSchema() error {
port, err := strconv.ParseInt(p.port, 10, 64)
if err != nil {
return err
}
m, err := NewMigrator(&models.PostGreSQL{
Host: p.host,
Port: int(port),
Username: p.usr,
Password: p.pwd,
Database: p.database,
SSLMode: p.sslmode,
})
if err != nil {
return err
}
defer func() {
srcErr, dbErr := m.Close()
if srcErr != nil || dbErr != nil {
log.Warningf("Failed to close migrator, source error: %v, db error: %v", srcErr, dbErr)
}
}()
log.Infof("Upgrading schema for pgsql ...")
err = m.Up()
if err == migrate.ErrNoChange {
log.Infof("No change in schema, skip.")
} else if err != nil { // migrate.ErrLockTimeout will be thrown when another process is doing migration and timeout.
log.Errorf("Failed to upgrade schema, error: %q", err)
return err
}
return nil
}
// NewMigrator creates a migrator base on the information
func NewMigrator(database *models.PostGreSQL) (*migrate.Migrate, error) {
dbURL := url.URL{
Scheme: "postgres",
User: url.UserPassword(database.Username, database.Password),
Host: fmt.Sprintf("%s:%d", database.Host, database.Port),
Path: database.Database,
RawQuery: fmt.Sprintf("sslmode=%s", database.SSLMode),
}
// For UT
path := os.Getenv("POSTGRES_MIGRATION_SCRIPTS_PATH")
if len(path) == 0 {
path = defaultMigrationPath
}
srcURL := fmt.Sprintf("file://%s", path)
m, err := migrate.New(srcURL, dbURL.String())
if err != nil {
return nil, err
}
m.Log = newMigrateLogger()
return m, nil
}
| ["\"POSTGRES_MIGRATION_SCRIPTS_PATH\""] | [] | ["POSTGRES_MIGRATION_SCRIPTS_PATH"] | [] | ["POSTGRES_MIGRATION_SCRIPTS_PATH"] | go | 1 | 0 | |